author     Russell King <rmk@dyn-67.arm.linux.org.uk>    2009-09-12 07:04:37 -0400
committer  Russell King <rmk+kernel@arm.linux.org.uk>    2009-09-12 07:04:37 -0400
commit     87d721ad7a37b7650dd710c88dd5c6a5bf9fe996 (patch)
tree       869d633803eb7c429624d3bd16a6117816849763 /drivers/net
parent     ddd559b13f6d2fe3ad68c4b3f5235fd3c2eae4e3 (diff)
parent     b7cfda9fc3d7aa60cffab5367f2a72a4a70060cd (diff)

Merge branch 'master' into devel

Diffstat (limited to 'drivers/net'): 83 files changed, 781 insertions, 483 deletions
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index 3e00fa8ea65f..4a7c32895be5 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -832,7 +832,9 @@ static int corkscrew_open(struct net_device *dev)
 skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
 vp->rx_ring[i].addr = isa_virt_to_bus(skb->data);
 }
-vp->rx_ring[i - 1].next = isa_virt_to_bus(&vp->rx_ring[0]);	/* Wrap the ring. */
+if (i != 0)
+vp->rx_ring[i - 1].next =
+isa_virt_to_bus(&vp->rx_ring[0]);	/* Wrap the ring. */
 outl(isa_virt_to_bus(&vp->rx_ring[0]), ioaddr + UpListPtr);
 }
 if (vp->full_bus_master_tx) {	/* Boomerang bus master Tx. */
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index c34aee91250b..45675889850b 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -235,6 +235,7 @@ enum vortex_chips {
 CH_3C900B_FL,
 CH_3C905_1,
 CH_3C905_2,
+CH_3C905B_TX,
 CH_3C905B_1,
 
 CH_3C905B_2,
@@ -307,6 +308,8 @@ static struct vortex_chip_info {
 PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, },
 {"3c905 Boomerang 100baseT4",
 PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, },
+{"3C905B-TX Fast Etherlink XL PCI",
+PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
 {"3c905B Cyclone 100baseTx",
 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
 
@@ -389,6 +392,7 @@ static struct pci_device_id vortex_pci_tbl[] = {
 { 0x10B7, 0x900A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900B_FL },
 { 0x10B7, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_1 },
 { 0x10B7, 0x9051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_2 },
+{ 0x10B7, 0x9054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_TX },
 { 0x10B7, 0x9055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_1 },
 
 { 0x10B7, 0x9058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_2 },
@@ -2721,13 +2725,15 @@ dump_tx_ring(struct net_device *dev)
 &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]);
 issue_and_wait(dev, DownStall);
 for (i = 0; i < TX_RING_SIZE; i++) {
-pr_err(" %d: @%p length %8.8x status %8.8x\n", i,
-&vp->tx_ring[i],
+unsigned int length;
+
 #if DO_ZEROCOPY
-le32_to_cpu(vp->tx_ring[i].frag[0].length),
+length = le32_to_cpu(vp->tx_ring[i].frag[0].length);
 #else
-le32_to_cpu(vp->tx_ring[i].length),
+length = le32_to_cpu(vp->tx_ring[i].length);
 #endif
+pr_err(" %d: @%p length %8.8x status %8.8x\n",
+i, &vp->tx_ring[i], length,
 le32_to_cpu(vp->tx_ring[i].status));
 }
 if (!stalled)
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 50efde11ea6c..d0dbbf39349a 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -515,7 +515,7 @@ rx_status_loop:
 dma_addr_t mapping;
 struct sk_buff *skb, *new_skb;
 struct cp_desc *desc;
-unsigned buflen;
+const unsigned buflen = cp->rx_buf_sz;
 
 skb = cp->rx_skb[rx_tail];
 BUG_ON(!skb);
@@ -549,8 +549,7 @@ rx_status_loop:
 pr_debug("%s: rx slot %d status 0x%x len %d\n",
 dev->name, rx_tail, status, len);
 
-buflen = cp->rx_buf_sz + NET_IP_ALIGN;
-new_skb = netdev_alloc_skb(dev, buflen);
+new_skb = netdev_alloc_skb(dev, buflen + NET_IP_ALIGN);
 if (!new_skb) {
 dev->stats.rx_dropped++;
 goto rx_next;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 3ed49f1337c7..985a0550c803 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1727,12 +1727,14 @@ config KS8842
 tristate "Micrel KSZ8842"
 depends on HAS_IOMEM
 help
-This platform driver is for Micrel KSZ8842 chip.
+This platform driver is for Micrel KSZ8842 / KS8842
+2-port ethernet switch chip (managed, VLAN, QoS).
 
 config KS8851
 tristate "Micrel KS8851 SPI"
 depends on SPI
 select MII
+select CRC32
 help
 SPI driver for Micrel KS8851 SPI attached network chip.
 
diff --git a/drivers/net/arm/w90p910_ether.c b/drivers/net/arm/w90p910_ether.c
index 616fb7985a34..ddd231cb54b7 100644
--- a/drivers/net/arm/w90p910_ether.c
+++ b/drivers/net/arm/w90p910_ether.c
@@ -1080,7 +1080,7 @@ static struct platform_driver w90p910_ether_driver = {
 .probe = w90p910_ether_probe,
 .remove = __devexit_p(w90p910_ether_remove),
 .driver = {
-.name = "w90p910-emc",
+.name = "nuc900-emc",
 .owner = THIS_MODULE,
 },
 };
@@ -1101,5 +1101,5 @@ module_exit(w90p910_ether_exit);
 MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
 MODULE_DESCRIPTION("w90p910 MAC driver!");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:w90p910-emc");
+MODULE_ALIAS("platform:nuc900-emc");
 
diff --git a/drivers/net/atl1c/atl1c_ethtool.c b/drivers/net/atl1c/atl1c_ethtool.c
index 607007d75b6f..00d11b480af3 100644
--- a/drivers/net/atl1c/atl1c_ethtool.c
+++ b/drivers/net/atl1c/atl1c_ethtool.c
@@ -232,11 +232,11 @@ static void atl1c_get_drvinfo(struct net_device *netdev,
 {
 struct atl1c_adapter *adapter = netdev_priv(netdev);
 
-strncpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver));
-strncpy(drvinfo->version, atl1c_driver_version,
+strlcpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver));
+strlcpy(drvinfo->version, atl1c_driver_version,
 sizeof(drvinfo->version));
-strncpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
-strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
+strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
 sizeof(drvinfo->bus_info));
 drvinfo->n_stats = 0;
 drvinfo->testinfo_len = 0;
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 94d7325caf4f..8bca12f71390 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -3378,11 +3378,11 @@ static void atl1_get_drvinfo(struct net_device *netdev,
 {
 struct atl1_adapter *adapter = netdev_priv(netdev);
 
-strncpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
-strncpy(drvinfo->version, ATLX_DRIVER_VERSION,
+strlcpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
+strlcpy(drvinfo->version, ATLX_DRIVER_VERSION,
 sizeof(drvinfo->version));
-strncpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
-strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
+strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
 sizeof(drvinfo->bus_info));
 drvinfo->eedump_len = ATL1_EEDUMP_LEN;
 }
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 36d4d377ec2f..bafca672ea7d 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -952,9 +952,10 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 int rc = NETDEV_TX_OK;
 dma_addr_t mapping;
 u32 len, entry, ctrl;
+unsigned long flags;
 
 len = skb->len;
-spin_lock_irq(&bp->lock);
+spin_lock_irqsave(&bp->lock, flags);
 
 /* This is a hard error, log it. */
 if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
@@ -1027,7 +1028,7 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 dev->trans_start = jiffies;
 
 out_unlock:
-spin_unlock_irq(&bp->lock);
+spin_unlock_irqrestore(&bp->lock, flags);
 
 return rc;
 
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index b70cc99962fc..06b901152d44 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -399,9 +399,11 @@ static int bnx2_unregister_cnic(struct net_device *dev)
 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 
+mutex_lock(&bp->cnic_lock);
 cp->drv_state = 0;
 bnapi->cnic_present = 0;
 rcu_assign_pointer(bp->cnic_ops, NULL);
+mutex_unlock(&bp->cnic_lock);
 synchronize_rcu();
 return 0;
 }
@@ -429,13 +431,13 @@ bnx2_cnic_stop(struct bnx2 *bp)
 struct cnic_ops *c_ops;
 struct cnic_ctl_info info;
 
-rcu_read_lock();
-c_ops = rcu_dereference(bp->cnic_ops);
+mutex_lock(&bp->cnic_lock);
+c_ops = bp->cnic_ops;
 if (c_ops) {
 info.cmd = CNIC_CTL_STOP_CMD;
 c_ops->cnic_ctl(bp->cnic_data, &info);
 }
-rcu_read_unlock();
+mutex_unlock(&bp->cnic_lock);
 }
 
 static void
@@ -444,8 +446,8 @@ bnx2_cnic_start(struct bnx2 *bp)
 struct cnic_ops *c_ops;
 struct cnic_ctl_info info;
 
-rcu_read_lock();
-c_ops = rcu_dereference(bp->cnic_ops);
+mutex_lock(&bp->cnic_lock);
+c_ops = bp->cnic_ops;
 if (c_ops) {
 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
@@ -455,7 +457,7 @@ bnx2_cnic_start(struct bnx2 *bp)
 info.cmd = CNIC_CTL_START_CMD;
 c_ops->cnic_ctl(bp->cnic_data, &info);
 }
-rcu_read_unlock();
+mutex_unlock(&bp->cnic_lock);
 }
 
 #else
@@ -7663,6 +7665,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 
 spin_lock_init(&bp->phy_lock);
 spin_lock_init(&bp->indirect_lock);
+#ifdef BCM_CNIC
+mutex_init(&bp->cnic_lock);
+#endif
 INIT_WORK(&bp->reset_task, bnx2_reset_task);
 
 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index f1edfaa9e56a..a4f12fd0ecd2 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6902,6 +6902,7 @@ struct bnx2 {
 u32 idle_chk_status_idx;
 
 #ifdef BCM_CNIC
+struct mutex cnic_lock;
 struct cnic_eth_dev cnic_eth_dev;
 #endif
 
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 9e4283aff828..e1a4f8214239 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -611,11 +611,18 @@ nla_put_failure:
 return -EMSGSIZE;
 }
 
+static int can_newlink(struct net_device *dev,
+struct nlattr *tb[], struct nlattr *data[])
+{
+return -EOPNOTSUPP;
+}
+
 static struct rtnl_link_ops can_link_ops __read_mostly = {
 .kind = "can",
 .maxtype = IFLA_CAN_MAX,
 .policy = can_policy,
 .setup = can_setup,
+.newlink = can_newlink,
 .changelink = can_changelink,
 .fill_info = can_fill_info,
 .fill_xstats = can_fill_xstats,
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 4869d77cbe91..74c342959b7b 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -138,6 +138,16 @@ static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
 return NULL;
 }
 
+static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
+{
+atomic_inc(&ulp_ops->ref_count);
+}
+
+static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
+{
+atomic_dec(&ulp_ops->ref_count);
+}
+
 static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
 {
 struct cnic_local *cp = dev->cnic_priv;
@@ -358,6 +368,7 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
 }
 read_unlock(&cnic_dev_lock);
 
+atomic_set(&ulp_ops->ref_count, 0);
 rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
 mutex_unlock(&cnic_lock);
 
@@ -379,6 +390,8 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
 int cnic_unregister_driver(int ulp_type)
 {
 struct cnic_dev *dev;
+struct cnic_ulp_ops *ulp_ops;
+int i = 0;
 
 if (ulp_type >= MAX_CNIC_ULP_TYPE) {
 printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n",
@@ -386,7 +399,8 @@ int cnic_unregister_driver(int ulp_type)
 return -EINVAL;
 }
 mutex_lock(&cnic_lock);
-if (!cnic_ulp_tbl[ulp_type]) {
+ulp_ops = cnic_ulp_tbl[ulp_type];
+if (!ulp_ops) {
 printk(KERN_ERR PFX "cnic_unregister_driver: Type %d has not "
 "been registered\n", ulp_type);
 goto out_unlock;
@@ -411,6 +425,14 @@ int cnic_unregister_driver(int ulp_type)
 
 mutex_unlock(&cnic_lock);
 synchronize_rcu();
+while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
+msleep(100);
+i++;
+}
+
+if (atomic_read(&ulp_ops->ref_count) != 0)
+printk(KERN_WARNING PFX "%s: Failed waiting for ref count to go"
+" to zero.\n", dev->netdev->name);
 return 0;
 
 out_unlock:
@@ -466,6 +488,7 @@ EXPORT_SYMBOL(cnic_register_driver);
 static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
 {
 struct cnic_local *cp = dev->cnic_priv;
+int i = 0;
 
 if (ulp_type >= MAX_CNIC_ULP_TYPE) {
 printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n",
@@ -486,6 +509,15 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
 
 synchronize_rcu();
 
+while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
+i < 20) {
+msleep(100);
+i++;
+}
+if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
+printk(KERN_WARNING PFX "%s: Failed waiting for ULP up call"
+" to complete.\n", dev->netdev->name);
+
 return 0;
 }
 EXPORT_SYMBOL(cnic_unregister_driver);
@@ -1076,18 +1108,23 @@ static void cnic_ulp_stop(struct cnic_dev *dev)
 if (cp->cnic_uinfo)
 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
 
-rcu_read_lock();
 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
 struct cnic_ulp_ops *ulp_ops;
 
-ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
-if (!ulp_ops)
+mutex_lock(&cnic_lock);
+ulp_ops = cp->ulp_ops[if_type];
+if (!ulp_ops) {
+mutex_unlock(&cnic_lock);
 continue;
+}
+set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
+mutex_unlock(&cnic_lock);
 
 if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
 ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
+
+clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
 }
-rcu_read_unlock();
 }
 
 static void cnic_ulp_start(struct cnic_dev *dev)
@@ -1095,18 +1132,23 @@ static void cnic_ulp_start(struct cnic_dev *dev)
 struct cnic_local *cp = dev->cnic_priv;
 int if_type;
 
-rcu_read_lock();
 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
 struct cnic_ulp_ops *ulp_ops;
 
-ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
-if (!ulp_ops || !ulp_ops->cnic_start)
+mutex_lock(&cnic_lock);
+ulp_ops = cp->ulp_ops[if_type];
+if (!ulp_ops || !ulp_ops->cnic_start) {
+mutex_unlock(&cnic_lock);
 continue;
+}
+set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
+mutex_unlock(&cnic_lock);
 
 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
 ulp_ops->cnic_start(cp->ulp_handle[if_type]);
+
+clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
 }
-rcu_read_unlock();
 }
 
 static int cnic_ctl(void *data, struct cnic_ctl_info *info)
@@ -1116,22 +1158,18 @@ static int cnic_ctl(void *data, struct cnic_ctl_info *info)
 switch (info->cmd) {
 case CNIC_CTL_STOP_CMD:
 cnic_hold(dev);
-mutex_lock(&cnic_lock);
 
 cnic_ulp_stop(dev);
 cnic_stop_hw(dev);
 
-mutex_unlock(&cnic_lock);
 cnic_put(dev);
 break;
 case CNIC_CTL_START_CMD:
 cnic_hold(dev);
-mutex_lock(&cnic_lock);
 
 if (!cnic_start_hw(dev))
 cnic_ulp_start(dev);
 
-mutex_unlock(&cnic_lock);
 cnic_put(dev);
 break;
 default:
@@ -1145,19 +1183,23 @@ static void cnic_ulp_init(struct cnic_dev *dev)
 int i;
 struct cnic_local *cp = dev->cnic_priv;
 
-rcu_read_lock();
 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
 struct cnic_ulp_ops *ulp_ops;
 
-ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
-if (!ulp_ops || !ulp_ops->cnic_init)
+mutex_lock(&cnic_lock);
+ulp_ops = cnic_ulp_tbl[i];
+if (!ulp_ops || !ulp_ops->cnic_init) {
+mutex_unlock(&cnic_lock);
 continue;
+}
+ulp_get(ulp_ops);
+mutex_unlock(&cnic_lock);
 
 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
 ulp_ops->cnic_init(dev);
 
+ulp_put(ulp_ops);
 }
-rcu_read_unlock();
 }
 
 static void cnic_ulp_exit(struct cnic_dev *dev)
@@ -1165,19 +1207,23 @@ static void cnic_ulp_exit(struct cnic_dev *dev)
 int i;
 struct cnic_local *cp = dev->cnic_priv;
 
-rcu_read_lock();
 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
 struct cnic_ulp_ops *ulp_ops;
 
-ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
-if (!ulp_ops || !ulp_ops->cnic_exit)
+mutex_lock(&cnic_lock);
+ulp_ops = cnic_ulp_tbl[i];
+if (!ulp_ops || !ulp_ops->cnic_exit) {
+mutex_unlock(&cnic_lock);
 continue;
+}
+ulp_get(ulp_ops);
+mutex_unlock(&cnic_lock);
 
 if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
 ulp_ops->cnic_exit(dev);
 
+ulp_put(ulp_ops);
 }
-rcu_read_unlock();
 }
 
 static int cnic_cm_offload_pg(struct cnic_sock *csk)
@@ -2393,21 +2439,45 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
 return 0;
 }
 
-static int cnic_start_hw(struct cnic_dev *dev)
+static int cnic_register_netdev(struct cnic_dev *dev)
 {
 struct cnic_local *cp = dev->cnic_priv;
 struct cnic_eth_dev *ethdev = cp->ethdev;
 int err;
 
-if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
-return -EALREADY;
+if (!ethdev)
+return -ENODEV;
+
+if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
+return 0;
 
 err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
-if (err) {
+if (err)
 printk(KERN_ERR PFX "%s: register_cnic failed\n",
 dev->netdev->name);
-goto err2;
-}
+
+return err;
+}
+
+static void cnic_unregister_netdev(struct cnic_dev *dev)
+{
+struct cnic_local *cp = dev->cnic_priv;
+struct cnic_eth_dev *ethdev = cp->ethdev;
+
+if (!ethdev)
+return;
+
+ethdev->drv_unregister_cnic(dev->netdev);
+}
+
+static int cnic_start_hw(struct cnic_dev *dev)
+{
+struct cnic_local *cp = dev->cnic_priv;
+struct cnic_eth_dev *ethdev = cp->ethdev;
+int err;
+
+if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
+return -EALREADY;
 
 dev->regview = ethdev->io_base;
 cp->chip_id = ethdev->chip_id;
@@ -2438,18 +2508,13 @@ static int cnic_start_hw(struct cnic_dev *dev)
 return 0;
 
 err1:
-ethdev->drv_unregister_cnic(dev->netdev);
 cp->free_resc(dev);
 pci_dev_put(dev->pcidev);
-err2:
 return err;
 }
 
 static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
 {
-struct cnic_local *cp = dev->cnic_priv;
-struct cnic_eth_dev *ethdev = cp->ethdev;
-
 cnic_disable_bnx2_int_sync(dev);
 
 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
@@ -2461,8 +2526,6 @@ static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
 cnic_setup_5709_context(dev, 0);
 cnic_free_irq(dev);
 
-ethdev->drv_unregister_cnic(dev->netdev);
-
 cnic_free_resc(dev);
 }
 
@@ -2543,7 +2606,7 @@ static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
 probe = symbol_get(bnx2_cnic_probe);
 if (probe) {
 ethdev = (*probe)(dev);
-symbol_put_addr(probe);
+symbol_put(bnx2_cnic_probe);
 }
 if (!ethdev)
 return NULL;
@@ -2646,10 +2709,12 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
 else if (event == NETDEV_UNREGISTER)
 cnic_ulp_exit(dev);
 else if (event == NETDEV_UP) {
-mutex_lock(&cnic_lock);
+if (cnic_register_netdev(dev) != 0) {
+cnic_put(dev);
+goto done;
+}
 if (!cnic_start_hw(dev))
 cnic_ulp_start(dev);
-mutex_unlock(&cnic_lock);
 }
 
 rcu_read_lock();
@@ -2668,10 +2733,9 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
 rcu_read_unlock();
 
 if (event == NETDEV_GOING_DOWN) {
-mutex_lock(&cnic_lock);
 cnic_ulp_stop(dev);
 cnic_stop_hw(dev);
-mutex_unlock(&cnic_lock);
+cnic_unregister_netdev(dev);
 } else if (event == NETDEV_UNREGISTER) {
 write_lock(&cnic_dev_lock);
 list_del_init(&dev->list);
@@ -2703,6 +2767,7 @@ static void cnic_release(void)
 }
 
 cnic_ulp_exit(dev);
+cnic_unregister_netdev(dev);
 list_del_init(&dev->list);
 cnic_free_dev(dev);
 }
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
index 5192d4a9df5a..a94b302bb464 100644
--- a/drivers/net/cnic.h
+++ b/drivers/net/cnic.h
@@ -176,6 +176,7 @@ struct cnic_local {
 unsigned long ulp_flags[MAX_CNIC_ULP_TYPE];
 #define ULP_F_INIT 0
 #define ULP_F_START 1
+#define ULP_F_CALL_PENDING 2
 struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE];
 
 /* protected by ulp_lock */
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index d1bce27ee99e..a49235739eef 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -290,6 +290,7 @@ struct cnic_ulp_ops {
 void (*iscsi_nl_send_msg)(struct cnic_dev *dev, u32 msg_type,
 char *data, u16 data_size);
 struct module *owner;
+atomic_t ref_count;
 };
 
 extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 41b648a67fec..3a6735dc9f6a 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1899,7 +1899,7 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
 nic->ru_running = RU_SUSPENDED;
 pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
 sizeof(struct rfd),
-PCI_DMA_BIDIRECTIONAL);
+PCI_DMA_FROMDEVICE);
 return -ENODATA;
 }
 
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index d56c7473144a..99df2abf82a9 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -338,10 +338,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
 {
 struct e1000_nvm_info *nvm = &hw->nvm;
 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
-union ich8_hws_flash_status hsfsts;
-u32 gfpreg;
-u32 sector_base_addr;
-u32 sector_end_addr;
+u32 gfpreg, sector_base_addr, sector_end_addr;
 u16 i;
 
 /* Can't read flash registers if the register set isn't mapped. */
@@ -375,20 +372,6 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
 /* Adjust to word count */
 nvm->flash_bank_size /= sizeof(u16);
 
-/*
- * Make sure the flash bank size does not overwrite the 4k
- * sector ranges. We may have 64k allotted to us but we only care
- * about the first 2 4k sectors. Therefore, if we have anything less
- * than 64k set in the HSFSTS register, we will reduce the bank size
- * down to 4k and let the rest remain unused. If berasesz == 3, then
- * we are working in 64k mode. Otherwise we are not.
- */
-if (nvm->flash_bank_size > E1000_ICH8_SHADOW_RAM_WORDS) {
-hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
-if (hsfsts.hsf_status.berasesz != 3)
-nvm->flash_bank_size = E1000_ICH8_SHADOW_RAM_WORDS;
-}
-
 nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS;
 
 /* Clear shadow ram */
@@ -594,8 +577,8 @@ static DEFINE_MUTEX(nvm_mutex);
 **/
 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
 {
-u32 extcnf_ctrl;
-u32 timeout = PHY_CFG_TIMEOUT;
+u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
+s32 ret_val = 0;
 
 might_sleep();
 
@@ -603,28 +586,46 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
 
 while (timeout) {
 extcnf_ctrl = er32(EXTCNF_CTRL);
+if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
+break;
 
-if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)) {
-extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
-ew32(EXTCNF_CTRL, extcnf_ctrl);
+mdelay(1);
+timeout--;
+}
+
+if (!timeout) {
+hw_dbg(hw, "SW/FW/HW has locked the resource for too long.\n");
+ret_val = -E1000_ERR_CONFIG;
+goto out;
+}
+
+timeout = PHY_CFG_TIMEOUT * 2;
+
+extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
+ew32(EXTCNF_CTRL, extcnf_ctrl);
+
+while (timeout) {
+extcnf_ctrl = er32(EXTCNF_CTRL);
+if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
+break;
 
-extcnf_ctrl = er32(EXTCNF_CTRL);
-if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
-break;
-}
 mdelay(1);
 timeout--;
 }
 
 if (!timeout) {
-hw_dbg(hw, "FW or HW has locked the resource for too long.\n");
+hw_dbg(hw, "Failed to acquire the semaphore.\n");
 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
 ew32(EXTCNF_CTRL, extcnf_ctrl);
-mutex_unlock(&nvm_mutex);
-return -E1000_ERR_CONFIG;
+ret_val = -E1000_ERR_CONFIG;
+goto out;
 }
 
-return 0;
+out:
+if (ret_val)
+mutex_unlock(&nvm_mutex);
+
+return ret_val;
 }
 
 /**
@@ -1306,7 +1307,7 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
 struct e1000_nvm_info *nvm = &hw->nvm;
 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
 u32 act_offset;
-s32 ret_val;
+s32 ret_val = 0;
 u32 bank = 0;
 u16 i, word;
 
@@ -1321,12 +1322,15 @@
 goto out;
 
 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
-if (ret_val)
-goto release;
+if (ret_val) {
+hw_dbg(hw, "Could not detect valid bank, assuming bank 0\n");
+bank = 0;
+}
 
 act_offset = (bank) ? nvm->flash_bank_size : 0;
 act_offset += offset;
 
+ret_val = 0;
 for (i = 0; i < words; i++) {
 if ((dev_spec->shadow_ram) &&
 (dev_spec->shadow_ram[offset+i].modified)) {
@@ -1341,7 +1345,6 @@
 }
 }
 
-release:
 e1000_release_swflag_ich8lan(hw);
 
 out:
@@ -1592,7 +1595,6 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
 {
 struct e1000_nvm_info *nvm = &hw->nvm;
 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
-s32 ret_val;
 u16 i;
 
 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
@@ -1601,17 +1603,11 @@
 return -E1000_ERR_NVM;
 }
 
-ret_val = e1000_acquire_swflag_ich8lan(hw);
-if (ret_val)
-return ret_val;
-
 for (i = 0; i < words; i++) {
 dev_spec->shadow_ram[offset+i].modified = 1;
 dev_spec->shadow_ram[offset+i].value = data[i];
 }
 
-e1000_release_swflag_ich8lan(hw);
-
 return 0;
 }
 
@@ -1652,8 +1648,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
 */
 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
 if (ret_val) {
-e1000_release_swflag_ich8lan(hw);
-goto out;
+hw_dbg(hw, "Could not detect valid bank, assuming bank 0\n");
+bank = 0;
 }
 
 if (bank == 0) {
@@ -2039,12 +2035,8 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
 iteration = 1;
 break;
 case 2:
-if (hw->mac.type == e1000_ich9lan) {
-sector_size = ICH_FLASH_SEG_SIZE_8K;
-iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_8K;
-} else {
-return -E1000_ERR_NVM;
-}
+sector_size = ICH_FLASH_SEG_SIZE_8K;
+iteration = 1;
 break;
 case 3:
 sector_size = ICH_FLASH_SEG_SIZE_64K;
@@ -2056,7 +2048,7 @@
 
 /* Start with the base address, then add the sector offset. */
 flash_linear_addr = hw->nvm.flash_base_addr;
-flash_linear_addr += (bank) ? (sector_size * iteration) : 0;
+flash_linear_addr += (bank) ? flash_bank_size : 0;
 
 for (j = 0; j < iteration ; j++) {
 do {
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 63415bb6f48f..fa92a683aefd 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -4538,8 +4538,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
 /* Allow time for pending master requests to run */
 e1000e_disable_pcie_master(&adapter->hw);
 
-if ((adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) &&
-!(hw->mac.ops.check_mng_mode(hw))) {
+if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
 /* enable wakeup by the PHY */
 retval = e1000_init_phy_wakeup(adapter, wufc);
 if (retval)
@@ -4557,7 +4556,8 @@
 *enable_wake = !!wufc;
 
 /* make sure adapter isn't asleep if manageability is enabled */
-if (adapter->flags & FLAG_MNG_PT_ENABLED)
+if ((adapter->flags & FLAG_MNG_PT_ENABLED) ||
+(hw->mac.ops.check_mng_mode(hw)))
 *enable_wake = true;
 
 if (adapter->hw.phy.type == e1000_phy_igp_3)
@@ -4670,14 +4670,6 @@ static int e1000_resume(struct pci_dev *pdev)
 return err;
 }
 
-/* AER (Advanced Error Reporting) hooks */
-err = pci_enable_pcie_error_reporting(pdev);
-if (err) {
-dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
-"0x%x\n", err);
-/* non-fatal, continue */
-}
-
 pci_set_master(pdev);
 
 pci_enable_wake(pdev, PCI_D3hot, 0);
@@ -4990,6 +4982,14 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 if (err)
 goto err_pci_reg;
 
+/* AER (Advanced Error Reporting) hooks */
+err = pci_enable_pcie_error_reporting(pdev);
+if (err) {
+dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
+"0x%x\n", err);
+/* non-fatal, continue */
+}
+
 pci_set_master(pdev);
 /* PCI config space info */
 err = pci_save_state(pdev);
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c
index 1686dca28748..1f016d66684a 100644
--- a/drivers/net/eexpress.c
+++ b/drivers/net/eexpress.c
@@ -1474,13 +1474,13 @@ static void eexp_hw_init586(struct net_device *dev)
 outw(0x0000, ioaddr + 0x800c);
 outw(0x0000, ioaddr + 0x800e);
 
-for (i = 0; i < (sizeof(start_code)); i+=32) {
+for (i = 0; i < ARRAY_SIZE(start_code) * 2; i+=32) {
 int j;
 outw(i, ioaddr + SM_PTR);
-for (j = 0; j < 16; j+=2)
+for (j = 0; j < 16 && (i+j)/2 < ARRAY_SIZE(start_code); j+=2)
 outw(start_code[(i+j)/2],
 ioaddr+0x4000+j);
-for (j = 0; j < 16; j+=2)
+for (j = 0; j < 16 && (i+j+16)/2 < ARRAY_SIZE(start_code); j+=2)
 outw(start_code[(i+j+16)/2],
 ioaddr+0x8000+j);
 }
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 78952f8324e2..fa311a950996 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
 #include <asm/io.h>
 
 #define DRV_NAME "ehea"
-#define DRV_VERSION "EHEA_0101"
+#define DRV_VERSION "EHEA_0102"
 
 /* eHEA capability flags */
 #define DLPAR_PORT_ADD_REM 1
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index e8d46cc1bec2..977c3d358279 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -1545,6 +1545,9 @@ static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
 {
 int ret, i;
 
+if (pr->qp)
+netif_napi_del(&pr->napi);
+
 ret = ehea_destroy_qp(pr->qp);
 
 if (!ret) {
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index d4b98074b1b7..c9fd82d3a80d 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -285,6 +285,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 struct fec_enet_private *fep = netdev_priv(dev);
 struct bufdesc *bdp;
+void *bufaddr;
 unsigned short status;
 unsigned long flags;
 
@@ -312,7 +313,7 @@
 status &= ~BD_ENET_TX_STATS;
 
 /* Set buffer length and buffer pointer */
-bdp->cbd_bufaddr = __pa(skb->data);
+bufaddr = skb->data;
 bdp->cbd_datlen = skb->len;
 
 /*
@@ -320,11 +321,11 @@
 * 4-byte boundaries. Use bounce buffers to copy data
 * and get it aligned. Ugh.
 */
-if (bdp->cbd_bufaddr & FEC_ALIGNMENT) {
+if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
 unsigned int index;
 index = bdp - fep->tx_bd_base;
 memcpy(fep->tx_bounce[index], (void *)skb->data, skb->len);
-bdp->cbd_bufaddr = __pa(fep->tx_bounce[index]);
+bufaddr = fep->tx_bounce[index];
 }
 
 /* Save skb pointer */
@@ -336,7 +337,7 @@
 /* Push the data cache so the CPM does not get stale memory
 * data.
 */
-bdp->cbd_bufaddr = dma_map_single(&dev->dev, skb->data,
+bdp->cbd_bufaddr = dma_map_single(&dev->dev, bufaddr,
 FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
 
 /* Send it on its way. Tell FEC it's ready, interrupt when done,
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index cc786333d95c..c40113f58963 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -309,6 +309,7 @@ static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 struct mpc52xx_fec_priv *priv = netdev_priv(dev);
 struct bcom_fec_bd *bd;
+unsigned long flags;
 
 if (bcom_queue_full(priv->tx_dmatsk)) {
 if (net_ratelimit())
@@ -316,7 +317,7 @@
 return NETDEV_TX_BUSY;
 }
 
-spin_lock_irq(&priv->lock);
+spin_lock_irqsave(&priv->lock, flags);
 dev->trans_start = jiffies;
 
 bd = (struct bcom_fec_bd *)
@@ -332,7 +333,7 @@
 netif_stop_queue(dev);
 }
 
-spin_unlock_irq(&priv->lock);
+spin_unlock_irqrestore(&priv->lock, flags);
 
 return NETDEV_TX_OK;
 }
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index f8ffcbf0bc39..e212f2c5448b 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -936,6 +936,7 @@ int startup_gfar(struct net_device *dev)
 struct gfar __iomem *regs = priv->regs;
 int err = 0;
 u32 rctrl = 0;
+u32 tctrl = 0;
 u32 attrs = 0;
 
 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
@@ -1111,11 +1112,19 @@ int startup_gfar(struct net_device *dev)
 rctrl |= RCTRL_PADDING(priv->padding);
 }
 
+/* keep vlan related bits if it's enabled */
+if (priv->vlgrp) {
+rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
+tctrl |= TCTRL_VLINS;
+}
+
 /* Init rctrl based on our settings */
 gfar_write(&priv->regs->rctrl, rctrl);
 
 if (dev->features & NETIF_F_IP_CSUM)
-gfar_write(&priv->regs->tctrl, TCTRL_INIT_CSUM);
+tctrl |= TCTRL_INIT_CSUM;
+
+gfar_write(&priv->regs->tctrl, tctrl);
 
 /* Set the extraction length and index */
 attrs = ATTRELI_EL(priv->rx_stash_size) |
@@ -1450,7 +1459,6 @@ static void gfar_vlan_rx_register(struct net_device *dev,
 
 /* Enable VLAN tag extraction */
 tempval = gfar_read(&priv->regs->rctrl);
-tempval |= RCTRL_VLEX;
 tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
 gfar_write(&priv->regs->rctrl, tempval);
 } else {
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c index dbf06e9313cc..2234118eedbb 100644 --- a/drivers/net/gianfar_ethtool.c +++ b/drivers/net/gianfar_ethtool.c | |||
@@ -366,9 +366,8 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals | |||
366 | return -EINVAL; | 366 | return -EINVAL; |
367 | } | 367 | } |
368 | 368 | ||
369 | priv->rxic = mk_ic_value( | 369 | priv->rxic = mk_ic_value(cvals->rx_max_coalesced_frames, |
370 | gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs), | 370 | gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs)); |
371 | cvals->rx_max_coalesced_frames); | ||
372 | 371 | ||
373 | /* Set up tx coalescing */ | 372 | /* Set up tx coalescing */ |
374 | if ((cvals->tx_coalesce_usecs == 0) || | 373 | if ((cvals->tx_coalesce_usecs == 0) || |
@@ -390,9 +389,8 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals | |||
390 | return -EINVAL; | 389 | return -EINVAL; |
391 | } | 390 | } |
392 | 391 | ||
393 | priv->txic = mk_ic_value( | 392 | priv->txic = mk_ic_value(cvals->tx_max_coalesced_frames, |
394 | gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs), | 393 | gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs)); |
395 | cvals->tx_max_coalesced_frames); | ||
396 | 394 | ||
397 | gfar_write(&priv->regs->rxic, 0); | 395 | gfar_write(&priv->regs->rxic, 0); |
398 | if (priv->rxcoalescing) | 396 | if (priv->rxcoalescing) |
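Both gianfar_ethtool hunks above swap the arguments of mk_ic_value(): judging from the corrected call sites, the macro takes the frame-count threshold first and the converted timer value second, and the old calls had them crossed, so the coalescing values were programmed with the two fields exchanged. Because both parameters are bare integers the compiler cannot flag such a swap; a hedged illustration with a hypothetical stand-in macro:

    #include <linux/types.h>

    /* Hypothetical stand-in mirroring the corrected call sites: count first, time second. */
    #define MK_IC_VALUE(count, time) \
            ((((u32)(count) & 0xffu) << 21) | ((u32)(time) & 0xffffu))

    static u32 build_rx_coalesce(u32 max_frames, u32 timer_ticks)
    {
            /* Swapping the arguments still compiles cleanly; only the call-site
             * order keeps the count and timer fields in the right place. */
            return MK_IC_VALUE(max_frames, timer_ticks);
    }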
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c index beb84213b671..f0f890803710 100644 --- a/drivers/net/ibm_newemac/core.c +++ b/drivers/net/ibm_newemac/core.c | |||
@@ -1305,6 +1305,8 @@ static int emac_close(struct net_device *ndev) | |||
1305 | 1305 | ||
1306 | free_irq(dev->emac_irq, dev); | 1306 | free_irq(dev->emac_irq, dev); |
1307 | 1307 | ||
1308 | netif_carrier_off(ndev); | ||
1309 | |||
1308 | return 0; | 1310 | return 0; |
1309 | } | 1311 | } |
1310 | 1312 | ||
diff --git a/drivers/net/igbvf/vf.c b/drivers/net/igbvf/vf.c index 2a4faf9ade69..a9a61efa964c 100644 --- a/drivers/net/igbvf/vf.c +++ b/drivers/net/igbvf/vf.c | |||
@@ -274,6 +274,8 @@ static s32 e1000_set_vfta_vf(struct e1000_hw *hw, u16 vid, bool set) | |||
274 | 274 | ||
275 | err = mbx->ops.read_posted(hw, msgbuf, 2); | 275 | err = mbx->ops.read_posted(hw, msgbuf, 2); |
276 | 276 | ||
277 | msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS; | ||
278 | |||
277 | /* if nacked the vlan was rejected */ | 279 | /* if nacked the vlan was rejected */ |
278 | if (!err && (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK))) | 280 | if (!err && (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK))) |
279 | err = -E1000_ERR_MAC_INIT; | 281 | err = -E1000_ERR_MAC_INIT; |
@@ -317,6 +319,8 @@ static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index) | |||
317 | if (!ret_val) | 319 | if (!ret_val) |
318 | ret_val = mbx->ops.read_posted(hw, msgbuf, 3); | 320 | ret_val = mbx->ops.read_posted(hw, msgbuf, 3); |
319 | 321 | ||
322 | msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS; | ||
323 | |||
320 | /* if nacked the address was rejected, use "perm_addr" */ | 324 | /* if nacked the address was rejected, use "perm_addr" */ |
321 | if (!ret_val && | 325 | if (!ret_val && |
322 | (msgbuf[0] == (E1000_VF_SET_MAC_ADDR | E1000_VT_MSGTYPE_NACK))) | 326 | (msgbuf[0] == (E1000_VF_SET_MAC_ADDR | E1000_VT_MSGTYPE_NACK))) |
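The two igbvf mailbox hunks above clear E1000_VT_MSGTYPE_CTS from the PF's reply before the NACK test. That test is a strict equality against the command word plus the NACK bit, so a reply that also carries the CTS status bit would never match and a rejected VLAN or MAC address would be treated as accepted. A generic sketch of the same idea, with hypothetical bit definitions rather than the real e1000 ones:

    #include <linux/types.h>

    #define MSG_FLAG_NACK   0x40000000u     /* hypothetical reply-status bits */
    #define MSG_FLAG_CTS    0x20000000u

    /* Strip status-only bits before comparing against the expected reply. */
    static bool reply_was_nacked(u32 reply, u32 expected_cmd)
    {
            reply &= ~MSG_FLAG_CTS;         /* CTS must not defeat the equality test */
            return reply == (expected_cmd | MSG_FLAG_NACK);
    }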
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c index c4361d466597..ee1cff5c9b21 100644 --- a/drivers/net/irda/au1k_ir.c +++ b/drivers/net/irda/au1k_ir.c | |||
@@ -23,7 +23,6 @@ | |||
23 | #include <linux/init.h> | 23 | #include <linux/init.h> |
24 | #include <linux/errno.h> | 24 | #include <linux/errno.h> |
25 | #include <linux/netdevice.h> | 25 | #include <linux/netdevice.h> |
26 | #include <linux/etherdevice.h> | ||
27 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
28 | #include <linux/rtnetlink.h> | 27 | #include <linux/rtnetlink.h> |
29 | #include <linux/interrupt.h> | 28 | #include <linux/interrupt.h> |
@@ -205,9 +204,6 @@ static const struct net_device_ops au1k_irda_netdev_ops = { | |||
205 | .ndo_start_xmit = au1k_irda_hard_xmit, | 204 | .ndo_start_xmit = au1k_irda_hard_xmit, |
206 | .ndo_tx_timeout = au1k_tx_timeout, | 205 | .ndo_tx_timeout = au1k_tx_timeout, |
207 | .ndo_do_ioctl = au1k_irda_ioctl, | 206 | .ndo_do_ioctl = au1k_irda_ioctl, |
208 | .ndo_change_mtu = eth_change_mtu, | ||
209 | .ndo_validate_addr = eth_validate_addr, | ||
210 | .ndo_set_mac_address = eth_mac_addr, | ||
211 | }; | 207 | }; |
212 | 208 | ||
213 | static int au1k_irda_net_init(struct net_device *dev) | 209 | static int au1k_irda_net_init(struct net_device *dev) |
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c index 3376a4f39e0a..77d10edefd25 100644 --- a/drivers/net/irda/pxaficp_ir.c +++ b/drivers/net/irda/pxaficp_ir.c | |||
@@ -803,9 +803,6 @@ static const struct net_device_ops pxa_irda_netdev_ops = { | |||
803 | .ndo_stop = pxa_irda_stop, | 803 | .ndo_stop = pxa_irda_stop, |
804 | .ndo_start_xmit = pxa_irda_hard_xmit, | 804 | .ndo_start_xmit = pxa_irda_hard_xmit, |
805 | .ndo_do_ioctl = pxa_irda_ioctl, | 805 | .ndo_do_ioctl = pxa_irda_ioctl, |
806 | .ndo_change_mtu = eth_change_mtu, | ||
807 | .ndo_validate_addr = eth_validate_addr, | ||
808 | .ndo_set_mac_address = eth_mac_addr, | ||
809 | }; | 806 | }; |
810 | 807 | ||
811 | static int pxa_irda_probe(struct platform_device *pdev) | 808 | static int pxa_irda_probe(struct platform_device *pdev) |
@@ -830,6 +827,7 @@ static int pxa_irda_probe(struct platform_device *pdev) | |||
830 | if (!dev) | 827 | if (!dev) |
831 | goto err_mem_3; | 828 | goto err_mem_3; |
832 | 829 | ||
830 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
833 | si = netdev_priv(dev); | 831 | si = netdev_priv(dev); |
834 | si->dev = &pdev->dev; | 832 | si->dev = &pdev->dev; |
835 | si->pdata = pdev->dev.platform_data; | 833 | si->pdata = pdev->dev.platform_data; |
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c index 2aeb2e6aec1b..b039cb081e94 100644 --- a/drivers/net/irda/sa1100_ir.c +++ b/drivers/net/irda/sa1100_ir.c | |||
@@ -24,7 +24,6 @@ | |||
24 | #include <linux/init.h> | 24 | #include <linux/init.h> |
25 | #include <linux/errno.h> | 25 | #include <linux/errno.h> |
26 | #include <linux/netdevice.h> | 26 | #include <linux/netdevice.h> |
27 | #include <linux/etherdevice.h> | ||
28 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
29 | #include <linux/rtnetlink.h> | 28 | #include <linux/rtnetlink.h> |
30 | #include <linux/interrupt.h> | 29 | #include <linux/interrupt.h> |
@@ -881,9 +880,6 @@ static const struct net_device_ops sa1100_irda_netdev_ops = { | |||
881 | .ndo_stop = sa1100_irda_stop, | 880 | .ndo_stop = sa1100_irda_stop, |
882 | .ndo_start_xmit = sa1100_irda_hard_xmit, | 881 | .ndo_start_xmit = sa1100_irda_hard_xmit, |
883 | .ndo_do_ioctl = sa1100_irda_ioctl, | 882 | .ndo_do_ioctl = sa1100_irda_ioctl, |
884 | .ndo_change_mtu = eth_change_mtu, | ||
885 | .ndo_validate_addr = eth_validate_addr, | ||
886 | .ndo_set_mac_address = eth_mac_addr, | ||
887 | }; | 883 | }; |
888 | 884 | ||
889 | static int sa1100_irda_probe(struct platform_device *pdev) | 885 | static int sa1100_irda_probe(struct platform_device *pdev) |
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c index d0883835b0c6..fe4f2b2bff96 100644 --- a/drivers/net/irda/w83977af_ir.c +++ b/drivers/net/irda/w83977af_ir.c | |||
@@ -115,7 +115,7 @@ static int __init w83977af_init(void) | |||
115 | 115 | ||
116 | IRDA_DEBUG(0, "%s()\n", __func__ ); | 116 | IRDA_DEBUG(0, "%s()\n", __func__ ); |
117 | 117 | ||
118 | for (i=0; (io[i] < 2000) && (i < ARRAY_SIZE(dev_self)); i++) { | 118 | for (i=0; i < ARRAY_SIZE(dev_self) && io[i] < 2000; i++) { |
119 | if (w83977af_open(i, io[i], irq[i], dma[i]) == 0) | 119 | if (w83977af_open(i, io[i], irq[i], dma[i]) == 0) |
120 | return 0; | 120 | return 0; |
121 | } | 121 | } |
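The w83977af change reorders the probe-loop condition so the index is validated against ARRAY_SIZE(dev_self) before io[i] is read; with the old order the io[] element was fetched first, one step past the end on the final iteration. Relying on && short-circuiting for the bounds check is the whole fix; a simplified sketch with a single hypothetical table:

    #include <linux/kernel.h>               /* ARRAY_SIZE() */

    static unsigned int io[4] = { 0x180 };  /* hypothetical probe table */

    static void sketch_probe_all(void)
    {
            int i;

            /* Bounds test first: && short-circuits, so io[i] is never read
             * once i has run off the end of the array. */
            for (i = 0; i < ARRAY_SIZE(io) && io[i] != 0; i++) {
                    /* ... probe the device at io[i] ... */
            }
    }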
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index 1b12c7ba275f..2c4dc8221dcd 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h | |||
@@ -96,6 +96,8 @@ | |||
96 | #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000 | 96 | #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000 |
97 | #define IXGBE_TX_FLAGS_VLAN_SHIFT 16 | 97 | #define IXGBE_TX_FLAGS_VLAN_SHIFT 16 |
98 | 98 | ||
99 | #define IXGBE_MAX_RSC_INT_RATE 162760 | ||
100 | |||
99 | /* wrapper around a pointer to a socket buffer, | 101 | /* wrapper around a pointer to a socket buffer, |
100 | * so a DMA handle can be stored along with the buffer */ | 102 | * so a DMA handle can be stored along with the buffer */ |
101 | struct ixgbe_tx_buffer { | 103 | struct ixgbe_tx_buffer { |
@@ -134,6 +136,8 @@ struct ixgbe_ring { | |||
134 | 136 | ||
135 | u8 queue_index; /* needed for multiqueue queue management */ | 137 | u8 queue_index; /* needed for multiqueue queue management */ |
136 | 138 | ||
139 | #define IXGBE_RING_RX_PS_ENABLED (u8)(1) | ||
140 | u8 flags; /* per ring feature flags */ | ||
137 | u16 head; | 141 | u16 head; |
138 | u16 tail; | 142 | u16 tail; |
139 | 143 | ||
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c index b9923047ce11..522c03bc1dad 100644 --- a/drivers/net/ixgbe/ixgbe_82598.c +++ b/drivers/net/ixgbe/ixgbe_82598.c | |||
@@ -50,6 +50,51 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, | |||
50 | u8 *eeprom_data); | 50 | u8 *eeprom_data); |
51 | 51 | ||
52 | /** | 52 | /** |
53 | * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout | ||
54 | * @hw: pointer to the HW structure | ||
55 | * | ||
56 | * The defaults for 82598 should be in the range of 50us to 50ms, | ||
57 | * however the hardware default for these parts is 500us to 1ms which is less | ||
58 | * than the 10ms recommended by the pci-e spec. To address this we need to | ||
59 | * increase the value to either 10ms to 250ms for capability version 1 config, | ||
60 | * or 16ms to 55ms for version 2. | ||
61 | **/ | ||
62 | void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw) | ||
63 | { | ||
64 | struct ixgbe_adapter *adapter = hw->back; | ||
65 | u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR); | ||
66 | u16 pcie_devctl2; | ||
67 | |||
68 | /* only take action if timeout value is defaulted to 0 */ | ||
69 | if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK) | ||
70 | goto out; | ||
71 | |||
72 | /* | ||
73 | * if capabilities version is type 1 we can write the | ||
74 | * timeout of 10ms to 250ms through the GCR register | ||
75 | */ | ||
76 | if (!(gcr & IXGBE_GCR_CAP_VER2)) { | ||
77 | gcr |= IXGBE_GCR_CMPL_TMOUT_10ms; | ||
78 | goto out; | ||
79 | } | ||
80 | |||
81 | /* | ||
82 | * for version 2 capabilities we need to write the config space | ||
83 | * directly in order to set the completion timeout value for | ||
84 | * 16ms to 55ms | ||
85 | */ | ||
86 | pci_read_config_word(adapter->pdev, | ||
87 | IXGBE_PCI_DEVICE_CONTROL2, &pcie_devctl2); | ||
88 | pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms; | ||
89 | pci_write_config_word(adapter->pdev, | ||
90 | IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2); | ||
91 | out: | ||
92 | /* disable completion timeout resend */ | ||
93 | gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND; | ||
94 | IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr); | ||
95 | } | ||
96 | |||
97 | /** | ||
53 | * ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count | 98 | * ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count |
54 | * @hw: pointer to hardware structure | 99 | * @hw: pointer to hardware structure |
55 | * | 100 | * |
@@ -153,6 +198,26 @@ out: | |||
153 | } | 198 | } |
154 | 199 | ||
155 | /** | 200 | /** |
201 | * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx | ||
202 | * @hw: pointer to hardware structure | ||
203 | * | ||
204 | * Starts the hardware using the generic start_hw function. | ||
205 | * Then set pcie completion timeout | ||
206 | **/ | ||
207 | s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) | ||
208 | { | ||
209 | s32 ret_val = 0; | ||
210 | |||
211 | ret_val = ixgbe_start_hw_generic(hw); | ||
212 | |||
213 | /* set the completion timeout for interface */ | ||
214 | if (ret_val == 0) | ||
215 | ixgbe_set_pcie_completion_timeout(hw); | ||
216 | |||
217 | return ret_val; | ||
218 | } | ||
219 | |||
220 | /** | ||
156 | * ixgbe_get_link_capabilities_82598 - Determines link capabilities | 221 | * ixgbe_get_link_capabilities_82598 - Determines link capabilities |
157 | * @hw: pointer to hardware structure | 222 | * @hw: pointer to hardware structure |
158 | * @speed: pointer to link speed | 223 | * @speed: pointer to link speed |
@@ -1085,7 +1150,7 @@ out: | |||
1085 | static struct ixgbe_mac_operations mac_ops_82598 = { | 1150 | static struct ixgbe_mac_operations mac_ops_82598 = { |
1086 | .init_hw = &ixgbe_init_hw_generic, | 1151 | .init_hw = &ixgbe_init_hw_generic, |
1087 | .reset_hw = &ixgbe_reset_hw_82598, | 1152 | .reset_hw = &ixgbe_reset_hw_82598, |
1088 | .start_hw = &ixgbe_start_hw_generic, | 1153 | .start_hw = &ixgbe_start_hw_82598, |
1089 | .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, | 1154 | .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, |
1090 | .get_media_type = &ixgbe_get_media_type_82598, | 1155 | .get_media_type = &ixgbe_get_media_type_82598, |
1091 | .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82598, | 1156 | .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82598, |
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 2a978008fd6e..dff8dfac7ed9 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c | |||
@@ -1948,6 +1948,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev, | |||
1948 | struct ethtool_coalesce *ec) | 1948 | struct ethtool_coalesce *ec) |
1949 | { | 1949 | { |
1950 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 1950 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
1951 | struct ixgbe_q_vector *q_vector; | ||
1951 | int i; | 1952 | int i; |
1952 | 1953 | ||
1953 | if (ec->tx_max_coalesced_frames_irq) | 1954 | if (ec->tx_max_coalesced_frames_irq) |
@@ -1975,18 +1976,31 @@ static int ixgbe_set_coalesce(struct net_device *netdev, | |||
1975 | * any other value means disable eitr, which is best | 1976 | * any other value means disable eitr, which is best |
1976 | * served by setting the interrupt rate very high | 1977 | * served by setting the interrupt rate very high |
1977 | */ | 1978 | */ |
1978 | adapter->eitr_param = IXGBE_MAX_INT_RATE; | 1979 | if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) |
1980 | adapter->eitr_param = IXGBE_MAX_RSC_INT_RATE; | ||
1981 | else | ||
1982 | adapter->eitr_param = IXGBE_MAX_INT_RATE; | ||
1979 | adapter->itr_setting = 0; | 1983 | adapter->itr_setting = 0; |
1980 | } | 1984 | } |
1981 | 1985 | ||
1982 | for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { | 1986 | /* MSI/MSIx Interrupt Mode */ |
1983 | struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; | 1987 | if (adapter->flags & |
1984 | if (q_vector->txr_count && !q_vector->rxr_count) | 1988 | (IXGBE_FLAG_MSIX_ENABLED | IXGBE_FLAG_MSI_ENABLED)) { |
1985 | /* tx vector gets half the rate */ | 1989 | int num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; |
1986 | q_vector->eitr = (adapter->eitr_param >> 1); | 1990 | for (i = 0; i < num_vectors; i++) { |
1987 | else | 1991 | q_vector = adapter->q_vector[i]; |
1988 | /* rx only or mixed */ | 1992 | if (q_vector->txr_count && !q_vector->rxr_count) |
1989 | q_vector->eitr = adapter->eitr_param; | 1993 | /* tx vector gets half the rate */ |
1994 | q_vector->eitr = (adapter->eitr_param >> 1); | ||
1995 | else | ||
1996 | /* rx only or mixed */ | ||
1997 | q_vector->eitr = adapter->eitr_param; | ||
1998 | ixgbe_write_eitr(q_vector); | ||
1999 | } | ||
2000 | /* Legacy Interrupt Mode */ | ||
2001 | } else { | ||
2002 | q_vector = adapter->q_vector[0]; | ||
2003 | q_vector->eitr = adapter->eitr_param; | ||
1990 | ixgbe_write_eitr(q_vector); | 2004 | ixgbe_write_eitr(q_vector); |
1991 | } | 2005 | } |
1992 | 2006 | ||
@@ -1999,13 +2013,13 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data) | |||
1999 | 2013 | ||
2000 | ethtool_op_set_flags(netdev, data); | 2014 | ethtool_op_set_flags(netdev, data); |
2001 | 2015 | ||
2002 | if (!(adapter->flags & IXGBE_FLAG2_RSC_CAPABLE)) | 2016 | if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) |
2003 | return 0; | 2017 | return 0; |
2004 | 2018 | ||
2005 | /* if state changes we need to update adapter->flags and reset */ | 2019 | /* if state changes we need to update adapter->flags and reset */ |
2006 | if ((!!(data & ETH_FLAG_LRO)) != | 2020 | if ((!!(data & ETH_FLAG_LRO)) != |
2007 | (!!(adapter->flags & IXGBE_FLAG2_RSC_ENABLED))) { | 2021 | (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) { |
2008 | adapter->flags ^= IXGBE_FLAG2_RSC_ENABLED; | 2022 | adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED; |
2009 | if (netif_running(netdev)) | 2023 | if (netif_running(netdev)) |
2010 | ixgbe_reinit_locked(adapter); | 2024 | ixgbe_reinit_locked(adapter); |
2011 | else | 2025 | else |
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c index fa9f24e23683..28cf104e36cc 100644 --- a/drivers/net/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ixgbe/ixgbe_fcoe.c | |||
@@ -336,7 +336,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, | |||
336 | /* return 0 to bypass going to ULD for DDPed data */ | 336 | /* return 0 to bypass going to ULD for DDPed data */ |
337 | if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_DDP) | 337 | if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_DDP) |
338 | rc = 0; | 338 | rc = 0; |
339 | else | 339 | else if (ddp->len) |
340 | rc = ddp->len; | 340 | rc = ddp->len; |
341 | } | 341 | } |
342 | 342 | ||
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 200454f30f6a..77b0381a2b5c 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -492,12 +492,12 @@ static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector, | |||
492 | 492 | ||
493 | skb_record_rx_queue(skb, ring->queue_index); | 493 | skb_record_rx_queue(skb, ring->queue_index); |
494 | if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) { | 494 | if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) { |
495 | if (adapter->vlgrp && is_vlan && (tag != 0)) | 495 | if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK)) |
496 | vlan_gro_receive(napi, adapter->vlgrp, tag, skb); | 496 | vlan_gro_receive(napi, adapter->vlgrp, tag, skb); |
497 | else | 497 | else |
498 | napi_gro_receive(napi, skb); | 498 | napi_gro_receive(napi, skb); |
499 | } else { | 499 | } else { |
500 | if (adapter->vlgrp && is_vlan && (tag != 0)) | 500 | if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK)) |
501 | vlan_hwaccel_rx(skb, adapter->vlgrp, tag); | 501 | vlan_hwaccel_rx(skb, adapter->vlgrp, tag); |
502 | else | 502 | else |
503 | netif_rx(skb); | 503 | netif_rx(skb); |
@@ -585,7 +585,7 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, | |||
585 | rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); | 585 | rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); |
586 | 586 | ||
587 | if (!bi->page_dma && | 587 | if (!bi->page_dma && |
588 | (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) { | 588 | (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) { |
589 | if (!bi->page) { | 589 | if (!bi->page) { |
590 | bi->page = alloc_page(GFP_ATOMIC); | 590 | bi->page = alloc_page(GFP_ATOMIC); |
591 | if (!bi->page) { | 591 | if (!bi->page) { |
@@ -629,7 +629,7 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, | |||
629 | } | 629 | } |
630 | /* Refresh the desc even if buffer_addrs didn't change because | 630 | /* Refresh the desc even if buffer_addrs didn't change because |
631 | * each write-back erases this info. */ | 631 | * each write-back erases this info. */ |
632 | if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { | 632 | if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { |
633 | rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); | 633 | rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); |
634 | rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); | 634 | rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); |
635 | } else { | 635 | } else { |
@@ -726,7 +726,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, | |||
726 | break; | 726 | break; |
727 | (*work_done)++; | 727 | (*work_done)++; |
728 | 728 | ||
729 | if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { | 729 | if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { |
730 | hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc)); | 730 | hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc)); |
731 | len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >> | 731 | len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >> |
732 | IXGBE_RXDADV_HDRBUFLEN_SHIFT; | 732 | IXGBE_RXDADV_HDRBUFLEN_SHIFT; |
@@ -780,7 +780,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, | |||
780 | prefetch(next_rxd); | 780 | prefetch(next_rxd); |
781 | cleaned_count++; | 781 | cleaned_count++; |
782 | 782 | ||
783 | if (adapter->flags & IXGBE_FLAG2_RSC_CAPABLE) | 783 | if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) |
784 | rsc_count = ixgbe_get_rsc_count(rx_desc); | 784 | rsc_count = ixgbe_get_rsc_count(rx_desc); |
785 | 785 | ||
786 | if (rsc_count) { | 786 | if (rsc_count) { |
@@ -798,7 +798,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, | |||
798 | rx_ring->stats.packets++; | 798 | rx_ring->stats.packets++; |
799 | rx_ring->stats.bytes += skb->len; | 799 | rx_ring->stats.bytes += skb->len; |
800 | } else { | 800 | } else { |
801 | if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { | 801 | if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { |
802 | rx_buffer_info->skb = next_buffer->skb; | 802 | rx_buffer_info->skb = next_buffer->skb; |
803 | rx_buffer_info->dma = next_buffer->dma; | 803 | rx_buffer_info->dma = next_buffer->dma; |
804 | next_buffer->skb = skb; | 804 | next_buffer->skb = skb; |
@@ -1898,46 +1898,19 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) | |||
1898 | 1898 | ||
1899 | #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 | 1899 | #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 |
1900 | 1900 | ||
1901 | static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index) | 1901 | static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, |
1902 | struct ixgbe_ring *rx_ring) | ||
1902 | { | 1903 | { |
1903 | struct ixgbe_ring *rx_ring; | ||
1904 | u32 srrctl; | 1904 | u32 srrctl; |
1905 | int queue0 = 0; | 1905 | int index; |
1906 | unsigned long mask; | ||
1907 | struct ixgbe_ring_feature *feature = adapter->ring_feature; | 1906 | struct ixgbe_ring_feature *feature = adapter->ring_feature; |
1908 | 1907 | ||
1909 | if (adapter->hw.mac.type == ixgbe_mac_82599EB) { | 1908 | index = rx_ring->reg_idx; |
1910 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | 1909 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) { |
1911 | int dcb_i = feature[RING_F_DCB].indices; | 1910 | unsigned long mask; |
1912 | if (dcb_i == 8) | ||
1913 | queue0 = index >> 4; | ||
1914 | else if (dcb_i == 4) | ||
1915 | queue0 = index >> 5; | ||
1916 | else | ||
1917 | dev_err(&adapter->pdev->dev, "Invalid DCB " | ||
1918 | "configuration\n"); | ||
1919 | #ifdef IXGBE_FCOE | ||
1920 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { | ||
1921 | struct ixgbe_ring_feature *f; | ||
1922 | |||
1923 | rx_ring = &adapter->rx_ring[queue0]; | ||
1924 | f = &adapter->ring_feature[RING_F_FCOE]; | ||
1925 | if ((queue0 == 0) && (index > rx_ring->reg_idx)) | ||
1926 | queue0 = f->mask + index - | ||
1927 | rx_ring->reg_idx - 1; | ||
1928 | } | ||
1929 | #endif /* IXGBE_FCOE */ | ||
1930 | } else { | ||
1931 | queue0 = index; | ||
1932 | } | ||
1933 | } else { | ||
1934 | mask = (unsigned long) feature[RING_F_RSS].mask; | 1911 | mask = (unsigned long) feature[RING_F_RSS].mask; |
1935 | queue0 = index & mask; | ||
1936 | index = index & mask; | 1912 | index = index & mask; |
1937 | } | 1913 | } |
1938 | |||
1939 | rx_ring = &adapter->rx_ring[queue0]; | ||
1940 | |||
1941 | srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index)); | 1914 | srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index)); |
1942 | 1915 | ||
1943 | srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; | 1916 | srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; |
@@ -1946,7 +1919,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index) | |||
1946 | srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & | 1919 | srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & |
1947 | IXGBE_SRRCTL_BSIZEHDR_MASK; | 1920 | IXGBE_SRRCTL_BSIZEHDR_MASK; |
1948 | 1921 | ||
1949 | if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { | 1922 | if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { |
1950 | #if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER | 1923 | #if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER |
1951 | srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; | 1924 | srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; |
1952 | #else | 1925 | #else |
@@ -2002,6 +1975,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) | |||
2002 | { | 1975 | { |
2003 | u64 rdba; | 1976 | u64 rdba; |
2004 | struct ixgbe_hw *hw = &adapter->hw; | 1977 | struct ixgbe_hw *hw = &adapter->hw; |
1978 | struct ixgbe_ring *rx_ring; | ||
2005 | struct net_device *netdev = adapter->netdev; | 1979 | struct net_device *netdev = adapter->netdev; |
2006 | int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; | 1980 | int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; |
2007 | int i, j; | 1981 | int i, j; |
@@ -2018,11 +1992,6 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) | |||
2018 | /* Decide whether to use packet split mode or not */ | 1992 | /* Decide whether to use packet split mode or not */ |
2019 | adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; | 1993 | adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; |
2020 | 1994 | ||
2021 | #ifdef IXGBE_FCOE | ||
2022 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) | ||
2023 | adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; | ||
2024 | #endif /* IXGBE_FCOE */ | ||
2025 | |||
2026 | /* Set the RX buffer length according to the mode */ | 1995 | /* Set the RX buffer length according to the mode */ |
2027 | if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { | 1996 | if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { |
2028 | rx_buf_len = IXGBE_RX_HDR_SIZE; | 1997 | rx_buf_len = IXGBE_RX_HDR_SIZE; |
@@ -2036,7 +2005,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) | |||
2036 | IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype); | 2005 | IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype); |
2037 | } | 2006 | } |
2038 | } else { | 2007 | } else { |
2039 | if (!(adapter->flags & IXGBE_FLAG2_RSC_ENABLED) && | 2008 | if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && |
2040 | (netdev->mtu <= ETH_DATA_LEN)) | 2009 | (netdev->mtu <= ETH_DATA_LEN)) |
2041 | rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; | 2010 | rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; |
2042 | else | 2011 | else |
@@ -2070,29 +2039,35 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) | |||
2070 | * the Base and Length of the Rx Descriptor Ring | 2039 | * the Base and Length of the Rx Descriptor Ring |
2071 | */ | 2040 | */ |
2072 | for (i = 0; i < adapter->num_rx_queues; i++) { | 2041 | for (i = 0; i < adapter->num_rx_queues; i++) { |
2073 | rdba = adapter->rx_ring[i].dma; | 2042 | rx_ring = &adapter->rx_ring[i]; |
2074 | j = adapter->rx_ring[i].reg_idx; | 2043 | rdba = rx_ring->dma; |
2044 | j = rx_ring->reg_idx; | ||
2075 | IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32))); | 2045 | IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32))); |
2076 | IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32)); | 2046 | IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32)); |
2077 | IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen); | 2047 | IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen); |
2078 | IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0); | 2048 | IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0); |
2079 | IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0); | 2049 | IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0); |
2080 | adapter->rx_ring[i].head = IXGBE_RDH(j); | 2050 | rx_ring->head = IXGBE_RDH(j); |
2081 | adapter->rx_ring[i].tail = IXGBE_RDT(j); | 2051 | rx_ring->tail = IXGBE_RDT(j); |
2082 | adapter->rx_ring[i].rx_buf_len = rx_buf_len; | 2052 | rx_ring->rx_buf_len = rx_buf_len; |
2053 | |||
2054 | if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) | ||
2055 | rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED; | ||
2083 | 2056 | ||
2084 | #ifdef IXGBE_FCOE | 2057 | #ifdef IXGBE_FCOE |
2085 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { | 2058 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { |
2086 | struct ixgbe_ring_feature *f; | 2059 | struct ixgbe_ring_feature *f; |
2087 | f = &adapter->ring_feature[RING_F_FCOE]; | 2060 | f = &adapter->ring_feature[RING_F_FCOE]; |
2088 | if ((rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE) && | 2061 | if ((i >= f->mask) && (i < f->mask + f->indices)) { |
2089 | (i >= f->mask) && (i < f->mask + f->indices)) | 2062 | rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED; |
2090 | adapter->rx_ring[i].rx_buf_len = | 2063 | if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE) |
2091 | IXGBE_FCOE_JUMBO_FRAME_SIZE; | 2064 | rx_ring->rx_buf_len = |
2065 | IXGBE_FCOE_JUMBO_FRAME_SIZE; | ||
2066 | } | ||
2092 | } | 2067 | } |
2093 | 2068 | ||
2094 | #endif /* IXGBE_FCOE */ | 2069 | #endif /* IXGBE_FCOE */ |
2095 | ixgbe_configure_srrctl(adapter, j); | 2070 | ixgbe_configure_srrctl(adapter, rx_ring); |
2096 | } | 2071 | } |
2097 | 2072 | ||
2098 | if (hw->mac.type == ixgbe_mac_82598EB) { | 2073 | if (hw->mac.type == ixgbe_mac_82598EB) { |
@@ -2165,10 +2140,11 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) | |||
2165 | IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); | 2140 | IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); |
2166 | } | 2141 | } |
2167 | 2142 | ||
2168 | if (adapter->flags & IXGBE_FLAG2_RSC_ENABLED) { | 2143 | if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { |
2169 | /* Enable 82599 HW-RSC */ | 2144 | /* Enable 82599 HW-RSC */ |
2170 | for (i = 0; i < adapter->num_rx_queues; i++) { | 2145 | for (i = 0; i < adapter->num_rx_queues; i++) { |
2171 | j = adapter->rx_ring[i].reg_idx; | 2146 | rx_ring = &adapter->rx_ring[i]; |
2147 | j = rx_ring->reg_idx; | ||
2172 | rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j)); | 2148 | rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j)); |
2173 | rscctrl |= IXGBE_RSCCTL_RSCEN; | 2149 | rscctrl |= IXGBE_RSCCTL_RSCEN; |
2174 | /* | 2150 | /* |
@@ -2176,7 +2152,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) | |||
2176 | * total size of max desc * buf_len is not greater | 2152 | * total size of max desc * buf_len is not greater |
2177 | * than 65535 | 2153 | * than 65535 |
2178 | */ | 2154 | */ |
2179 | if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { | 2155 | if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { |
2180 | #if (MAX_SKB_FRAGS > 16) | 2156 | #if (MAX_SKB_FRAGS > 16) |
2181 | rscctrl |= IXGBE_RSCCTL_MAXDESC_16; | 2157 | rscctrl |= IXGBE_RSCCTL_MAXDESC_16; |
2182 | #elif (MAX_SKB_FRAGS > 8) | 2158 | #elif (MAX_SKB_FRAGS > 8) |
@@ -3812,8 +3788,8 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
3812 | adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; | 3788 | adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; |
3813 | } else if (hw->mac.type == ixgbe_mac_82599EB) { | 3789 | } else if (hw->mac.type == ixgbe_mac_82599EB) { |
3814 | adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; | 3790 | adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; |
3815 | adapter->flags |= IXGBE_FLAG2_RSC_CAPABLE; | 3791 | adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; |
3816 | adapter->flags |= IXGBE_FLAG2_RSC_ENABLED; | 3792 | adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; |
3817 | adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; | 3793 | adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; |
3818 | adapter->ring_feature[RING_F_FDIR].indices = | 3794 | adapter->ring_feature[RING_F_FDIR].indices = |
3819 | IXGBE_MAX_FDIR_INDICES; | 3795 | IXGBE_MAX_FDIR_INDICES; |
@@ -5360,12 +5336,19 @@ static int ixgbe_del_sanmac_netdev(struct net_device *dev) | |||
5360 | static void ixgbe_netpoll(struct net_device *netdev) | 5336 | static void ixgbe_netpoll(struct net_device *netdev) |
5361 | { | 5337 | { |
5362 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 5338 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
5339 | int i; | ||
5363 | 5340 | ||
5364 | disable_irq(adapter->pdev->irq); | ||
5365 | adapter->flags |= IXGBE_FLAG_IN_NETPOLL; | 5341 | adapter->flags |= IXGBE_FLAG_IN_NETPOLL; |
5366 | ixgbe_intr(adapter->pdev->irq, netdev); | 5342 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { |
5343 | int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | ||
5344 | for (i = 0; i < num_q_vectors; i++) { | ||
5345 | struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; | ||
5346 | ixgbe_msix_clean_many(0, q_vector); | ||
5347 | } | ||
5348 | } else { | ||
5349 | ixgbe_intr(adapter->pdev->irq, netdev); | ||
5350 | } | ||
5367 | adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL; | 5351 | adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL; |
5368 | enable_irq(adapter->pdev->irq); | ||
5369 | } | 5352 | } |
5370 | #endif | 5353 | #endif |
5371 | 5354 | ||
@@ -5611,7 +5594,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
5611 | if (pci_using_dac) | 5594 | if (pci_using_dac) |
5612 | netdev->features |= NETIF_F_HIGHDMA; | 5595 | netdev->features |= NETIF_F_HIGHDMA; |
5613 | 5596 | ||
5614 | if (adapter->flags & IXGBE_FLAG2_RSC_ENABLED) | 5597 | if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) |
5615 | netdev->features |= NETIF_F_LRO; | 5598 | netdev->features |= NETIF_F_LRO; |
5616 | 5599 | ||
5617 | /* make sure the EEPROM is good */ | 5600 | /* make sure the EEPROM is good */ |
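Several ixgbe hunks above (in ixgbe_ethtool.c and ixgbe_main.c) stop testing and toggling IXGBE_FLAG2_* bits in adapter->flags and use the separate adapter->flags2 word instead. The FLAG2 values are allocated independently of the FLAG values, so masking them against the wrong word can alias an unrelated feature bit and silently misreport whether RSC is capable or enabled. A small sketch of that failure mode, with made-up bit assignments:

    #include <linux/types.h>

    #define FEATURE_A_ENABLED       (1u << 3)   /* lives in flags  */
    #define FEATURE2_B_ENABLED      (1u << 3)   /* lives in flags2 */

    struct sketch_adapter {
            u32 flags;
            u32 flags2;
    };

    static bool feature_b_enabled(const struct sketch_adapter *a)
    {
            /* Testing a->flags here would really test FEATURE_A_ENABLED,
             * because both macros happen to use bit 3 of their own word. */
            return a->flags2 & FEATURE2_B_ENABLED;
    }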
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index fa87309dc087..be90eb4575f6 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h | |||
@@ -718,6 +718,12 @@ | |||
718 | #define IXGBE_ECC_STATUS_82599 0x110E0 | 718 | #define IXGBE_ECC_STATUS_82599 0x110E0 |
719 | #define IXGBE_BAR_CTRL_82599 0x110F4 | 719 | #define IXGBE_BAR_CTRL_82599 0x110F4 |
720 | 720 | ||
721 | /* PCI Express Control */ | ||
722 | #define IXGBE_GCR_CMPL_TMOUT_MASK 0x0000F000 | ||
723 | #define IXGBE_GCR_CMPL_TMOUT_10ms 0x00001000 | ||
724 | #define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000 | ||
725 | #define IXGBE_GCR_CAP_VER2 0x00040000 | ||
726 | |||
721 | /* Time Sync Registers */ | 727 | /* Time Sync Registers */ |
722 | #define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */ | 728 | #define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */ |
723 | #define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */ | 729 | #define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */ |
@@ -1521,6 +1527,7 @@ | |||
1521 | 1527 | ||
1522 | /* PCI Bus Info */ | 1528 | /* PCI Bus Info */ |
1523 | #define IXGBE_PCI_LINK_STATUS 0xB2 | 1529 | #define IXGBE_PCI_LINK_STATUS 0xB2 |
1530 | #define IXGBE_PCI_DEVICE_CONTROL2 0xC8 | ||
1524 | #define IXGBE_PCI_LINK_WIDTH 0x3F0 | 1531 | #define IXGBE_PCI_LINK_WIDTH 0x3F0 |
1525 | #define IXGBE_PCI_LINK_WIDTH_1 0x10 | 1532 | #define IXGBE_PCI_LINK_WIDTH_1 0x10 |
1526 | #define IXGBE_PCI_LINK_WIDTH_2 0x20 | 1533 | #define IXGBE_PCI_LINK_WIDTH_2 0x20 |
@@ -1531,6 +1538,7 @@ | |||
1531 | #define IXGBE_PCI_LINK_SPEED_5000 0x2 | 1538 | #define IXGBE_PCI_LINK_SPEED_5000 0x2 |
1532 | #define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E | 1539 | #define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E |
1533 | #define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80 | 1540 | #define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80 |
1541 | #define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005 | ||
1534 | 1542 | ||
1535 | /* Number of 100 microseconds we wait for PCI Express master disable */ | 1543 | /* Number of 100 microseconds we wait for PCI Express master disable */ |
1536 | #define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 | 1544 | #define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 |
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c index 2a0174b62e96..92fb8235c766 100644 --- a/drivers/net/ixp2000/ixpdev.c +++ b/drivers/net/ixp2000/ixpdev.c | |||
@@ -41,6 +41,7 @@ static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev) | |||
41 | struct ixpdev_priv *ip = netdev_priv(dev); | 41 | struct ixpdev_priv *ip = netdev_priv(dev); |
42 | struct ixpdev_tx_desc *desc; | 42 | struct ixpdev_tx_desc *desc; |
43 | int entry; | 43 | int entry; |
44 | unsigned long flags; | ||
44 | 45 | ||
45 | if (unlikely(skb->len > PAGE_SIZE)) { | 46 | if (unlikely(skb->len > PAGE_SIZE)) { |
46 | /* @@@ Count drops. */ | 47 | /* @@@ Count drops. */ |
@@ -63,11 +64,11 @@ static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev) | |||
63 | 64 | ||
64 | dev->trans_start = jiffies; | 65 | dev->trans_start = jiffies; |
65 | 66 | ||
66 | local_irq_disable(); | 67 | local_irq_save(flags); |
67 | ip->tx_queue_entries++; | 68 | ip->tx_queue_entries++; |
68 | if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN) | 69 | if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN) |
69 | netif_stop_queue(dev); | 70 | netif_stop_queue(dev); |
70 | local_irq_enable(); | 71 | local_irq_restore(flags); |
71 | 72 | ||
72 | return 0; | 73 | return 0; |
73 | } | 74 | } |
diff --git a/drivers/net/macb.c b/drivers/net/macb.c index 5b5c25368d1e..e3601cf3f931 100644 --- a/drivers/net/macb.c +++ b/drivers/net/macb.c | |||
@@ -620,6 +620,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
620 | dma_addr_t mapping; | 620 | dma_addr_t mapping; |
621 | unsigned int len, entry; | 621 | unsigned int len, entry; |
622 | u32 ctrl; | 622 | u32 ctrl; |
623 | unsigned long flags; | ||
623 | 624 | ||
624 | #ifdef DEBUG | 625 | #ifdef DEBUG |
625 | int i; | 626 | int i; |
@@ -635,12 +636,12 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
635 | #endif | 636 | #endif |
636 | 637 | ||
637 | len = skb->len; | 638 | len = skb->len; |
638 | spin_lock_irq(&bp->lock); | 639 | spin_lock_irqsave(&bp->lock, flags); |
639 | 640 | ||
640 | /* This is a hard error, log it. */ | 641 | /* This is a hard error, log it. */ |
641 | if (TX_BUFFS_AVAIL(bp) < 1) { | 642 | if (TX_BUFFS_AVAIL(bp) < 1) { |
642 | netif_stop_queue(dev); | 643 | netif_stop_queue(dev); |
643 | spin_unlock_irq(&bp->lock); | 644 | spin_unlock_irqrestore(&bp->lock, flags); |
644 | dev_err(&bp->pdev->dev, | 645 | dev_err(&bp->pdev->dev, |
645 | "BUG! Tx Ring full when queue awake!\n"); | 646 | "BUG! Tx Ring full when queue awake!\n"); |
646 | dev_dbg(&bp->pdev->dev, "tx_head = %u, tx_tail = %u\n", | 647 | dev_dbg(&bp->pdev->dev, "tx_head = %u, tx_tail = %u\n", |
@@ -674,7 +675,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
674 | if (TX_BUFFS_AVAIL(bp) < 1) | 675 | if (TX_BUFFS_AVAIL(bp) < 1) |
675 | netif_stop_queue(dev); | 676 | netif_stop_queue(dev); |
676 | 677 | ||
677 | spin_unlock_irq(&bp->lock); | 678 | spin_unlock_irqrestore(&bp->lock, flags); |
678 | 679 | ||
679 | dev->trans_start = jiffies; | 680 | dev->trans_start = jiffies; |
680 | 681 | ||
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c index 91bdfdfd431f..3ac0404d0d11 100644 --- a/drivers/net/mlx4/en_rx.c +++ b/drivers/net/mlx4/en_rx.c | |||
@@ -506,8 +506,9 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv, | |||
506 | PCI_DMA_FROMDEVICE); | 506 | PCI_DMA_FROMDEVICE); |
507 | } | 507 | } |
508 | /* Adjust size of last fragment to match actual length */ | 508 | /* Adjust size of last fragment to match actual length */ |
509 | skb_frags_rx[nr - 1].size = length - | 509 | if (nr > 0) |
510 | priv->frag_info[nr - 1].frag_prefix_size; | 510 | skb_frags_rx[nr - 1].size = length - |
511 | priv->frag_info[nr - 1].frag_prefix_size; | ||
511 | return nr; | 512 | return nr; |
512 | 513 | ||
513 | fail: | 514 | fail: |
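The en_rx hunk guards the trailing-fragment fixup with nr > 0, so a completion that yielded no fragments no longer writes to skb_frags_rx[-1]. The guard-before-back-index pattern in isolation:

    /* Trim the last element to the remaining length, but only if one exists. */
    static void fixup_last_frag(unsigned int *sizes, unsigned int nr,
                                unsigned int remaining)
    {
            if (nr > 0)                     /* sizes[nr - 1] is invalid when nr == 0 */
                    sizes[nr - 1] = remaining;
    }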
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c index 08c43f2ae72b..62208401c4df 100644 --- a/drivers/net/mlx4/en_tx.c +++ b/drivers/net/mlx4/en_tx.c | |||
@@ -249,6 +249,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, | |||
249 | pci_unmap_page(mdev->pdev, | 249 | pci_unmap_page(mdev->pdev, |
250 | (dma_addr_t) be64_to_cpu(data->addr), | 250 | (dma_addr_t) be64_to_cpu(data->addr), |
251 | frag->size, PCI_DMA_TODEVICE); | 251 | frag->size, PCI_DMA_TODEVICE); |
252 | ++data; | ||
252 | } | 253 | } |
253 | } | 254 | } |
254 | /* Stamp the freed descriptor */ | 255 | /* Stamp the freed descriptor */ |
@@ -436,6 +437,7 @@ static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind) | |||
436 | { | 437 | { |
437 | struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind]; | 438 | struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind]; |
438 | struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind]; | 439 | struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind]; |
440 | unsigned long flags; | ||
439 | 441 | ||
440 | /* If we don't have a pending timer, set one up to catch our recent | 442 | /* If we don't have a pending timer, set one up to catch our recent |
441 | post in case the interface becomes idle */ | 443 | post in case the interface becomes idle */ |
@@ -444,9 +446,9 @@ static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind) | |||
444 | 446 | ||
445 | /* Poll the CQ every mlx4_en_TX_MODER_POLL packets */ | 447 | /* Poll the CQ every mlx4_en_TX_MODER_POLL packets */ |
446 | if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0) | 448 | if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0) |
447 | if (spin_trylock_irq(&ring->comp_lock)) { | 449 | if (spin_trylock_irqsave(&ring->comp_lock, flags)) { |
448 | mlx4_en_process_tx_cq(priv->dev, cq); | 450 | mlx4_en_process_tx_cq(priv->dev, cq); |
449 | spin_unlock_irq(&ring->comp_lock); | 451 | spin_unlock_irqrestore(&ring->comp_lock, flags); |
450 | } | 452 | } |
451 | } | 453 | } |
452 | 454 | ||
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h index f86e05047d19..a9c1fcca5e75 100644 --- a/drivers/net/netxen/netxen_nic.h +++ b/drivers/net/netxen/netxen_nic.h | |||
@@ -1254,7 +1254,7 @@ struct netxen_adapter { | |||
1254 | u8 mc_enabled; | 1254 | u8 mc_enabled; |
1255 | u8 max_mc_count; | 1255 | u8 max_mc_count; |
1256 | u8 rss_supported; | 1256 | u8 rss_supported; |
1257 | u8 resv2; | 1257 | u8 link_changed; |
1258 | u32 resv3; | 1258 | u32 resv3; |
1259 | 1259 | ||
1260 | u8 has_link_events; | 1260 | u8 has_link_events; |
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index 7acf204e38c9..5d3343ef3d86 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c | |||
@@ -184,13 +184,6 @@ void netxen_free_sw_resources(struct netxen_adapter *adapter) | |||
184 | kfree(recv_ctx->rds_rings); | 184 | kfree(recv_ctx->rds_rings); |
185 | 185 | ||
186 | skip_rds: | 186 | skip_rds: |
187 | if (recv_ctx->sds_rings == NULL) | ||
188 | goto skip_sds; | ||
189 | |||
190 | for(ring = 0; ring < adapter->max_sds_rings; ring++) | ||
191 | recv_ctx->sds_rings[ring].consumer = 0; | ||
192 | |||
193 | skip_sds: | ||
194 | if (adapter->tx_ring == NULL) | 187 | if (adapter->tx_ring == NULL) |
195 | return; | 188 | return; |
196 | 189 | ||
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 637ac8b89bac..28f270f5ac78 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c | |||
@@ -94,10 +94,6 @@ static struct pci_device_id netxen_pci_tbl[] __devinitdata = { | |||
94 | 94 | ||
95 | MODULE_DEVICE_TABLE(pci, netxen_pci_tbl); | 95 | MODULE_DEVICE_TABLE(pci, netxen_pci_tbl); |
96 | 96 | ||
97 | static struct workqueue_struct *netxen_workq; | ||
98 | #define SCHEDULE_WORK(tp) queue_work(netxen_workq, tp) | ||
99 | #define FLUSH_SCHEDULED_WORK() flush_workqueue(netxen_workq) | ||
100 | |||
101 | static void netxen_watchdog(unsigned long); | 97 | static void netxen_watchdog(unsigned long); |
102 | 98 | ||
103 | static uint32_t crb_cmd_producer[4] = { | 99 | static uint32_t crb_cmd_producer[4] = { |
@@ -171,6 +167,8 @@ netxen_free_sds_rings(struct netxen_recv_context *recv_ctx) | |||
171 | { | 167 | { |
172 | if (recv_ctx->sds_rings != NULL) | 168 | if (recv_ctx->sds_rings != NULL) |
173 | kfree(recv_ctx->sds_rings); | 169 | kfree(recv_ctx->sds_rings); |
170 | |||
171 | recv_ctx->sds_rings = NULL; | ||
174 | } | 172 | } |
175 | 173 | ||
176 | static int | 174 | static int |
@@ -193,6 +191,21 @@ netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev) | |||
193 | } | 191 | } |
194 | 192 | ||
195 | static void | 193 | static void |
194 | netxen_napi_del(struct netxen_adapter *adapter) | ||
195 | { | ||
196 | int ring; | ||
197 | struct nx_host_sds_ring *sds_ring; | ||
198 | struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; | ||
199 | |||
200 | for (ring = 0; ring < adapter->max_sds_rings; ring++) { | ||
201 | sds_ring = &recv_ctx->sds_rings[ring]; | ||
202 | netif_napi_del(&sds_ring->napi); | ||
203 | } | ||
204 | |||
205 | netxen_free_sds_rings(&adapter->recv_ctx); | ||
206 | } | ||
207 | |||
208 | static void | ||
196 | netxen_napi_enable(struct netxen_adapter *adapter) | 209 | netxen_napi_enable(struct netxen_adapter *adapter) |
197 | { | 210 | { |
198 | int ring; | 211 | int ring; |
@@ -221,7 +234,7 @@ netxen_napi_disable(struct netxen_adapter *adapter) | |||
221 | } | 234 | } |
222 | } | 235 | } |
223 | 236 | ||
224 | static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id) | 237 | static int nx_set_dma_mask(struct netxen_adapter *adapter) |
225 | { | 238 | { |
226 | struct pci_dev *pdev = adapter->pdev; | 239 | struct pci_dev *pdev = adapter->pdev; |
227 | uint64_t mask, cmask; | 240 | uint64_t mask, cmask; |
@@ -229,19 +242,17 @@ static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id) | |||
229 | adapter->pci_using_dac = 0; | 242 | adapter->pci_using_dac = 0; |
230 | 243 | ||
231 | mask = DMA_BIT_MASK(32); | 244 | mask = DMA_BIT_MASK(32); |
232 | /* | ||
233 | * Consistent DMA mask is set to 32 bit because it cannot be set to | ||
234 | * 35 bits. For P3 also leave it at 32 bits for now. Only the rings | ||
235 | * come off this pool. | ||
236 | */ | ||
237 | cmask = DMA_BIT_MASK(32); | 245 | cmask = DMA_BIT_MASK(32); |
238 | 246 | ||
247 | if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { | ||
239 | #ifndef CONFIG_IA64 | 248 | #ifndef CONFIG_IA64 |
240 | if (revision_id >= NX_P3_B0) | ||
241 | mask = DMA_BIT_MASK(39); | ||
242 | else if (revision_id == NX_P2_C1) | ||
243 | mask = DMA_BIT_MASK(35); | 249 | mask = DMA_BIT_MASK(35); |
244 | #endif | 250 | #endif |
251 | } else { | ||
252 | mask = DMA_BIT_MASK(39); | ||
253 | cmask = mask; | ||
254 | } | ||
255 | |||
245 | if (pci_set_dma_mask(pdev, mask) == 0 && | 256 | if (pci_set_dma_mask(pdev, mask) == 0 && |
246 | pci_set_consistent_dma_mask(pdev, cmask) == 0) { | 257 | pci_set_consistent_dma_mask(pdev, cmask) == 0) { |
247 | adapter->pci_using_dac = 1; | 258 | adapter->pci_using_dac = 1; |
@@ -256,13 +267,13 @@ static int | |||
256 | nx_update_dma_mask(struct netxen_adapter *adapter) | 267 | nx_update_dma_mask(struct netxen_adapter *adapter) |
257 | { | 268 | { |
258 | int change, shift, err; | 269 | int change, shift, err; |
259 | uint64_t mask, old_mask; | 270 | uint64_t mask, old_mask, old_cmask; |
260 | struct pci_dev *pdev = adapter->pdev; | 271 | struct pci_dev *pdev = adapter->pdev; |
261 | 272 | ||
262 | change = 0; | 273 | change = 0; |
263 | 274 | ||
264 | shift = NXRD32(adapter, CRB_DMA_SHIFT); | 275 | shift = NXRD32(adapter, CRB_DMA_SHIFT); |
265 | if (shift >= 32) | 276 | if (shift > 32) |
266 | return 0; | 277 | return 0; |
267 | 278 | ||
268 | if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && (shift > 9)) | 279 | if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && (shift > 9)) |
@@ -272,14 +283,29 @@ nx_update_dma_mask(struct netxen_adapter *adapter) | |||
272 | 283 | ||
273 | if (change) { | 284 | if (change) { |
274 | old_mask = pdev->dma_mask; | 285 | old_mask = pdev->dma_mask; |
275 | mask = (1ULL<<(32+shift)) - 1; | 286 | old_cmask = pdev->dev.coherent_dma_mask; |
287 | |||
288 | mask = DMA_BIT_MASK(32+shift); | ||
276 | 289 | ||
277 | err = pci_set_dma_mask(pdev, mask); | 290 | err = pci_set_dma_mask(pdev, mask); |
278 | if (err) | 291 | if (err) |
279 | return pci_set_dma_mask(pdev, old_mask); | 292 | goto err_out; |
293 | |||
294 | if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { | ||
295 | |||
296 | err = pci_set_consistent_dma_mask(pdev, mask); | ||
297 | if (err) | ||
298 | goto err_out; | ||
299 | } | ||
300 | dev_info(&pdev->dev, "using %d-bit dma mask\n", 32+shift); | ||
280 | } | 301 | } |
281 | 302 | ||
282 | return 0; | 303 | return 0; |
304 | |||
305 | err_out: | ||
306 | pci_set_dma_mask(pdev, old_mask); | ||
307 | pci_set_consistent_dma_mask(pdev, old_cmask); | ||
308 | return err; | ||
283 | } | 309 | } |
284 | 310 | ||
285 | static void netxen_check_options(struct netxen_adapter *adapter) | 311 | static void netxen_check_options(struct netxen_adapter *adapter) |
@@ -867,7 +893,6 @@ netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev) | |||
867 | spin_unlock(&adapter->tx_clean_lock); | 893 | spin_unlock(&adapter->tx_clean_lock); |
868 | 894 | ||
869 | del_timer_sync(&adapter->watchdog_timer); | 895 | del_timer_sync(&adapter->watchdog_timer); |
870 | FLUSH_SCHEDULED_WORK(); | ||
871 | } | 896 | } |
872 | 897 | ||
873 | 898 | ||
@@ -881,10 +906,12 @@ netxen_nic_attach(struct netxen_adapter *adapter) | |||
881 | struct nx_host_tx_ring *tx_ring; | 906 | struct nx_host_tx_ring *tx_ring; |
882 | 907 | ||
883 | err = netxen_init_firmware(adapter); | 908 | err = netxen_init_firmware(adapter); |
884 | if (err != 0) { | 909 | if (err) |
885 | printk(KERN_ERR "Failed to init firmware\n"); | 910 | return err; |
886 | return -EIO; | 911 | |
887 | } | 912 | err = netxen_napi_add(adapter, netdev); |
913 | if (err) | ||
914 | return err; | ||
888 | 915 | ||
889 | if (adapter->fw_major < 4) | 916 | if (adapter->fw_major < 4) |
890 | adapter->max_rds_rings = 3; | 917 | adapter->max_rds_rings = 3; |
@@ -948,6 +975,7 @@ netxen_nic_detach(struct netxen_adapter *adapter) | |||
948 | netxen_free_hw_resources(adapter); | 975 | netxen_free_hw_resources(adapter); |
949 | netxen_release_rx_buffers(adapter); | 976 | netxen_release_rx_buffers(adapter); |
950 | netxen_nic_free_irq(adapter); | 977 | netxen_nic_free_irq(adapter); |
978 | netxen_napi_del(adapter); | ||
951 | netxen_free_sw_resources(adapter); | 979 | netxen_free_sw_resources(adapter); |
952 | 980 | ||
953 | adapter->is_up = 0; | 981 | adapter->is_up = 0; |
@@ -1006,7 +1034,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1006 | revision_id = pdev->revision; | 1034 | revision_id = pdev->revision; |
1007 | adapter->ahw.revision_id = revision_id; | 1035 | adapter->ahw.revision_id = revision_id; |
1008 | 1036 | ||
1009 | err = nx_set_dma_mask(adapter, revision_id); | 1037 | err = nx_set_dma_mask(adapter); |
1010 | if (err) | 1038 | if (err) |
1011 | goto err_out_free_netdev; | 1039 | goto err_out_free_netdev; |
1012 | 1040 | ||
@@ -1092,9 +1120,6 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1092 | 1120 | ||
1093 | netdev->irq = adapter->msix_entries[0].vector; | 1121 | netdev->irq = adapter->msix_entries[0].vector; |
1094 | 1122 | ||
1095 | if (netxen_napi_add(adapter, netdev)) | ||
1096 | goto err_out_disable_msi; | ||
1097 | |||
1098 | init_timer(&adapter->watchdog_timer); | 1123 | init_timer(&adapter->watchdog_timer); |
1099 | adapter->watchdog_timer.function = &netxen_watchdog; | 1124 | adapter->watchdog_timer.function = &netxen_watchdog; |
1100 | adapter->watchdog_timer.data = (unsigned long)adapter; | 1125 | adapter->watchdog_timer.data = (unsigned long)adapter; |
@@ -1164,6 +1189,9 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev) | |||
1164 | 1189 | ||
1165 | unregister_netdev(netdev); | 1190 | unregister_netdev(netdev); |
1166 | 1191 | ||
1192 | cancel_work_sync(&adapter->watchdog_task); | ||
1193 | cancel_work_sync(&adapter->tx_timeout_task); | ||
1194 | |||
1167 | if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) { | 1195 | if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) { |
1168 | netxen_nic_detach(adapter); | 1196 | netxen_nic_detach(adapter); |
1169 | } | 1197 | } |
@@ -1172,7 +1200,6 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev) | |||
1172 | netxen_free_adapter_offload(adapter); | 1200 | netxen_free_adapter_offload(adapter); |
1173 | 1201 | ||
1174 | netxen_teardown_intr(adapter); | 1202 | netxen_teardown_intr(adapter); |
1175 | netxen_free_sds_rings(&adapter->recv_ctx); | ||
1176 | 1203 | ||
1177 | netxen_cleanup_pci_map(adapter); | 1204 | netxen_cleanup_pci_map(adapter); |
1178 | 1205 | ||
@@ -1198,6 +1225,9 @@ netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state) | |||
1198 | if (netif_running(netdev)) | 1225 | if (netif_running(netdev)) |
1199 | netxen_nic_down(adapter, netdev); | 1226 | netxen_nic_down(adapter, netdev); |
1200 | 1227 | ||
1228 | cancel_work_sync(&adapter->watchdog_task); | ||
1229 | cancel_work_sync(&adapter->tx_timeout_task); | ||
1230 | |||
1201 | if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) | 1231 | if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) |
1202 | netxen_nic_detach(adapter); | 1232 | netxen_nic_detach(adapter); |
1203 | 1233 | ||
@@ -1536,11 +1566,6 @@ static int netxen_nic_check_temp(struct netxen_adapter *adapter) | |||
1536 | "%s: Device temperature %d degrees C exceeds" | 1566 | "%s: Device temperature %d degrees C exceeds" |
1537 | " maximum allowed. Hardware has been shut down.\n", | 1567 | " maximum allowed. Hardware has been shut down.\n", |
1538 | netdev->name, temp_val); | 1568 | netdev->name, temp_val); |
1539 | |||
1540 | netif_device_detach(netdev); | ||
1541 | netxen_nic_down(adapter, netdev); | ||
1542 | netxen_nic_detach(adapter); | ||
1543 | |||
1544 | rv = 1; | 1569 | rv = 1; |
1545 | } else if (temp_state == NX_TEMP_WARN) { | 1570 | } else if (temp_state == NX_TEMP_WARN) { |
1546 | if (adapter->temp == NX_TEMP_NORMAL) { | 1571 | if (adapter->temp == NX_TEMP_NORMAL) { |
@@ -1574,10 +1599,7 @@ void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup) | |||
1574 | netif_carrier_off(netdev); | 1599 | netif_carrier_off(netdev); |
1575 | netif_stop_queue(netdev); | 1600 | netif_stop_queue(netdev); |
1576 | } | 1601 | } |
1577 | 1602 | adapter->link_changed = !adapter->has_link_events; | |
1578 | if (!adapter->has_link_events) | ||
1579 | netxen_nic_set_link_parameters(adapter); | ||
1580 | |||
1581 | } else if (!adapter->ahw.linkup && linkup) { | 1603 | } else if (!adapter->ahw.linkup && linkup) { |
1582 | printk(KERN_INFO "%s: %s NIC Link is up\n", | 1604 | printk(KERN_INFO "%s: %s NIC Link is up\n", |
1583 | netxen_nic_driver_name, netdev->name); | 1605 | netxen_nic_driver_name, netdev->name); |
@@ -1586,9 +1608,7 @@ void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup) | |||
1586 | netif_carrier_on(netdev); | 1608 | netif_carrier_on(netdev); |
1587 | netif_wake_queue(netdev); | 1609 | netif_wake_queue(netdev); |
1588 | } | 1610 | } |
1589 | 1611 | adapter->link_changed = !adapter->has_link_events; | |
1590 | if (!adapter->has_link_events) | ||
1591 | netxen_nic_set_link_parameters(adapter); | ||
1592 | } | 1612 | } |
1593 | } | 1613 | } |
1594 | 1614 | ||
@@ -1615,11 +1635,36 @@ static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter) | |||
1615 | netxen_advert_link_change(adapter, linkup); | 1635 | netxen_advert_link_change(adapter, linkup); |
1616 | } | 1636 | } |
1617 | 1637 | ||
1638 | static void netxen_nic_thermal_shutdown(struct netxen_adapter *adapter) | ||
1639 | { | ||
1640 | struct net_device *netdev = adapter->netdev; | ||
1641 | |||
1642 | netif_device_detach(netdev); | ||
1643 | netxen_nic_down(adapter, netdev); | ||
1644 | netxen_nic_detach(adapter); | ||
1645 | } | ||
1646 | |||
1618 | static void netxen_watchdog(unsigned long v) | 1647 | static void netxen_watchdog(unsigned long v) |
1619 | { | 1648 | { |
1620 | struct netxen_adapter *adapter = (struct netxen_adapter *)v; | 1649 | struct netxen_adapter *adapter = (struct netxen_adapter *)v; |
1621 | 1650 | ||
1622 | SCHEDULE_WORK(&adapter->watchdog_task); | 1651 | if (netxen_nic_check_temp(adapter)) |
1652 | goto do_sched; | ||
1653 | |||
1654 | if (!adapter->has_link_events) { | ||
1655 | netxen_nic_handle_phy_intr(adapter); | ||
1656 | |||
1657 | if (adapter->link_changed) | ||
1658 | goto do_sched; | ||
1659 | } | ||
1660 | |||
1661 | if (netif_running(adapter->netdev)) | ||
1662 | mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); | ||
1663 | |||
1664 | return; | ||
1665 | |||
1666 | do_sched: | ||
1667 | schedule_work(&adapter->watchdog_task); | ||
1623 | } | 1668 | } |
1624 | 1669 | ||
1625 | void netxen_watchdog_task(struct work_struct *work) | 1670 | void netxen_watchdog_task(struct work_struct *work) |
@@ -1627,11 +1672,13 @@ void netxen_watchdog_task(struct work_struct *work) | |||
1627 | struct netxen_adapter *adapter = | 1672 | struct netxen_adapter *adapter = |
1628 | container_of(work, struct netxen_adapter, watchdog_task); | 1673 | container_of(work, struct netxen_adapter, watchdog_task); |
1629 | 1674 | ||
1630 | if (netxen_nic_check_temp(adapter)) | 1675 | if (adapter->temp == NX_TEMP_PANIC) { |
1676 | netxen_nic_thermal_shutdown(adapter); | ||
1631 | return; | 1677 | return; |
1678 | } | ||
1632 | 1679 | ||
1633 | if (!adapter->has_link_events) | 1680 | if (adapter->link_changed) |
1634 | netxen_nic_handle_phy_intr(adapter); | 1681 | netxen_nic_set_link_parameters(adapter); |
1635 | 1682 | ||
1636 | if (netif_running(adapter->netdev)) | 1683 | if (netif_running(adapter->netdev)) |
1637 | mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); | 1684 | mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); |
@@ -1639,9 +1686,8 @@ void netxen_watchdog_task(struct work_struct *work) | |||
1639 | 1686 | ||
1640 | static void netxen_tx_timeout(struct net_device *netdev) | 1687 | static void netxen_tx_timeout(struct net_device *netdev) |
1641 | { | 1688 | { |
1642 | struct netxen_adapter *adapter = (struct netxen_adapter *) | 1689 | struct netxen_adapter *adapter = netdev_priv(netdev); |
1643 | netdev_priv(netdev); | 1690 | schedule_work(&adapter->tx_timeout_task); |
1644 | SCHEDULE_WORK(&adapter->tx_timeout_task); | ||
1645 | } | 1691 | } |
1646 | 1692 | ||
1647 | static void netxen_tx_timeout_task(struct work_struct *work) | 1693 | static void netxen_tx_timeout_task(struct work_struct *work) |
@@ -1798,9 +1844,6 @@ static int __init netxen_init_module(void) | |||
1798 | { | 1844 | { |
1799 | printk(KERN_INFO "%s\n", netxen_nic_driver_string); | 1845 | printk(KERN_INFO "%s\n", netxen_nic_driver_string); |
1800 | 1846 | ||
1801 | if ((netxen_workq = create_singlethread_workqueue("netxen")) == NULL) | ||
1802 | return -ENOMEM; | ||
1803 | |||
1804 | return pci_register_driver(&netxen_driver); | 1847 | return pci_register_driver(&netxen_driver); |
1805 | } | 1848 | } |
1806 | 1849 | ||
@@ -1809,7 +1852,6 @@ module_init(netxen_init_module); | |||
1809 | static void __exit netxen_exit_module(void) | 1852 | static void __exit netxen_exit_module(void) |
1810 | { | 1853 | { |
1811 | pci_unregister_driver(&netxen_driver); | 1854 | pci_unregister_driver(&netxen_driver); |
1812 | destroy_workqueue(netxen_workq); | ||
1813 | } | 1855 | } |
1814 | 1856 | ||
1815 | module_exit(netxen_exit_module); | 1857 | module_exit(netxen_exit_module); |
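The netxen hunks above split the watchdog into a cheap timer callback (temperature check, PHY polling, re-arm) and a work item that performs the operations which may sleep, and they drop the driver-private workqueue in favour of the shared schedule_work() path. Below is a minimal kernel-style sketch of that timer-plus-work pattern; it is an illustration only, and the my_* names are placeholders rather than netxen symbols.

    #include <linux/kernel.h>
    #include <linux/timer.h>
    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    struct my_adapter {
        struct timer_list watchdog_timer;
        struct work_struct watchdog_task;
    };

    /* placeholder for a cheap check such as the temperature/link polls above */
    static int my_needs_attention(struct my_adapter *ad)
    {
        return 0;
    }

    /* placeholder for work that may sleep (detach, ring teardown, ...) */
    static void my_slow_handler(struct my_adapter *ad)
    {
    }

    static void my_watchdog(unsigned long data)      /* runs in timer (softirq) context */
    {
        struct my_adapter *ad = (struct my_adapter *)data;

        if (my_needs_attention(ad)) {
            schedule_work(&ad->watchdog_task);       /* defer heavy work to process context */
            return;
        }
        mod_timer(&ad->watchdog_timer, jiffies + 2 * HZ);
    }

    static void my_watchdog_task(struct work_struct *work)
    {
        struct my_adapter *ad =
            container_of(work, struct my_adapter, watchdog_task);

        my_slow_handler(ad);                         /* safe to sleep here */
        mod_timer(&ad->watchdog_timer, jiffies + 2 * HZ);
    }

The timer and work item would be wired up elsewhere with setup_timer() and INIT_WORK(), as the driver does in its probe path.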
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c index 28368157dac4..23e1a0750fe0 100644 --- a/drivers/net/pcnet32.c +++ b/drivers/net/pcnet32.c | |||
@@ -1611,8 +1611,11 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) | |||
1611 | if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 | 1611 | if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 |
1612 | && pcnet32_dwio_check(ioaddr)) { | 1612 | && pcnet32_dwio_check(ioaddr)) { |
1613 | a = &pcnet32_dwio; | 1613 | a = &pcnet32_dwio; |
1614 | } else | 1614 | } else { |
1615 | if (pcnet32_debug & NETIF_MSG_PROBE) | ||
1616 | printk(KERN_ERR PFX "No access methods\n"); | ||
1615 | goto err_release_region; | 1617 | goto err_release_region; |
1618 | } | ||
1616 | } | 1619 | } |
1617 | 1620 | ||
1618 | chip_version = | 1621 | chip_version = |
@@ -1719,7 +1722,9 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) | |||
1719 | ret = -ENOMEM; | 1722 | ret = -ENOMEM; |
1720 | goto err_release_region; | 1723 | goto err_release_region; |
1721 | } | 1724 | } |
1722 | SET_NETDEV_DEV(dev, &pdev->dev); | 1725 | |
1726 | if (pdev) | ||
1727 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
1723 | 1728 | ||
1724 | if (pcnet32_debug & NETIF_MSG_PROBE) | 1729 | if (pcnet32_debug & NETIF_MSG_PROBE) |
1725 | printk(KERN_INFO PFX "%s at %#3lx,", chipname, ioaddr); | 1730 | printk(KERN_INFO PFX "%s at %#3lx,", chipname, ioaddr); |
@@ -1818,7 +1823,6 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) | |||
1818 | 1823 | ||
1819 | spin_lock_init(&lp->lock); | 1824 | spin_lock_init(&lp->lock); |
1820 | 1825 | ||
1821 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
1822 | lp->name = chipname; | 1826 | lp->name = chipname; |
1823 | lp->shared_irq = shared; | 1827 | lp->shared_irq = shared; |
1824 | lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */ | 1828 | lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */ |
@@ -1835,7 +1839,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) | |||
1835 | lp->chip_version = chip_version; | 1839 | lp->chip_version = chip_version; |
1836 | lp->msg_enable = pcnet32_debug; | 1840 | lp->msg_enable = pcnet32_debug; |
1837 | if ((cards_found >= MAX_UNITS) | 1841 | if ((cards_found >= MAX_UNITS) |
1838 | || (options[cards_found] > sizeof(options_mapping))) | 1842 | || (options[cards_found] >= sizeof(options_mapping))) |
1839 | lp->options = PCNET32_PORT_ASEL; | 1843 | lp->options = PCNET32_PORT_ASEL; |
1840 | else | 1844 | else |
1841 | lp->options = options_mapping[options[cards_found]]; | 1845 | lp->options = options_mapping[options[cards_found]]; |
@@ -1852,12 +1856,6 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) | |||
1852 | ((cards_found >= MAX_UNITS) || full_duplex[cards_found])) | 1856 | ((cards_found >= MAX_UNITS) || full_duplex[cards_found])) |
1853 | lp->options |= PCNET32_PORT_FD; | 1857 | lp->options |= PCNET32_PORT_FD; |
1854 | 1858 | ||
1855 | if (!a) { | ||
1856 | if (pcnet32_debug & NETIF_MSG_PROBE) | ||
1857 | printk(KERN_ERR PFX "No access methods\n"); | ||
1858 | ret = -ENODEV; | ||
1859 | goto err_free_consistent; | ||
1860 | } | ||
1861 | lp->a = *a; | 1859 | lp->a = *a; |
1862 | 1860 | ||
1863 | /* prior to register_netdev, dev->name is not yet correct */ | 1861 | /* prior to register_netdev, dev->name is not yet correct */ |
@@ -1973,14 +1971,13 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) | |||
1973 | 1971 | ||
1974 | return 0; | 1972 | return 0; |
1975 | 1973 | ||
1976 | err_free_ring: | 1974 | err_free_ring: |
1977 | pcnet32_free_ring(dev); | 1975 | pcnet32_free_ring(dev); |
1978 | err_free_consistent: | ||
1979 | pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block), | 1976 | pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block), |
1980 | lp->init_block, lp->init_dma_addr); | 1977 | lp->init_block, lp->init_dma_addr); |
1981 | err_free_netdev: | 1978 | err_free_netdev: |
1982 | free_netdev(dev); | 1979 | free_netdev(dev); |
1983 | err_release_region: | 1980 | err_release_region: |
1984 | release_region(ioaddr, PCNET32_TOTAL_SIZE); | 1981 | release_region(ioaddr, PCNET32_TOTAL_SIZE); |
1985 | return ret; | 1982 | return ret; |
1986 | } | 1983 | } |
@@ -2089,6 +2086,7 @@ static void pcnet32_free_ring(struct net_device *dev) | |||
2089 | static int pcnet32_open(struct net_device *dev) | 2086 | static int pcnet32_open(struct net_device *dev) |
2090 | { | 2087 | { |
2091 | struct pcnet32_private *lp = netdev_priv(dev); | 2088 | struct pcnet32_private *lp = netdev_priv(dev); |
2089 | struct pci_dev *pdev = lp->pci_dev; | ||
2092 | unsigned long ioaddr = dev->base_addr; | 2090 | unsigned long ioaddr = dev->base_addr; |
2093 | u16 val; | 2091 | u16 val; |
2094 | int i; | 2092 | int i; |
@@ -2149,9 +2147,9 @@ static int pcnet32_open(struct net_device *dev) | |||
2149 | lp->a.write_csr(ioaddr, 124, val); | 2147 | lp->a.write_csr(ioaddr, 124, val); |
2150 | 2148 | ||
2151 | /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */ | 2149 | /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */ |
2152 | if (lp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_AT && | 2150 | if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT && |
2153 | (lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX || | 2151 | (pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX || |
2154 | lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) { | 2152 | pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) { |
2155 | if (lp->options & PCNET32_PORT_ASEL) { | 2153 | if (lp->options & PCNET32_PORT_ASEL) { |
2156 | lp->options = PCNET32_PORT_FD | PCNET32_PORT_100; | 2154 | lp->options = PCNET32_PORT_FD | PCNET32_PORT_100; |
2157 | if (netif_msg_link(lp)) | 2155 | if (netif_msg_link(lp)) |
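Two of the pcnet32 changes above are defensive: SET_NETDEV_DEV() and the subsystem-ID quirk are now guarded by a pdev check because this probe path can also be reached without a PCI device, and the module-option bounds test becomes >= so an index equal to the table size is rejected. The stand-alone sketch below illustrates only the off-by-one; options_table and lookup_option() are hypothetical names, not pcnet32 code.

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const int options_table[] = { 0, 1, 2, 3 };

    static int lookup_option(unsigned int idx, int fallback)
    {
        if (idx >= ARRAY_SIZE(options_table))   /* '>' would let idx == 4 through */
            return fallback;
        return options_table[idx];
    }

    int main(void)
    {
        printf("%d\n", lookup_option(4, -1));   /* out of range -> -1 */
        printf("%d\n", lookup_option(2, -1));   /* in range     -> 2  */
        return 0;
    }

Using an unsigned index (as the airo hunk further down also does) removes the separate "index < 0" test as well.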
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index 639d11bc444e..cd37d739ac74 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c | |||
@@ -1384,7 +1384,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb) | |||
1384 | 1384 | ||
1385 | /* create a fragment for each channel */ | 1385 | /* create a fragment for each channel */ |
1386 | bits = B; | 1386 | bits = B; |
1387 | while (nfree > 0 && len > 0) { | 1387 | while (len > 0) { |
1388 | list = list->next; | 1388 | list = list->next; |
1389 | if (list == &ppp->channels) { | 1389 | if (list == &ppp->channels) { |
1390 | i = 0; | 1390 | i = 0; |
@@ -1431,29 +1431,31 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb) | |||
1431 | *otherwise divide it according to the speed | 1431 | *otherwise divide it according to the speed |
1432 | *of the channel we are going to transmit on | 1432 | *of the channel we are going to transmit on |
1433 | */ | 1433 | */ |
1434 | if (pch->speed == 0) { | 1434 | if (nfree > 0) { |
1435 | flen = totlen/nfree ; | 1435 | if (pch->speed == 0) { |
1436 | if (nbigger > 0) { | 1436 | flen = totlen/nfree ; |
1437 | flen++; | 1437 | if (nbigger > 0) { |
1438 | nbigger--; | 1438 | flen++; |
1439 | } | 1439 | nbigger--; |
1440 | } else { | 1440 | } |
1441 | flen = (((totfree - nzero)*(totlen + hdrlen*totfree)) / | 1441 | } else { |
1442 | ((totspeed*totfree)/pch->speed)) - hdrlen; | 1442 | flen = (((totfree - nzero)*(totlen + hdrlen*totfree)) / |
1443 | if (nbigger > 0) { | 1443 | ((totspeed*totfree)/pch->speed)) - hdrlen; |
1444 | flen += ((totfree - nzero)*pch->speed)/totspeed; | 1444 | if (nbigger > 0) { |
1445 | nbigger -= ((totfree - nzero)*pch->speed)/ | 1445 | flen += ((totfree - nzero)*pch->speed)/totspeed; |
1446 | nbigger -= ((totfree - nzero)*pch->speed)/ | ||
1446 | totspeed; | 1447 | totspeed; |
1448 | } | ||
1447 | } | 1449 | } |
1450 | nfree--; | ||
1448 | } | 1451 | } |
1449 | nfree--; | ||
1450 | 1452 | ||
1451 | /* | 1453 | /* |
1452 | *check if we are on the last channel or | 1454 | *check if we are on the last channel or |
1453 | *we exceeded the length of the data to | 1455 | *we exceeded the length of the data to |
1454 | *fragment | 1456 | *fragment |
1455 | */ | 1457 | */ |
1456 | if ((nfree == 0) || (flen > len)) | 1458 | if ((nfree <= 0) || (flen > len)) |
1457 | flen = len; | 1459 | flen = len; |
1458 | /* | 1460 | /* |
1459 | *it is not worth to tx on slow channels: | 1461 | *it is not worth to tx on slow channels: |
@@ -1467,7 +1469,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb) | |||
1467 | continue; | 1469 | continue; |
1468 | } | 1470 | } |
1469 | 1471 | ||
1470 | mtu = pch->chan->mtu + 2 - hdrlen; | 1472 | mtu = pch->chan->mtu - hdrlen; |
1471 | if (mtu < 4) | 1473 | if (mtu < 4) |
1472 | mtu = 4; | 1474 | mtu = 4; |
1473 | if (flen > mtu) | 1475 | if (flen > mtu) |
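The multilink change above keeps walking the channel list even when no channel is currently free (so the frame is queued rather than dropped) and only computes a per-channel fragment length while free channels remain, sizing each fragment roughly in proportion to channel speed. The toy program below shows that proportional split in isolation; it ignores the header overhead and free/busy bookkeeping the real code handles, and all names are invented.

    #include <stdio.h>

    int main(void)
    {
        const int speed[] = { 100, 50, 50 };    /* relative channel speeds */
        const int nch = 3;
        int len = 1003, totspeed = 0, used = 0, i;

        for (i = 0; i < nch; i++)
            totspeed += speed[i];

        for (i = 0; i < nch; i++) {
            int flen = (i == nch - 1) ? len - used          /* last channel takes the rest */
                                      : len * speed[i] / totspeed;
            used += flen;
            printf("channel %d: %d bytes\n", i, flen);
        }
        return 0;
    }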
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c index f0031f1f97e5..5f2090233d7b 100644 --- a/drivers/net/pppoe.c +++ b/drivers/net/pppoe.c | |||
@@ -1063,6 +1063,7 @@ static void *pppoe_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
1063 | else { | 1063 | else { |
1064 | int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote); | 1064 | int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote); |
1065 | 1065 | ||
1066 | po = NULL; | ||
1066 | while (++hash < PPPOE_HASH_SIZE) { | 1067 | while (++hash < PPPOE_HASH_SIZE) { |
1067 | po = pn->hash_table[hash]; | 1068 | po = pn->hash_table[hash]; |
1068 | if (po) | 1069 | if (po) |
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c index e7935d09c896..e0f9219a0aea 100644 --- a/drivers/net/pppol2tp.c +++ b/drivers/net/pppol2tp.c | |||
@@ -2680,6 +2680,7 @@ out_unregister_pppol2tp_proto: | |||
2680 | static void __exit pppol2tp_exit(void) | 2680 | static void __exit pppol2tp_exit(void) |
2681 | { | 2681 | { |
2682 | unregister_pppox_proto(PX_PROTO_OL2TP); | 2682 | unregister_pppox_proto(PX_PROTO_OL2TP); |
2683 | unregister_pernet_gen_device(pppol2tp_net_id, &pppol2tp_net_ops); | ||
2683 | proto_unregister(&pppol2tp_sk_proto); | 2684 | proto_unregister(&pppol2tp_sk_proto); |
2684 | } | 2685 | } |
2685 | 2686 | ||
diff --git a/drivers/net/s6gmac.c b/drivers/net/s6gmac.c index 5345e47b35ac..4525cbe8dd69 100644 --- a/drivers/net/s6gmac.c +++ b/drivers/net/s6gmac.c | |||
@@ -793,7 +793,7 @@ static inline int s6gmac_phy_start(struct net_device *dev) | |||
793 | struct s6gmac *pd = netdev_priv(dev); | 793 | struct s6gmac *pd = netdev_priv(dev); |
794 | int i = 0; | 794 | int i = 0; |
795 | struct phy_device *p = NULL; | 795 | struct phy_device *p = NULL; |
796 | while ((!(p = pd->mii.bus->phy_map[i])) && (i < PHY_MAX_ADDR)) | 796 | while ((i < PHY_MAX_ADDR) && (!(p = pd->mii.bus->phy_map[i]))) |
797 | i++; | 797 | i++; |
798 | p = phy_connect(dev, dev_name(&p->dev), &s6gmac_adjust_link, 0, | 798 | p = phy_connect(dev, dev_name(&p->dev), &s6gmac_adjust_link, 0, |
799 | PHY_INTERFACE_MODE_RGMII); | 799 | PHY_INTERFACE_MODE_RGMII); |
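The s6gmac fix above is purely about evaluation order: with &&, the bounds test must come before the array access, or the final iteration reads one element past the end of phy_map[]. The de4x5 and airo hunks further down apply the same reordering. A stand-alone illustration with a hypothetical table:

    #include <stddef.h>
    #include <stdio.h>

    #define MAX_SLOTS 4

    int main(void)
    {
        void *slots[MAX_SLOTS] = { NULL, NULL, NULL, NULL };
        size_t i = 0;

        /* wrong:  while (!slots[i] && i < MAX_SLOTS)  -- reads slots[4] on the last pass */
        while (i < MAX_SLOTS && !slots[i])               /* right: bound checked first */
            i++;

        printf("first used slot: %zu (== MAX_SLOTS means none)\n", i);
        return 0;
    }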
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 3550c5dcd93c..0a551d8f5d95 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c | |||
@@ -1488,6 +1488,8 @@ static int sky2_up(struct net_device *dev) | |||
1488 | sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL); | 1488 | sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL); |
1489 | #endif | 1489 | #endif |
1490 | 1490 | ||
1491 | sky2->restarting = 0; | ||
1492 | |||
1491 | err = sky2_rx_start(sky2); | 1493 | err = sky2_rx_start(sky2); |
1492 | if (err) | 1494 | if (err) |
1493 | goto err_out; | 1495 | goto err_out; |
@@ -1500,6 +1502,9 @@ static int sky2_up(struct net_device *dev) | |||
1500 | 1502 | ||
1501 | sky2_set_multicast(dev); | 1503 | sky2_set_multicast(dev); |
1502 | 1504 | ||
1505 | /* wake queue in case we are restarting */ | ||
1506 | netif_wake_queue(dev); | ||
1507 | |||
1503 | if (netif_msg_ifup(sky2)) | 1508 | if (netif_msg_ifup(sky2)) |
1504 | printk(KERN_INFO PFX "%s: enabling interface\n", dev->name); | 1509 | printk(KERN_INFO PFX "%s: enabling interface\n", dev->name); |
1505 | return 0; | 1510 | return 0; |
@@ -1533,6 +1538,8 @@ static inline int tx_dist(unsigned tail, unsigned head) | |||
1533 | /* Number of list elements available for next tx */ | 1538 | /* Number of list elements available for next tx */ |
1534 | static inline int tx_avail(const struct sky2_port *sky2) | 1539 | static inline int tx_avail(const struct sky2_port *sky2) |
1535 | { | 1540 | { |
1541 | if (unlikely(sky2->restarting)) | ||
1542 | return 0; | ||
1536 | return sky2->tx_pending - tx_dist(sky2->tx_cons, sky2->tx_prod); | 1543 | return sky2->tx_pending - tx_dist(sky2->tx_cons, sky2->tx_prod); |
1537 | } | 1544 | } |
1538 | 1545 | ||
@@ -1818,6 +1825,10 @@ static int sky2_down(struct net_device *dev) | |||
1818 | if (netif_msg_ifdown(sky2)) | 1825 | if (netif_msg_ifdown(sky2)) |
1819 | printk(KERN_INFO PFX "%s: disabling interface\n", dev->name); | 1826 | printk(KERN_INFO PFX "%s: disabling interface\n", dev->name); |
1820 | 1827 | ||
1828 | /* explicitly shut off tx in case we're restarting */ | ||
1829 | sky2->restarting = 1; | ||
1830 | netif_tx_disable(dev); | ||
1831 | |||
1821 | /* Force flow control off */ | 1832 | /* Force flow control off */ |
1822 | sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); | 1833 | sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); |
1823 | 1834 | ||
@@ -2359,7 +2370,7 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last) | |||
2359 | { | 2370 | { |
2360 | struct sky2_port *sky2 = netdev_priv(dev); | 2371 | struct sky2_port *sky2 = netdev_priv(dev); |
2361 | 2372 | ||
2362 | if (netif_running(dev)) { | 2373 | if (likely(netif_running(dev) && !sky2->restarting)) { |
2363 | netif_tx_lock(dev); | 2374 | netif_tx_lock(dev); |
2364 | sky2_tx_complete(sky2, last); | 2375 | sky2_tx_complete(sky2, last); |
2365 | netif_tx_unlock(dev); | 2376 | netif_tx_unlock(dev); |
@@ -4283,6 +4294,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw, | |||
4283 | spin_lock_init(&sky2->phy_lock); | 4294 | spin_lock_init(&sky2->phy_lock); |
4284 | sky2->tx_pending = TX_DEF_PENDING; | 4295 | sky2->tx_pending = TX_DEF_PENDING; |
4285 | sky2->rx_pending = RX_DEF_PENDING; | 4296 | sky2->rx_pending = RX_DEF_PENDING; |
4297 | sky2->restarting = 0; | ||
4286 | 4298 | ||
4287 | hw->dev[port] = dev; | 4299 | hw->dev[port] = dev; |
4288 | 4300 | ||
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h index b5549c9e5107..4486b066b43f 100644 --- a/drivers/net/sky2.h +++ b/drivers/net/sky2.h | |||
@@ -2051,6 +2051,7 @@ struct sky2_port { | |||
2051 | u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */ | 2051 | u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */ |
2052 | u8 rx_csum; | 2052 | u8 rx_csum; |
2053 | u8 wol; | 2053 | u8 wol; |
2054 | u8 restarting; | ||
2054 | enum flow_control flow_mode; | 2055 | enum flow_control flow_mode; |
2055 | enum flow_control flow_status; | 2056 | enum flow_control flow_status; |
2056 | 2057 | ||
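The sky2 changes above add a restarting flag that is raised in sky2_down() before the rings are torn down, cleared in sky2_up() once they are rebuilt, and consulted in tx_avail() and sky2_tx_done() so the transmit path stays quiet across a restart. A minimal sketch of that gate follows; my_port and its fields are placeholders, and the real tx_avail() computes the ring distance modulo the ring size.

    #include <linux/types.h>

    struct my_port {
        u8  restarting;
        u16 tx_pending, tx_prod, tx_cons;
    };

    static inline int my_tx_avail(const struct my_port *p)
    {
        if (p->restarting)
            return 0;                    /* pretend the ring is full while restarting */
        return p->tx_pending - (u16)(p->tx_prod - p->tx_cons);
    }

    /* down path: p->restarting = 1; netif_tx_disable(dev); then tear the rings down
     * up path:   rebuild the rings; p->restarting = 0; then netif_wake_queue(dev)   */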
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c index 1c70e999cc50..7567f510eff5 100644 --- a/drivers/net/smc91x.c +++ b/drivers/net/smc91x.c | |||
@@ -196,21 +196,23 @@ static void PRINT_PKT(u_char *buf, int length) | |||
196 | /* this enables an interrupt in the interrupt mask register */ | 196 | /* this enables an interrupt in the interrupt mask register */ |
197 | #define SMC_ENABLE_INT(lp, x) do { \ | 197 | #define SMC_ENABLE_INT(lp, x) do { \ |
198 | unsigned char mask; \ | 198 | unsigned char mask; \ |
199 | spin_lock_irq(&lp->lock); \ | 199 | unsigned long smc_enable_flags; \ |
200 | spin_lock_irqsave(&lp->lock, smc_enable_flags); \ | ||
200 | mask = SMC_GET_INT_MASK(lp); \ | 201 | mask = SMC_GET_INT_MASK(lp); \ |
201 | mask |= (x); \ | 202 | mask |= (x); \ |
202 | SMC_SET_INT_MASK(lp, mask); \ | 203 | SMC_SET_INT_MASK(lp, mask); \ |
203 | spin_unlock_irq(&lp->lock); \ | 204 | spin_unlock_irqrestore(&lp->lock, smc_enable_flags); \ |
204 | } while (0) | 205 | } while (0) |
205 | 206 | ||
206 | /* this disables an interrupt from the interrupt mask register */ | 207 | /* this disables an interrupt from the interrupt mask register */ |
207 | #define SMC_DISABLE_INT(lp, x) do { \ | 208 | #define SMC_DISABLE_INT(lp, x) do { \ |
208 | unsigned char mask; \ | 209 | unsigned char mask; \ |
209 | spin_lock_irq(&lp->lock); \ | 210 | unsigned long smc_disable_flags; \ |
211 | spin_lock_irqsave(&lp->lock, smc_disable_flags); \ | ||
210 | mask = SMC_GET_INT_MASK(lp); \ | 212 | mask = SMC_GET_INT_MASK(lp); \ |
211 | mask &= ~(x); \ | 213 | mask &= ~(x); \ |
212 | SMC_SET_INT_MASK(lp, mask); \ | 214 | SMC_SET_INT_MASK(lp, mask); \ |
213 | spin_unlock_irq(&lp->lock); \ | 215 | spin_unlock_irqrestore(&lp->lock, smc_disable_flags); \ |
214 | } while (0) | 216 | } while (0) |
215 | 217 | ||
216 | /* | 218 | /* |
@@ -520,21 +522,21 @@ static inline void smc_rcv(struct net_device *dev) | |||
520 | * any other concurrent access and C would always interrupt B. But life | 522 | * any other concurrent access and C would always interrupt B. But life |
521 | * isn't that easy in a SMP world... | 523 | * isn't that easy in a SMP world... |
522 | */ | 524 | */ |
523 | #define smc_special_trylock(lock) \ | 525 | #define smc_special_trylock(lock, flags) \ |
524 | ({ \ | 526 | ({ \ |
525 | int __ret; \ | 527 | int __ret; \ |
526 | local_irq_disable(); \ | 528 | local_irq_save(flags); \ |
527 | __ret = spin_trylock(lock); \ | 529 | __ret = spin_trylock(lock); \ |
528 | if (!__ret) \ | 530 | if (!__ret) \ |
529 | local_irq_enable(); \ | 531 | local_irq_restore(flags); \ |
530 | __ret; \ | 532 | __ret; \ |
531 | }) | 533 | }) |
532 | #define smc_special_lock(lock) spin_lock_irq(lock) | 534 | #define smc_special_lock(lock, flags) spin_lock_irqsave(lock, flags) |
533 | #define smc_special_unlock(lock) spin_unlock_irq(lock) | 535 | #define smc_special_unlock(lock, flags) spin_unlock_irqrestore(lock, flags) |
534 | #else | 536 | #else |
535 | #define smc_special_trylock(lock) (1) | 537 | #define smc_special_trylock(lock, flags) (1) |
536 | #define smc_special_lock(lock) do { } while (0) | 538 | #define smc_special_lock(lock, flags) do { } while (0) |
537 | #define smc_special_unlock(lock) do { } while (0) | 539 | #define smc_special_unlock(lock, flags) do { } while (0) |
538 | #endif | 540 | #endif |
539 | 541 | ||
540 | /* | 542 | /* |
@@ -548,10 +550,11 @@ static void smc_hardware_send_pkt(unsigned long data) | |||
548 | struct sk_buff *skb; | 550 | struct sk_buff *skb; |
549 | unsigned int packet_no, len; | 551 | unsigned int packet_no, len; |
550 | unsigned char *buf; | 552 | unsigned char *buf; |
553 | unsigned long flags; | ||
551 | 554 | ||
552 | DBG(3, "%s: %s\n", dev->name, __func__); | 555 | DBG(3, "%s: %s\n", dev->name, __func__); |
553 | 556 | ||
554 | if (!smc_special_trylock(&lp->lock)) { | 557 | if (!smc_special_trylock(&lp->lock, flags)) { |
555 | netif_stop_queue(dev); | 558 | netif_stop_queue(dev); |
556 | tasklet_schedule(&lp->tx_task); | 559 | tasklet_schedule(&lp->tx_task); |
557 | return; | 560 | return; |
@@ -559,7 +562,7 @@ static void smc_hardware_send_pkt(unsigned long data) | |||
559 | 562 | ||
560 | skb = lp->pending_tx_skb; | 563 | skb = lp->pending_tx_skb; |
561 | if (unlikely(!skb)) { | 564 | if (unlikely(!skb)) { |
562 | smc_special_unlock(&lp->lock); | 565 | smc_special_unlock(&lp->lock, flags); |
563 | return; | 566 | return; |
564 | } | 567 | } |
565 | lp->pending_tx_skb = NULL; | 568 | lp->pending_tx_skb = NULL; |
@@ -569,7 +572,7 @@ static void smc_hardware_send_pkt(unsigned long data) | |||
569 | printk("%s: Memory allocation failed.\n", dev->name); | 572 | printk("%s: Memory allocation failed.\n", dev->name); |
570 | dev->stats.tx_errors++; | 573 | dev->stats.tx_errors++; |
571 | dev->stats.tx_fifo_errors++; | 574 | dev->stats.tx_fifo_errors++; |
572 | smc_special_unlock(&lp->lock); | 575 | smc_special_unlock(&lp->lock, flags); |
573 | goto done; | 576 | goto done; |
574 | } | 577 | } |
575 | 578 | ||
@@ -608,7 +611,7 @@ static void smc_hardware_send_pkt(unsigned long data) | |||
608 | 611 | ||
609 | /* queue the packet for TX */ | 612 | /* queue the packet for TX */ |
610 | SMC_SET_MMU_CMD(lp, MC_ENQUEUE); | 613 | SMC_SET_MMU_CMD(lp, MC_ENQUEUE); |
611 | smc_special_unlock(&lp->lock); | 614 | smc_special_unlock(&lp->lock, flags); |
612 | 615 | ||
613 | dev->trans_start = jiffies; | 616 | dev->trans_start = jiffies; |
614 | dev->stats.tx_packets++; | 617 | dev->stats.tx_packets++; |
@@ -633,6 +636,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
633 | struct smc_local *lp = netdev_priv(dev); | 636 | struct smc_local *lp = netdev_priv(dev); |
634 | void __iomem *ioaddr = lp->base; | 637 | void __iomem *ioaddr = lp->base; |
635 | unsigned int numPages, poll_count, status; | 638 | unsigned int numPages, poll_count, status; |
639 | unsigned long flags; | ||
636 | 640 | ||
637 | DBG(3, "%s: %s\n", dev->name, __func__); | 641 | DBG(3, "%s: %s\n", dev->name, __func__); |
638 | 642 | ||
@@ -658,7 +662,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
658 | return 0; | 662 | return 0; |
659 | } | 663 | } |
660 | 664 | ||
661 | smc_special_lock(&lp->lock); | 665 | smc_special_lock(&lp->lock, flags); |
662 | 666 | ||
663 | /* now, try to allocate the memory */ | 667 | /* now, try to allocate the memory */ |
664 | SMC_SET_MMU_CMD(lp, MC_ALLOC | numPages); | 668 | SMC_SET_MMU_CMD(lp, MC_ALLOC | numPages); |
@@ -676,7 +680,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
676 | } | 680 | } |
677 | } while (--poll_count); | 681 | } while (--poll_count); |
678 | 682 | ||
679 | smc_special_unlock(&lp->lock); | 683 | smc_special_unlock(&lp->lock, flags); |
680 | 684 | ||
681 | lp->pending_tx_skb = skb; | 685 | lp->pending_tx_skb = skb; |
682 | if (!poll_count) { | 686 | if (!poll_count) { |
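The smc91x macros above (and the tulip, ucc_geth and via-rhine hunks further down) switch from the plain _irq spinlock forms to _irqsave/_irqrestore: the plain forms unconditionally re-enable interrupts on unlock, which corrupts the IRQ state when the caller already had interrupts disabled. A minimal kernel-style sketch of the safe form, with placeholder names:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(my_lock);

    static void my_touch_hw(void)
    {
        unsigned long flags;

        spin_lock_irqsave(&my_lock, flags);       /* remembers the caller's IRQ state */
        /* ... program the device registers ... */
        spin_unlock_irqrestore(&my_lock, flags);  /* restores it, never forces IRQs on */
    }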
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c index eb72d2e9ab3d..acfdccd44567 100644 --- a/drivers/net/tulip/de4x5.c +++ b/drivers/net/tulip/de4x5.c | |||
@@ -5059,7 +5059,7 @@ mii_get_phy(struct net_device *dev) | |||
5059 | if ((id == 0) || (id == 65535)) continue; /* Valid ID? */ | 5059 | if ((id == 0) || (id == 65535)) continue; /* Valid ID? */ |
5060 | for (j=0; j<limit; j++) { /* Search PHY table */ | 5060 | for (j=0; j<limit; j++) { /* Search PHY table */ |
5061 | if (id != phy_info[j].id) continue; /* ID match? */ | 5061 | if (id != phy_info[j].id) continue; /* ID match? */ |
5062 | for (k=0; lp->phy[k].id && (k < DE4X5_MAX_PHY); k++); | 5062 | for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++); |
5063 | if (k < DE4X5_MAX_PHY) { | 5063 | if (k < DE4X5_MAX_PHY) { |
5064 | memcpy((char *)&lp->phy[k], | 5064 | memcpy((char *)&lp->phy[k], |
5065 | (char *)&phy_info[j], sizeof(struct phy_table)); | 5065 | (char *)&phy_info[j], sizeof(struct phy_table)); |
@@ -5072,7 +5072,7 @@ mii_get_phy(struct net_device *dev) | |||
5072 | break; | 5072 | break; |
5073 | } | 5073 | } |
5074 | if ((j == limit) && (i < DE4X5_MAX_MII)) { | 5074 | if ((j == limit) && (i < DE4X5_MAX_MII)) { |
5075 | for (k=0; lp->phy[k].id && (k < DE4X5_MAX_PHY); k++); | 5075 | for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++); |
5076 | lp->phy[k].addr = i; | 5076 | lp->phy[k].addr = i; |
5077 | lp->phy[k].id = id; | 5077 | lp->phy[k].id = id; |
5078 | lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */ | 5078 | lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */ |
@@ -5091,7 +5091,7 @@ mii_get_phy(struct net_device *dev) | |||
5091 | purgatory: | 5091 | purgatory: |
5092 | lp->active = 0; | 5092 | lp->active = 0; |
5093 | if (lp->phy[0].id) { /* Reset the PHY devices */ | 5093 | if (lp->phy[0].id) { /* Reset the PHY devices */ |
5094 | for (k=0; lp->phy[k].id && (k < DE4X5_MAX_PHY); k++) { /*For each PHY*/ | 5094 | for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++) { /*For each PHY*/ |
5095 | mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII); | 5095 | mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII); |
5096 | while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST); | 5096 | while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST); |
5097 | 5097 | ||
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c index 99a63649f4fc..4cf9a6588751 100644 --- a/drivers/net/tulip/tulip_core.c +++ b/drivers/net/tulip/tulip_core.c | |||
@@ -652,8 +652,9 @@ tulip_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
652 | int entry; | 652 | int entry; |
653 | u32 flag; | 653 | u32 flag; |
654 | dma_addr_t mapping; | 654 | dma_addr_t mapping; |
655 | unsigned long flags; | ||
655 | 656 | ||
656 | spin_lock_irq(&tp->lock); | 657 | spin_lock_irqsave(&tp->lock, flags); |
657 | 658 | ||
658 | /* Calculate the next Tx descriptor entry. */ | 659 | /* Calculate the next Tx descriptor entry. */ |
659 | entry = tp->cur_tx % TX_RING_SIZE; | 660 | entry = tp->cur_tx % TX_RING_SIZE; |
@@ -688,7 +689,7 @@ tulip_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
688 | /* Trigger an immediate transmit demand. */ | 689 | /* Trigger an immediate transmit demand. */ |
689 | iowrite32(0, tp->base_addr + CSR1); | 690 | iowrite32(0, tp->base_addr + CSR1); |
690 | 691 | ||
691 | spin_unlock_irq(&tp->lock); | 692 | spin_unlock_irqrestore(&tp->lock, flags); |
692 | 693 | ||
693 | dev->trans_start = jiffies; | 694 | dev->trans_start = jiffies; |
694 | 695 | ||
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 027f7aba26af..42b6c6319bc2 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -1048,20 +1048,15 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) | |||
1048 | return err; | 1048 | return err; |
1049 | } | 1049 | } |
1050 | 1050 | ||
1051 | static int tun_get_iff(struct net *net, struct file *file, struct ifreq *ifr) | 1051 | static int tun_get_iff(struct net *net, struct tun_struct *tun, |
1052 | struct ifreq *ifr) | ||
1052 | { | 1053 | { |
1053 | struct tun_struct *tun = tun_get(file); | ||
1054 | |||
1055 | if (!tun) | ||
1056 | return -EBADFD; | ||
1057 | |||
1058 | DBG(KERN_INFO "%s: tun_get_iff\n", tun->dev->name); | 1054 | DBG(KERN_INFO "%s: tun_get_iff\n", tun->dev->name); |
1059 | 1055 | ||
1060 | strcpy(ifr->ifr_name, tun->dev->name); | 1056 | strcpy(ifr->ifr_name, tun->dev->name); |
1061 | 1057 | ||
1062 | ifr->ifr_flags = tun_flags(tun); | 1058 | ifr->ifr_flags = tun_flags(tun); |
1063 | 1059 | ||
1064 | tun_put(tun); | ||
1065 | return 0; | 1060 | return 0; |
1066 | } | 1061 | } |
1067 | 1062 | ||
@@ -1105,8 +1100,8 @@ static int set_offload(struct net_device *dev, unsigned long arg) | |||
1105 | return 0; | 1100 | return 0; |
1106 | } | 1101 | } |
1107 | 1102 | ||
1108 | static int tun_chr_ioctl(struct inode *inode, struct file *file, | 1103 | static long tun_chr_ioctl(struct file *file, unsigned int cmd, |
1109 | unsigned int cmd, unsigned long arg) | 1104 | unsigned long arg) |
1110 | { | 1105 | { |
1111 | struct tun_file *tfile = file->private_data; | 1106 | struct tun_file *tfile = file->private_data; |
1112 | struct tun_struct *tun; | 1107 | struct tun_struct *tun; |
@@ -1128,34 +1123,32 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, | |||
1128 | (unsigned int __user*)argp); | 1123 | (unsigned int __user*)argp); |
1129 | } | 1124 | } |
1130 | 1125 | ||
1126 | rtnl_lock(); | ||
1127 | |||
1131 | tun = __tun_get(tfile); | 1128 | tun = __tun_get(tfile); |
1132 | if (cmd == TUNSETIFF && !tun) { | 1129 | if (cmd == TUNSETIFF && !tun) { |
1133 | int err; | ||
1134 | |||
1135 | ifr.ifr_name[IFNAMSIZ-1] = '\0'; | 1130 | ifr.ifr_name[IFNAMSIZ-1] = '\0'; |
1136 | 1131 | ||
1137 | rtnl_lock(); | 1132 | ret = tun_set_iff(tfile->net, file, &ifr); |
1138 | err = tun_set_iff(tfile->net, file, &ifr); | ||
1139 | rtnl_unlock(); | ||
1140 | 1133 | ||
1141 | if (err) | 1134 | if (ret) |
1142 | return err; | 1135 | goto unlock; |
1143 | 1136 | ||
1144 | if (copy_to_user(argp, &ifr, sizeof(ifr))) | 1137 | if (copy_to_user(argp, &ifr, sizeof(ifr))) |
1145 | return -EFAULT; | 1138 | ret = -EFAULT; |
1146 | return 0; | 1139 | goto unlock; |
1147 | } | 1140 | } |
1148 | 1141 | ||
1149 | 1142 | ret = -EBADFD; | |
1150 | if (!tun) | 1143 | if (!tun) |
1151 | return -EBADFD; | 1144 | goto unlock; |
1152 | 1145 | ||
1153 | DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->dev->name, cmd); | 1146 | DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->dev->name, cmd); |
1154 | 1147 | ||
1155 | ret = 0; | 1148 | ret = 0; |
1156 | switch (cmd) { | 1149 | switch (cmd) { |
1157 | case TUNGETIFF: | 1150 | case TUNGETIFF: |
1158 | ret = tun_get_iff(current->nsproxy->net_ns, file, &ifr); | 1151 | ret = tun_get_iff(current->nsproxy->net_ns, tun, &ifr); |
1159 | if (ret) | 1152 | if (ret) |
1160 | break; | 1153 | break; |
1161 | 1154 | ||
@@ -1201,7 +1194,6 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, | |||
1201 | 1194 | ||
1202 | case TUNSETLINK: | 1195 | case TUNSETLINK: |
1203 | /* Only allow setting the type when the interface is down */ | 1196 | /* Only allow setting the type when the interface is down */ |
1204 | rtnl_lock(); | ||
1205 | if (tun->dev->flags & IFF_UP) { | 1197 | if (tun->dev->flags & IFF_UP) { |
1206 | DBG(KERN_INFO "%s: Linktype set failed because interface is up\n", | 1198 | DBG(KERN_INFO "%s: Linktype set failed because interface is up\n", |
1207 | tun->dev->name); | 1199 | tun->dev->name); |
@@ -1211,7 +1203,6 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, | |||
1211 | DBG(KERN_INFO "%s: linktype set to %d\n", tun->dev->name, tun->dev->type); | 1203 | DBG(KERN_INFO "%s: linktype set to %d\n", tun->dev->name, tun->dev->type); |
1212 | ret = 0; | 1204 | ret = 0; |
1213 | } | 1205 | } |
1214 | rtnl_unlock(); | ||
1215 | break; | 1206 | break; |
1216 | 1207 | ||
1217 | #ifdef TUN_DEBUG | 1208 | #ifdef TUN_DEBUG |
@@ -1220,9 +1211,7 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, | |||
1220 | break; | 1211 | break; |
1221 | #endif | 1212 | #endif |
1222 | case TUNSETOFFLOAD: | 1213 | case TUNSETOFFLOAD: |
1223 | rtnl_lock(); | ||
1224 | ret = set_offload(tun->dev, arg); | 1214 | ret = set_offload(tun->dev, arg); |
1225 | rtnl_unlock(); | ||
1226 | break; | 1215 | break; |
1227 | 1216 | ||
1228 | case TUNSETTXFILTER: | 1217 | case TUNSETTXFILTER: |
@@ -1230,9 +1219,7 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, | |||
1230 | ret = -EINVAL; | 1219 | ret = -EINVAL; |
1231 | if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV) | 1220 | if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV) |
1232 | break; | 1221 | break; |
1233 | rtnl_lock(); | ||
1234 | ret = update_filter(&tun->txflt, (void __user *)arg); | 1222 | ret = update_filter(&tun->txflt, (void __user *)arg); |
1235 | rtnl_unlock(); | ||
1236 | break; | 1223 | break; |
1237 | 1224 | ||
1238 | case SIOCGIFHWADDR: | 1225 | case SIOCGIFHWADDR: |
@@ -1248,9 +1235,7 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, | |||
1248 | DBG(KERN_DEBUG "%s: set hw address: %pM\n", | 1235 | DBG(KERN_DEBUG "%s: set hw address: %pM\n", |
1249 | tun->dev->name, ifr.ifr_hwaddr.sa_data); | 1236 | tun->dev->name, ifr.ifr_hwaddr.sa_data); |
1250 | 1237 | ||
1251 | rtnl_lock(); | ||
1252 | ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr); | 1238 | ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr); |
1253 | rtnl_unlock(); | ||
1254 | break; | 1239 | break; |
1255 | 1240 | ||
1256 | case TUNGETSNDBUF: | 1241 | case TUNGETSNDBUF: |
@@ -1273,7 +1258,10 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, | |||
1273 | break; | 1258 | break; |
1274 | }; | 1259 | }; |
1275 | 1260 | ||
1276 | tun_put(tun); | 1261 | unlock: |
1262 | rtnl_unlock(); | ||
1263 | if (tun) | ||
1264 | tun_put(tun); | ||
1277 | return ret; | 1265 | return ret; |
1278 | } | 1266 | } |
1279 | 1267 | ||
@@ -1361,7 +1349,7 @@ static const struct file_operations tun_fops = { | |||
1361 | .write = do_sync_write, | 1349 | .write = do_sync_write, |
1362 | .aio_write = tun_chr_aio_write, | 1350 | .aio_write = tun_chr_aio_write, |
1363 | .poll = tun_chr_poll, | 1351 | .poll = tun_chr_poll, |
1364 | .ioctl = tun_chr_ioctl, | 1352 | .unlocked_ioctl = tun_chr_ioctl, |
1365 | .open = tun_chr_open, | 1353 | .open = tun_chr_open, |
1366 | .release = tun_chr_close, | 1354 | .release = tun_chr_close, |
1367 | .fasync = tun_chr_fasync | 1355 | .fasync = tun_chr_fasync |
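The tun conversion above moves the handler to .unlocked_ioctl, takes rtnl_lock() once at the top, and funnels every return through a single unlock: label so the lock and the tun reference are always dropped exactly once. The sketch below shows that single-lock, single-exit shape with a generic mutex standing in for the RTNL; the my_* names are placeholders, not tun symbols.

    #include <linux/fs.h>
    #include <linux/mutex.h>
    #include <linux/errno.h>

    static DEFINE_MUTEX(my_mutex);

    /* placeholder for the per-command work done while the lock is held */
    static long my_handle_cmd(unsigned int cmd, unsigned long arg)
    {
        return -ENOTTY;
    }

    static long my_dev_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg)
    {
        long ret;

        mutex_lock(&my_mutex);            /* tun takes rtnl_lock() at this point */
        ret = my_handle_cmd(cmd, arg);    /* every sub-command runs under the lock */
        mutex_unlock(&my_mutex);          /* one unlock for every exit path */
        return ret;
    }

    static const struct file_operations my_fops = {
        .unlocked_ioctl = my_dev_ioctl,   /* replaces the old .ioctl entry */
    };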
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index 3b957e6412ee..8a7b8c7bd781 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c | |||
@@ -3111,10 +3111,11 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3111 | u8 __iomem *bd; /* BD pointer */ | 3111 | u8 __iomem *bd; /* BD pointer */ |
3112 | u32 bd_status; | 3112 | u32 bd_status; |
3113 | u8 txQ = 0; | 3113 | u8 txQ = 0; |
3114 | unsigned long flags; | ||
3114 | 3115 | ||
3115 | ugeth_vdbg("%s: IN", __func__); | 3116 | ugeth_vdbg("%s: IN", __func__); |
3116 | 3117 | ||
3117 | spin_lock_irq(&ugeth->lock); | 3118 | spin_lock_irqsave(&ugeth->lock, flags); |
3118 | 3119 | ||
3119 | dev->stats.tx_bytes += skb->len; | 3120 | dev->stats.tx_bytes += skb->len; |
3120 | 3121 | ||
@@ -3171,7 +3172,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3171 | uccf = ugeth->uccf; | 3172 | uccf = ugeth->uccf; |
3172 | out_be16(uccf->p_utodr, UCC_FAST_TOD); | 3173 | out_be16(uccf->p_utodr, UCC_FAST_TOD); |
3173 | #endif | 3174 | #endif |
3174 | spin_unlock_irq(&ugeth->lock); | 3175 | spin_unlock_irqrestore(&ugeth->lock, flags); |
3175 | 3176 | ||
3176 | return 0; | 3177 | return 0; |
3177 | } | 3178 | } |
diff --git a/drivers/net/usb/pegasus.h b/drivers/net/usb/pegasus.h index c7467823cd1c..f968c834ff63 100644 --- a/drivers/net/usb/pegasus.h +++ b/drivers/net/usb/pegasus.h | |||
@@ -250,6 +250,8 @@ PEGASUS_DEV( "IO DATA USB ET/TX", VENDOR_IODATA, 0x0904, | |||
250 | DEFAULT_GPIO_RESET ) | 250 | DEFAULT_GPIO_RESET ) |
251 | PEGASUS_DEV( "IO DATA USB ET/TX-S", VENDOR_IODATA, 0x0913, | 251 | PEGASUS_DEV( "IO DATA USB ET/TX-S", VENDOR_IODATA, 0x0913, |
252 | DEFAULT_GPIO_RESET | PEGASUS_II ) | 252 | DEFAULT_GPIO_RESET | PEGASUS_II ) |
253 | PEGASUS_DEV( "IO DATA USB ETX-US2", VENDOR_IODATA, 0x092a, | ||
254 | DEFAULT_GPIO_RESET | PEGASUS_II ) | ||
253 | PEGASUS_DEV( "Kingston KNU101TX Ethernet", VENDOR_KINGSTON, 0x000a, | 255 | PEGASUS_DEV( "Kingston KNU101TX Ethernet", VENDOR_KINGSTON, 0x000a, |
254 | DEFAULT_GPIO_RESET) | 256 | DEFAULT_GPIO_RESET) |
255 | PEGASUS_DEV( "LANEED USB Ethernet LD-USB/TX", VENDOR_LANEED, 0x4002, | 257 | PEGASUS_DEV( "LANEED USB Ethernet LD-USB/TX", VENDOR_LANEED, 0x4002, |
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c index 88c30a58b4bd..934f7671650a 100644 --- a/drivers/net/via-rhine.c +++ b/drivers/net/via-rhine.c | |||
@@ -1218,6 +1218,7 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev) | |||
1218 | struct rhine_private *rp = netdev_priv(dev); | 1218 | struct rhine_private *rp = netdev_priv(dev); |
1219 | void __iomem *ioaddr = rp->base; | 1219 | void __iomem *ioaddr = rp->base; |
1220 | unsigned entry; | 1220 | unsigned entry; |
1221 | unsigned long flags; | ||
1221 | 1222 | ||
1222 | /* Caution: the write order is important here, set the field | 1223 | /* Caution: the write order is important here, set the field |
1223 | with the "ownership" bits last. */ | 1224 | with the "ownership" bits last. */ |
@@ -1261,7 +1262,7 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev) | |||
1261 | cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN)); | 1262 | cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN)); |
1262 | 1263 | ||
1263 | /* lock eth irq */ | 1264 | /* lock eth irq */ |
1264 | spin_lock_irq(&rp->lock); | 1265 | spin_lock_irqsave(&rp->lock, flags); |
1265 | wmb(); | 1266 | wmb(); |
1266 | rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn); | 1267 | rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn); |
1267 | wmb(); | 1268 | wmb(); |
@@ -1280,7 +1281,7 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev) | |||
1280 | 1281 | ||
1281 | dev->trans_start = jiffies; | 1282 | dev->trans_start = jiffies; |
1282 | 1283 | ||
1283 | spin_unlock_irq(&rp->lock); | 1284 | spin_unlock_irqrestore(&rp->lock, flags); |
1284 | 1285 | ||
1285 | if (debug > 4) { | 1286 | if (debug > 4) { |
1286 | printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n", | 1287 | printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n", |
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index 3ba35956327a..cee08a1e497a 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c | |||
@@ -1778,7 +1778,7 @@ static void velocity_error(struct velocity_info *vptr, int status) | |||
1778 | * mode | 1778 | * mode |
1779 | */ | 1779 | */ |
1780 | if (vptr->rev_id < REV_ID_VT3216_A0) { | 1780 | if (vptr->rev_id < REV_ID_VT3216_A0) { |
1781 | if (vptr->mii_status | VELOCITY_DUPLEX_FULL) | 1781 | if (vptr->mii_status & VELOCITY_DUPLEX_FULL) |
1782 | BYTE_REG_BITS_ON(TCR_TB2BDIS, ®s->TCR); | 1782 | BYTE_REG_BITS_ON(TCR_TB2BDIS, ®s->TCR); |
1783 | else | 1783 | else |
1784 | BYTE_REG_BITS_OFF(TCR_TB2BDIS, ®s->TCR); | 1784 | BYTE_REG_BITS_OFF(TCR_TB2BDIS, ®s->TCR); |
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 2a6e81d5b579..bbedf03a2124 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -70,6 +70,9 @@ struct virtnet_info | |||
70 | struct sk_buff_head recv; | 70 | struct sk_buff_head recv; |
71 | struct sk_buff_head send; | 71 | struct sk_buff_head send; |
72 | 72 | ||
73 | /* Work struct for refilling if we run low on memory. */ | ||
74 | struct delayed_work refill; | ||
75 | |||
73 | /* Chain pages by the private ptr. */ | 76 | /* Chain pages by the private ptr. */ |
74 | struct page *pages; | 77 | struct page *pages; |
75 | }; | 78 | }; |
@@ -273,19 +276,22 @@ drop: | |||
273 | dev_kfree_skb(skb); | 276 | dev_kfree_skb(skb); |
274 | } | 277 | } |
275 | 278 | ||
276 | static void try_fill_recv_maxbufs(struct virtnet_info *vi) | 279 | static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp) |
277 | { | 280 | { |
278 | struct sk_buff *skb; | 281 | struct sk_buff *skb; |
279 | struct scatterlist sg[2+MAX_SKB_FRAGS]; | 282 | struct scatterlist sg[2+MAX_SKB_FRAGS]; |
280 | int num, err, i; | 283 | int num, err, i; |
284 | bool oom = false; | ||
281 | 285 | ||
282 | sg_init_table(sg, 2+MAX_SKB_FRAGS); | 286 | sg_init_table(sg, 2+MAX_SKB_FRAGS); |
283 | for (;;) { | 287 | for (;;) { |
284 | struct virtio_net_hdr *hdr; | 288 | struct virtio_net_hdr *hdr; |
285 | 289 | ||
286 | skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN); | 290 | skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN); |
287 | if (unlikely(!skb)) | 291 | if (unlikely(!skb)) { |
292 | oom = true; | ||
288 | break; | 293 | break; |
294 | } | ||
289 | 295 | ||
290 | skb_reserve(skb, NET_IP_ALIGN); | 296 | skb_reserve(skb, NET_IP_ALIGN); |
291 | skb_put(skb, MAX_PACKET_LEN); | 297 | skb_put(skb, MAX_PACKET_LEN); |
@@ -296,7 +302,7 @@ static void try_fill_recv_maxbufs(struct virtnet_info *vi) | |||
296 | if (vi->big_packets) { | 302 | if (vi->big_packets) { |
297 | for (i = 0; i < MAX_SKB_FRAGS; i++) { | 303 | for (i = 0; i < MAX_SKB_FRAGS; i++) { |
298 | skb_frag_t *f = &skb_shinfo(skb)->frags[i]; | 304 | skb_frag_t *f = &skb_shinfo(skb)->frags[i]; |
299 | f->page = get_a_page(vi, GFP_ATOMIC); | 305 | f->page = get_a_page(vi, gfp); |
300 | if (!f->page) | 306 | if (!f->page) |
301 | break; | 307 | break; |
302 | 308 | ||
@@ -325,31 +331,35 @@ static void try_fill_recv_maxbufs(struct virtnet_info *vi) | |||
325 | if (unlikely(vi->num > vi->max)) | 331 | if (unlikely(vi->num > vi->max)) |
326 | vi->max = vi->num; | 332 | vi->max = vi->num; |
327 | vi->rvq->vq_ops->kick(vi->rvq); | 333 | vi->rvq->vq_ops->kick(vi->rvq); |
334 | return !oom; | ||
328 | } | 335 | } |
329 | 336 | ||
330 | static void try_fill_recv(struct virtnet_info *vi) | 337 | /* Returns false if we couldn't fill entirely (OOM). */ |
338 | static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp) | ||
331 | { | 339 | { |
332 | struct sk_buff *skb; | 340 | struct sk_buff *skb; |
333 | struct scatterlist sg[1]; | 341 | struct scatterlist sg[1]; |
334 | int err; | 342 | int err; |
343 | bool oom = false; | ||
335 | 344 | ||
336 | if (!vi->mergeable_rx_bufs) { | 345 | if (!vi->mergeable_rx_bufs) |
337 | try_fill_recv_maxbufs(vi); | 346 | return try_fill_recv_maxbufs(vi, gfp); |
338 | return; | ||
339 | } | ||
340 | 347 | ||
341 | for (;;) { | 348 | for (;;) { |
342 | skb_frag_t *f; | 349 | skb_frag_t *f; |
343 | 350 | ||
344 | skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN); | 351 | skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN); |
345 | if (unlikely(!skb)) | 352 | if (unlikely(!skb)) { |
353 | oom = true; | ||
346 | break; | 354 | break; |
355 | } | ||
347 | 356 | ||
348 | skb_reserve(skb, NET_IP_ALIGN); | 357 | skb_reserve(skb, NET_IP_ALIGN); |
349 | 358 | ||
350 | f = &skb_shinfo(skb)->frags[0]; | 359 | f = &skb_shinfo(skb)->frags[0]; |
351 | f->page = get_a_page(vi, GFP_ATOMIC); | 360 | f->page = get_a_page(vi, gfp); |
352 | if (!f->page) { | 361 | if (!f->page) { |
362 | oom = true; | ||
353 | kfree_skb(skb); | 363 | kfree_skb(skb); |
354 | break; | 364 | break; |
355 | } | 365 | } |
@@ -373,6 +383,7 @@ static void try_fill_recv(struct virtnet_info *vi) | |||
373 | if (unlikely(vi->num > vi->max)) | 383 | if (unlikely(vi->num > vi->max)) |
374 | vi->max = vi->num; | 384 | vi->max = vi->num; |
375 | vi->rvq->vq_ops->kick(vi->rvq); | 385 | vi->rvq->vq_ops->kick(vi->rvq); |
386 | return !oom; | ||
376 | } | 387 | } |
377 | 388 | ||
378 | static void skb_recv_done(struct virtqueue *rvq) | 389 | static void skb_recv_done(struct virtqueue *rvq) |
@@ -385,6 +396,23 @@ static void skb_recv_done(struct virtqueue *rvq) | |||
385 | } | 396 | } |
386 | } | 397 | } |
387 | 398 | ||
399 | static void refill_work(struct work_struct *work) | ||
400 | { | ||
401 | struct virtnet_info *vi; | ||
402 | bool still_empty; | ||
403 | |||
404 | vi = container_of(work, struct virtnet_info, refill.work); | ||
405 | napi_disable(&vi->napi); | ||
406 | try_fill_recv(vi, GFP_KERNEL); | ||
407 | still_empty = (vi->num == 0); | ||
408 | napi_enable(&vi->napi); | ||
409 | |||
410 | /* In theory, this can happen: if we don't get any buffers in | ||
411 | * we will *never* try to fill again. */ | ||
412 | if (still_empty) | ||
413 | schedule_delayed_work(&vi->refill, HZ/2); | ||
414 | } | ||
415 | |||
388 | static int virtnet_poll(struct napi_struct *napi, int budget) | 416 | static int virtnet_poll(struct napi_struct *napi, int budget) |
389 | { | 417 | { |
390 | struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi); | 418 | struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi); |
@@ -400,10 +428,10 @@ again: | |||
400 | received++; | 428 | received++; |
401 | } | 429 | } |
402 | 430 | ||
403 | /* FIXME: If we oom and completely run out of inbufs, we need | 431 | if (vi->num < vi->max / 2) { |
404 | * to start a timer trying to fill more. */ | 432 | if (!try_fill_recv(vi, GFP_ATOMIC)) |
405 | if (vi->num < vi->max / 2) | 433 | schedule_delayed_work(&vi->refill, 0); |
406 | try_fill_recv(vi); | 434 | } |
407 | 435 | ||
408 | /* Out of packets? */ | 436 | /* Out of packets? */ |
409 | if (received < budget) { | 437 | if (received < budget) { |
@@ -893,6 +921,7 @@ static int virtnet_probe(struct virtio_device *vdev) | |||
893 | vi->vdev = vdev; | 921 | vi->vdev = vdev; |
894 | vdev->priv = vi; | 922 | vdev->priv = vi; |
895 | vi->pages = NULL; | 923 | vi->pages = NULL; |
924 | INIT_DELAYED_WORK(&vi->refill, refill_work); | ||
896 | 925 | ||
897 | /* If they give us a callback when all buffers are done, we don't need | 926 | /* If they give us a callback when all buffers are done, we don't need |
898 | * the timer. */ | 927 | * the timer. */ |
@@ -941,7 +970,7 @@ static int virtnet_probe(struct virtio_device *vdev) | |||
941 | } | 970 | } |
942 | 971 | ||
943 | /* Last of all, set up some receive buffers. */ | 972 | /* Last of all, set up some receive buffers. */ |
944 | try_fill_recv(vi); | 973 | try_fill_recv(vi, GFP_KERNEL); |
945 | 974 | ||
946 | /* If we didn't even get one input buffer, we're useless. */ | 975 | /* If we didn't even get one input buffer, we're useless. */ |
947 | if (vi->num == 0) { | 976 | if (vi->num == 0) { |
@@ -958,6 +987,7 @@ static int virtnet_probe(struct virtio_device *vdev) | |||
958 | 987 | ||
959 | unregister: | 988 | unregister: |
960 | unregister_netdev(dev); | 989 | unregister_netdev(dev); |
990 | cancel_delayed_work_sync(&vi->refill); | ||
961 | free_vqs: | 991 | free_vqs: |
962 | vdev->config->del_vqs(vdev); | 992 | vdev->config->del_vqs(vdev); |
963 | free: | 993 | free: |
@@ -986,6 +1016,7 @@ static void virtnet_remove(struct virtio_device *vdev) | |||
986 | BUG_ON(vi->num != 0); | 1016 | BUG_ON(vi->num != 0); |
987 | 1017 | ||
988 | unregister_netdev(vi->dev); | 1018 | unregister_netdev(vi->dev); |
1019 | cancel_delayed_work_sync(&vi->refill); | ||
989 | 1020 | ||
990 | vdev->config->del_vqs(vi->vdev); | 1021 | vdev->config->del_vqs(vi->vdev); |
991 | 1022 | ||
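The virtio_net changes above make the receive refill take an explicit gfp argument and report allocation failure: the NAPI path refills with GFP_ATOMIC and, if that fails, schedules a delayed work item that retries with GFP_KERNEL and re-arms itself while the ring stays empty. A minimal sketch of that fallback pattern follows; the my_* names are placeholders, and the work item would be set up with INIT_DELAYED_WORK() as the probe hunk above does.

    #include <linux/kernel.h>
    #include <linux/types.h>
    #include <linux/workqueue.h>
    #include <linux/gfp.h>
    #include <linux/jiffies.h>

    struct my_dev {
        struct delayed_work refill;
        /* ... receive buffer pool ... */
    };

    /* stub: would return false when an allocation with 'gfp' failed (OOM) */
    static bool my_try_fill(struct my_dev *d, gfp_t gfp)
    {
        return true;
    }

    static void my_refill_work(struct work_struct *work)
    {
        struct my_dev *d = container_of(work, struct my_dev, refill.work);

        if (!my_try_fill(d, GFP_KERNEL))
            schedule_delayed_work(&d->refill, HZ / 2);   /* still empty: try again later */
    }

    /* fast path (e.g. NAPI poll): atomic attempt, defer to the work item on failure */
    static void my_rx_poll(struct my_dev *d)
    {
        if (!my_try_fill(d, GFP_ATOMIC))
            schedule_delayed_work(&d->refill, 0);
    }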
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index c70604f0329e..8ce5e4cee168 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c | |||
@@ -5918,20 +5918,19 @@ static int airo_set_essid(struct net_device *dev, | |||
5918 | readSsidRid(local, &SSID_rid); | 5918 | readSsidRid(local, &SSID_rid); |
5919 | 5919 | ||
5920 | /* Check if we asked for `any' */ | 5920 | /* Check if we asked for `any' */ |
5921 | if(dwrq->flags == 0) { | 5921 | if (dwrq->flags == 0) { |
5922 | /* Just send an empty SSID list */ | 5922 | /* Just send an empty SSID list */ |
5923 | memset(&SSID_rid, 0, sizeof(SSID_rid)); | 5923 | memset(&SSID_rid, 0, sizeof(SSID_rid)); |
5924 | } else { | 5924 | } else { |
5925 | int index = (dwrq->flags & IW_ENCODE_INDEX) - 1; | 5925 | unsigned index = (dwrq->flags & IW_ENCODE_INDEX) - 1; |
5926 | 5926 | ||
5927 | /* Check the size of the string */ | 5927 | /* Check the size of the string */ |
5928 | if(dwrq->length > IW_ESSID_MAX_SIZE) { | 5928 | if (dwrq->length > IW_ESSID_MAX_SIZE) |
5929 | return -E2BIG ; | 5929 | return -E2BIG ; |
5930 | } | 5930 | |
5931 | /* Check if index is valid */ | 5931 | /* Check if index is valid */ |
5932 | if((index < 0) || (index >= 4)) { | 5932 | if (index >= ARRAY_SIZE(SSID_rid.ssids)) |
5933 | return -EINVAL; | 5933 | return -EINVAL; |
5934 | } | ||
5935 | 5934 | ||
5936 | /* Set the SSID */ | 5935 | /* Set the SSID */ |
5937 | memset(SSID_rid.ssids[index].ssid, 0, | 5936 | memset(SSID_rid.ssids[index].ssid, 0, |
@@ -6819,7 +6818,7 @@ static int airo_set_txpow(struct net_device *dev, | |||
6819 | return -EINVAL; | 6818 | return -EINVAL; |
6820 | } | 6819 | } |
6821 | clear_bit (FLAG_RADIO_OFF, &local->flags); | 6820 | clear_bit (FLAG_RADIO_OFF, &local->flags); |
6822 | for (i = 0; cap_rid.txPowerLevels[i] && (i < 8); i++) | 6821 | for (i = 0; i < 8 && cap_rid.txPowerLevels[i]; i++) |
6823 | if (v == cap_rid.txPowerLevels[i]) { | 6822 | if (v == cap_rid.txPowerLevels[i]) { |
6824 | readConfigRid(local, 1); | 6823 | readConfigRid(local, 1); |
6825 | local->config.txPower = v; | 6824 | local->config.txPower = v; |
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c index 9d38cf60a0db..88c3d8573869 100644 --- a/drivers/net/wireless/ath/ar9170/main.c +++ b/drivers/net/wireless/ath/ar9170/main.c | |||
@@ -1967,13 +1967,14 @@ static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue, | |||
1967 | int ret; | 1967 | int ret; |
1968 | 1968 | ||
1969 | mutex_lock(&ar->mutex); | 1969 | mutex_lock(&ar->mutex); |
1970 | if ((param) && !(queue > __AR9170_NUM_TXQ)) { | 1970 | if (queue < __AR9170_NUM_TXQ) { |
1971 | memcpy(&ar->edcf[ar9170_qos_hwmap[queue]], | 1971 | memcpy(&ar->edcf[ar9170_qos_hwmap[queue]], |
1972 | param, sizeof(*param)); | 1972 | param, sizeof(*param)); |
1973 | 1973 | ||
1974 | ret = ar9170_set_qos(ar); | 1974 | ret = ar9170_set_qos(ar); |
1975 | } else | 1975 | } else { |
1976 | ret = -EINVAL; | 1976 | ret = -EINVAL; |
1977 | } | ||
1977 | 1978 | ||
1978 | mutex_unlock(&ar->mutex); | 1979 | mutex_unlock(&ar->mutex); |
1979 | return ret; | 1980 | return ret; |
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c index 754b1f8d8da9..007eb85fc67e 100644 --- a/drivers/net/wireless/ath/ar9170/usb.c +++ b/drivers/net/wireless/ath/ar9170/usb.c | |||
@@ -598,11 +598,15 @@ static int ar9170_usb_request_firmware(struct ar9170_usb *aru) | |||
598 | 598 | ||
599 | err = request_firmware(&aru->init_values, "ar9170-1.fw", | 599 | err = request_firmware(&aru->init_values, "ar9170-1.fw", |
600 | &aru->udev->dev); | 600 | &aru->udev->dev); |
601 | if (err) { | ||
602 | dev_err(&aru->udev->dev, "file with init values not found.\n"); | ||
603 | return err; | ||
604 | } | ||
601 | 605 | ||
602 | err = request_firmware(&aru->firmware, "ar9170-2.fw", &aru->udev->dev); | 606 | err = request_firmware(&aru->firmware, "ar9170-2.fw", &aru->udev->dev); |
603 | if (err) { | 607 | if (err) { |
604 | release_firmware(aru->init_values); | 608 | release_firmware(aru->init_values); |
605 | dev_err(&aru->udev->dev, "file with init values not found.\n"); | 609 | dev_err(&aru->udev->dev, "firmware file not found.\n"); |
606 | return err; | 610 | return err; |
607 | } | 611 | } |
608 | 612 | ||
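The ar9170 USB fix above checks the first request_firmware() call before issuing the second and keeps the error messages distinct, so a missing initvals file is reported immediately and a failure of the second request releases the first image. A kernel-style sketch of that two-stage load with proper unwinding, using placeholder function names:

    #include <linux/firmware.h>
    #include <linux/device.h>

    static int my_load_firmware(struct device *dev,
                                const struct firmware **init_fw,
                                const struct firmware **main_fw)
    {
        int err;

        err = request_firmware(init_fw, "ar9170-1.fw", dev);
        if (err)
            return err;                      /* nothing acquired yet, nothing to undo */

        err = request_firmware(main_fw, "ar9170-2.fw", dev);
        if (err) {
            release_firmware(*init_fw);      /* undo the first request on failure */
            return err;
        }
        return 0;
    }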
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c index a2fda702b620..ce0e86c36a82 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.c +++ b/drivers/net/wireless/ath/ath9k/eeprom.c | |||
@@ -460,7 +460,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah) | |||
460 | integer = swab32(eep->modalHeader.antCtrlCommon); | 460 | integer = swab32(eep->modalHeader.antCtrlCommon); |
461 | eep->modalHeader.antCtrlCommon = integer; | 461 | eep->modalHeader.antCtrlCommon = integer; |
462 | 462 | ||
463 | for (i = 0; i < AR5416_MAX_CHAINS; i++) { | 463 | for (i = 0; i < AR5416_EEP4K_MAX_CHAINS; i++) { |
464 | integer = swab32(eep->modalHeader.antCtrlChain[i]); | 464 | integer = swab32(eep->modalHeader.antCtrlChain[i]); |
465 | eep->modalHeader.antCtrlChain[i] = integer; | 465 | eep->modalHeader.antCtrlChain[i] = integer; |
466 | } | 466 | } |
@@ -914,7 +914,7 @@ static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah, | |||
914 | ctlMode, numCtlModes, isHt40CtlMode, | 914 | ctlMode, numCtlModes, isHt40CtlMode, |
915 | (pCtlMode[ctlMode] & EXT_ADDITIVE)); | 915 | (pCtlMode[ctlMode] & EXT_ADDITIVE)); |
916 | 916 | ||
917 | for (i = 0; (i < AR5416_NUM_CTLS) && | 917 | for (i = 0; (i < AR5416_EEP4K_NUM_CTLS) && |
918 | pEepData->ctlIndex[i]; i++) { | 918 | pEepData->ctlIndex[i]; i++) { |
919 | DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, | 919 | DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, |
920 | " LOOP-Ctlidx %d: cfgCtl 0x%2.2x " | 920 | " LOOP-Ctlidx %d: cfgCtl 0x%2.2x " |
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c index 44c29b3f6728..6dcac73b4d29 100644 --- a/drivers/net/wireless/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/ipw2x00/ipw2200.c | |||
@@ -6226,7 +6226,7 @@ static void ipw_add_scan_channels(struct ipw_priv *priv, | |||
6226 | }; | 6226 | }; |
6227 | 6227 | ||
6228 | u8 channel; | 6228 | u8 channel; |
6229 | while (channel_index < IPW_SCAN_CHANNELS) { | 6229 | while (channel_index < IPW_SCAN_CHANNELS - 1) { |
6230 | channel = | 6230 | channel = |
6231 | priv->speed_scan[priv->speed_scan_pos]; | 6231 | priv->speed_scan[priv->speed_scan_pos]; |
6232 | if (channel == 0) { | 6232 | if (channel == 0) { |
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h index fbb3a573463e..2de6471d4be9 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945.h +++ b/drivers/net/wireless/iwlwifi/iwl-3945.h | |||
@@ -112,7 +112,7 @@ enum iwl3945_antenna { | |||
112 | #define IWL_TX_FIFO_NONE 7 | 112 | #define IWL_TX_FIFO_NONE 7 |
113 | 113 | ||
114 | /* Minimum number of queues. MAX_NUM is defined in hw specific files */ | 114 | /* Minimum number of queues. MAX_NUM is defined in hw specific files */ |
115 | #define IWL_MIN_NUM_QUEUES 4 | 115 | #define IWL39_MIN_NUM_QUEUES 4 |
116 | 116 | ||
117 | #define IEEE80211_DATA_LEN 2304 | 117 | #define IEEE80211_DATA_LEN 2304 |
118 | #define IEEE80211_4ADDR_LEN 30 | 118 | #define IEEE80211_4ADDR_LEN 30 |
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c index 6ab07165ea28..18b135f510e5 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.c +++ b/drivers/net/wireless/iwlwifi/iwl-core.c | |||
@@ -1332,6 +1332,9 @@ int iwl_setup_mac(struct iwl_priv *priv) | |||
1332 | 1332 | ||
1333 | hw->wiphy->custom_regulatory = true; | 1333 | hw->wiphy->custom_regulatory = true; |
1334 | 1334 | ||
1335 | /* Firmware does not support this */ | ||
1336 | hw->wiphy->disable_beacon_hints = true; | ||
1337 | |||
1335 | hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; | 1338 | hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; |
1336 | /* we create the 802.11 header and a zero-length SSID element */ | 1339 | /* we create the 802.11 header and a zero-length SSID element */ |
1337 | hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2; | 1340 | hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2; |
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c index 11e08c068917..ca00cc8ad4c7 100644 --- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c +++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c | |||
@@ -308,18 +308,18 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file, | |||
308 | return -ENODATA; | 308 | return -ENODATA; |
309 | } | 309 | } |
310 | 310 | ||
311 | ptr = priv->eeprom; | ||
312 | if (!ptr) { | ||
313 | IWL_ERR(priv, "Invalid EEPROM/OTP memory\n"); | ||
314 | return -ENOMEM; | ||
315 | } | ||
316 | |||
311 | /* 4 characters for byte 0xYY */ | 317 | /* 4 characters for byte 0xYY */ |
312 | buf = kzalloc(buf_size, GFP_KERNEL); | 318 | buf = kzalloc(buf_size, GFP_KERNEL); |
313 | if (!buf) { | 319 | if (!buf) { |
314 | IWL_ERR(priv, "Can not allocate Buffer\n"); | 320 | IWL_ERR(priv, "Can not allocate Buffer\n"); |
315 | return -ENOMEM; | 321 | return -ENOMEM; |
316 | } | 322 | } |
317 | |||
318 | ptr = priv->eeprom; | ||
319 | if (!ptr) { | ||
320 | IWL_ERR(priv, "Invalid EEPROM/OTP memory\n"); | ||
321 | return -ENOMEM; | ||
322 | } | ||
323 | pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s\n", | 323 | pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s\n", |
324 | (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) | 324 | (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) |
325 | ? "OTP" : "EEPROM"); | 325 | ? "OTP" : "EEPROM"); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h index e2d620f0b6e8..650e20af20fa 100644 --- a/drivers/net/wireless/iwlwifi/iwl-dev.h +++ b/drivers/net/wireless/iwlwifi/iwl-dev.h | |||
@@ -258,8 +258,10 @@ struct iwl_channel_info { | |||
258 | #define IWL_TX_FIFO_HCCA_2 6 | 258 | #define IWL_TX_FIFO_HCCA_2 6 |
259 | #define IWL_TX_FIFO_NONE 7 | 259 | #define IWL_TX_FIFO_NONE 7 |
260 | 260 | ||
261 | /* Minimum number of queues. MAX_NUM is defined in hw specific files */ | 261 | /* Minimum number of queues. MAX_NUM is defined in hw specific files. |
262 | #define IWL_MIN_NUM_QUEUES 4 | 262 | * Set the minimum to accommodate the 4 standard TX queues, 1 command |
263 | * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */ | ||
264 | #define IWL_MIN_NUM_QUEUES 10 | ||
263 | 265 | ||
264 | /* Power management (not Tx power) structures */ | 266 | /* Power management (not Tx power) structures */ |
265 | 267 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c index 2addf735b193..ffd5c61a7553 100644 --- a/drivers/net/wireless/iwlwifi/iwl-sta.c +++ b/drivers/net/wireless/iwlwifi/iwl-sta.c | |||
@@ -566,6 +566,8 @@ int iwl_remove_default_wep_key(struct iwl_priv *priv, | |||
566 | unsigned long flags; | 566 | unsigned long flags; |
567 | 567 | ||
568 | spin_lock_irqsave(&priv->sta_lock, flags); | 568 | spin_lock_irqsave(&priv->sta_lock, flags); |
569 | IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n", | ||
570 | keyconf->keyidx); | ||
569 | 571 | ||
570 | if (!test_and_clear_bit(keyconf->keyidx, &priv->ucode_key_table)) | 572 | if (!test_and_clear_bit(keyconf->keyidx, &priv->ucode_key_table)) |
571 | IWL_ERR(priv, "index %d not used in uCode key table.\n", | 573 | IWL_ERR(priv, "index %d not used in uCode key table.\n", |
@@ -573,6 +575,11 @@ int iwl_remove_default_wep_key(struct iwl_priv *priv, | |||
573 | 575 | ||
574 | priv->default_wep_key--; | 576 | priv->default_wep_key--; |
575 | memset(&priv->wep_keys[keyconf->keyidx], 0, sizeof(priv->wep_keys[0])); | 577 | memset(&priv->wep_keys[keyconf->keyidx], 0, sizeof(priv->wep_keys[0])); |
578 | if (iwl_is_rfkill(priv)) { | ||
579 | IWL_DEBUG_WEP(priv, "Not sending REPLY_WEPKEY command due to RFKILL.\n"); | ||
580 | spin_unlock_irqrestore(&priv->sta_lock, flags); | ||
581 | return 0; | ||
582 | } | ||
576 | ret = iwl_send_static_wepkey_cmd(priv, 1); | 583 | ret = iwl_send_static_wepkey_cmd(priv, 1); |
577 | IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n", | 584 | IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n", |
578 | keyconf->keyidx, ret); | 585 | keyconf->keyidx, ret); |
@@ -853,6 +860,11 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv, | |||
853 | priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; | 860 | priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; |
854 | priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; | 861 | priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; |
855 | 862 | ||
863 | if (iwl_is_rfkill(priv)) { | ||
864 | IWL_DEBUG_WEP(priv, "Not sending REPLY_ADD_STA command because RFKILL enabled. \n"); | ||
865 | spin_unlock_irqrestore(&priv->sta_lock, flags); | ||
866 | return 0; | ||
867 | } | ||
856 | ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); | 868 | ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); |
857 | spin_unlock_irqrestore(&priv->sta_lock, flags); | 869 | spin_unlock_irqrestore(&priv->sta_lock, flags); |
858 | return ret; | 870 | return ret; |
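Both iwl-sta hunks follow the same shape: while RF-kill is asserted the firmware cannot be reached, so the local key/station bookkeeping is still done but the follow-up command is skipped, and the early return drops the spinlock on its way out. A schematic sketch of that pattern (the priv layout, the rfkill flag and send_wep_key_cmd() are placeholders for the driver's real helpers):

    #include <linux/spinlock.h>
    #include <linux/bitops.h>

    struct key_priv {                               /* stand-in for the driver's priv */
            spinlock_t sta_lock;
            unsigned long ucode_key_table;
            bool rfkill;
    };

    int send_wep_key_cmd(struct key_priv *priv);    /* stands in for the firmware call */

    static int remove_default_key(struct key_priv *priv, int idx)
    {
            unsigned long flags;
            int ret;

            spin_lock_irqsave(&priv->sta_lock, flags);

            clear_bit(idx, &priv->ucode_key_table); /* local bookkeeping always runs */

            if (priv->rfkill) {                     /* firmware unreachable right now */
                    spin_unlock_irqrestore(&priv->sta_lock, flags);
                    return 0;                       /* not an error; resynced later */
            }

            ret = send_wep_key_cmd(priv);
            spin_unlock_irqrestore(&priv->sta_lock, flags);
            return ret;
    }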
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c index 9bbeec9427f0..2e89040e63be 100644 --- a/drivers/net/wireless/iwlwifi/iwl-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-tx.c | |||
@@ -720,8 +720,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) | |||
720 | goto drop_unlock; | 720 | goto drop_unlock; |
721 | } | 721 | } |
722 | 722 | ||
723 | spin_unlock_irqrestore(&priv->lock, flags); | ||
724 | |||
725 | hdr_len = ieee80211_hdrlen(fc); | 723 | hdr_len = ieee80211_hdrlen(fc); |
726 | 724 | ||
727 | /* Find (or create) index into station table for destination station */ | 725 | /* Find (or create) index into station table for destination station */ |
@@ -729,7 +727,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) | |||
729 | if (sta_id == IWL_INVALID_STATION) { | 727 | if (sta_id == IWL_INVALID_STATION) { |
730 | IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", | 728 | IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", |
731 | hdr->addr1); | 729 | hdr->addr1); |
732 | goto drop; | 730 | goto drop_unlock; |
733 | } | 731 | } |
734 | 732 | ||
735 | IWL_DEBUG_TX(priv, "station Id %d\n", sta_id); | 733 | IWL_DEBUG_TX(priv, "station Id %d\n", sta_id); |
@@ -750,14 +748,17 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) | |||
750 | txq_id = priv->stations[sta_id].tid[tid].agg.txq_id; | 748 | txq_id = priv->stations[sta_id].tid[tid].agg.txq_id; |
751 | swq_id = iwl_virtual_agg_queue_num(swq_id, txq_id); | 749 | swq_id = iwl_virtual_agg_queue_num(swq_id, txq_id); |
752 | } | 750 | } |
753 | priv->stations[sta_id].tid[tid].tfds_in_queue++; | ||
754 | } | 751 | } |
755 | 752 | ||
756 | txq = &priv->txq[txq_id]; | 753 | txq = &priv->txq[txq_id]; |
757 | q = &txq->q; | 754 | q = &txq->q; |
758 | txq->swq_id = swq_id; | 755 | txq->swq_id = swq_id; |
759 | 756 | ||
760 | spin_lock_irqsave(&priv->lock, flags); | 757 | if (unlikely(iwl_queue_space(q) < q->high_mark)) |
758 | goto drop_unlock; | ||
759 | |||
760 | if (ieee80211_is_data_qos(fc)) | ||
761 | priv->stations[sta_id].tid[tid].tfds_in_queue++; | ||
761 | 762 | ||
762 | /* Set up driver data for this TFD */ | 763 | /* Set up driver data for this TFD */ |
763 | memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); | 764 | memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); |
@@ -902,7 +903,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) | |||
902 | 903 | ||
903 | drop_unlock: | 904 | drop_unlock: |
904 | spin_unlock_irqrestore(&priv->lock, flags); | 905 | spin_unlock_irqrestore(&priv->lock, flags); |
905 | drop: | ||
906 | return -1; | 906 | return -1; |
907 | } | 907 | } |
908 | EXPORT_SYMBOL(iwl_tx_skb); | 908 | EXPORT_SYMBOL(iwl_tx_skb); |
@@ -1171,6 +1171,8 @@ int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn) | |||
1171 | IWL_ERR(priv, "Start AGG on invalid station\n"); | 1171 | IWL_ERR(priv, "Start AGG on invalid station\n"); |
1172 | return -ENXIO; | 1172 | return -ENXIO; |
1173 | } | 1173 | } |
1174 | if (unlikely(tid >= MAX_TID_COUNT)) | ||
1175 | return -EINVAL; | ||
1174 | 1176 | ||
1175 | if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) { | 1177 | if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) { |
1176 | IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n"); | 1178 | IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n"); |
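The iwl-tx changes keep priv->lock held from the initial checks through the queue update instead of dropping and re-taking it around the station lookup, funnel every failure through the single drop_unlock label, and reject out-of-range TIDs before they are used as an array index. A schematic sketch of that control flow (the helpers and the MAX_TID_COUNT value are placeholders, not the driver's):

    #include <linux/spinlock.h>
    #include <linux/skbuff.h>

    #define MAX_TID_COUNT 9                         /* illustrative value */

    /* placeholders standing in for the driver's station/queue handling */
    int find_station(void *priv, struct sk_buff *skb);
    bool queue_has_room(void *priv, int tid);
    void queue_frame(void *priv, struct sk_buff *skb, int tid);

    static int tx_one(void *priv, spinlock_t *lock, struct sk_buff *skb, int tid)
    {
            unsigned long flags;

            spin_lock_irqsave(lock, flags);

            if (tid >= MAX_TID_COUNT)               /* validate before indexing by tid */
                    goto drop_unlock;

            if (find_station(priv, skb) < 0)        /* look up with the lock still held */
                    goto drop_unlock;

            if (!queue_has_room(priv, tid))
                    goto drop_unlock;

            queue_frame(priv, skb, tid);            /* commit under the same lock */
            spin_unlock_irqrestore(lock, flags);
            return 0;

    drop_unlock:                                    /* single exit for every failure */
            spin_unlock_irqrestore(lock, flags);
            return -1;
    }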
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index 956798f2c80c..523843369ca2 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c | |||
@@ -3968,6 +3968,9 @@ static int iwl3945_setup_mac(struct iwl_priv *priv) | |||
3968 | 3968 | ||
3969 | hw->wiphy->custom_regulatory = true; | 3969 | hw->wiphy->custom_regulatory = true; |
3970 | 3970 | ||
3971 | /* Firmware does not support this */ | ||
3972 | hw->wiphy->disable_beacon_hints = true; | ||
3973 | |||
3971 | hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945; | 3974 | hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945; |
3972 | /* we create the 802.11 header and a zero-length SSID element */ | 3975 | /* we create the 802.11 header and a zero-length SSID element */ |
3973 | hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2; | 3976 | hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2; |
@@ -4018,10 +4021,10 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e | |||
4018 | SET_IEEE80211_DEV(hw, &pdev->dev); | 4021 | SET_IEEE80211_DEV(hw, &pdev->dev); |
4019 | 4022 | ||
4020 | if ((iwl3945_mod_params.num_of_queues > IWL39_MAX_NUM_QUEUES) || | 4023 | if ((iwl3945_mod_params.num_of_queues > IWL39_MAX_NUM_QUEUES) || |
4021 | (iwl3945_mod_params.num_of_queues < IWL_MIN_NUM_QUEUES)) { | 4024 | (iwl3945_mod_params.num_of_queues < IWL39_MIN_NUM_QUEUES)) { |
4022 | IWL_ERR(priv, | 4025 | IWL_ERR(priv, |
4023 | "invalid queues_num, should be between %d and %d\n", | 4026 | "invalid queues_num, should be between %d and %d\n", |
4024 | IWL_MIN_NUM_QUEUES, IWL39_MAX_NUM_QUEUES); | 4027 | IWL39_MIN_NUM_QUEUES, IWL39_MAX_NUM_QUEUES); |
4025 | err = -EINVAL; | 4028 | err = -EINVAL; |
4026 | goto out_ieee80211_free_hw; | 4029 | goto out_ieee80211_free_hw; |
4027 | } | 4030 | } |
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.c b/drivers/net/wireless/iwmc3200wifi/commands.c index 834a7f544e5d..e2334d123599 100644 --- a/drivers/net/wireless/iwmc3200wifi/commands.c +++ b/drivers/net/wireless/iwmc3200wifi/commands.c | |||
@@ -220,6 +220,7 @@ int iwm_store_rxiq_calib_result(struct iwm_priv *iwm) | |||
220 | eeprom_rxiq = iwm_eeprom_access(iwm, IWM_EEPROM_CALIB_RXIQ); | 220 | eeprom_rxiq = iwm_eeprom_access(iwm, IWM_EEPROM_CALIB_RXIQ); |
221 | if (IS_ERR(eeprom_rxiq)) { | 221 | if (IS_ERR(eeprom_rxiq)) { |
222 | IWM_ERR(iwm, "Couldn't access EEPROM RX IQ entry\n"); | 222 | IWM_ERR(iwm, "Couldn't access EEPROM RX IQ entry\n"); |
223 | kfree(rxiq); | ||
223 | return PTR_ERR(eeprom_rxiq); | 224 | return PTR_ERR(eeprom_rxiq); |
224 | } | 225 | } |
225 | 226 | ||
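The one-line iwmc3200wifi fix plugs a leak: rxiq has already been allocated when the EEPROM access fails, so that error path must free it before propagating PTR_ERR(). A schematic sketch of the rule that every error path after an allocation releases it (names and the size are illustrative):

    #include <linux/slab.h>
    #include <linux/err.h>
    #include <linux/string.h>

    #define CALIB_SIZE 32                           /* illustrative size */

    /* placeholder standing in for iwm_eeprom_access(); may return ERR_PTR() */
    void *eeprom_entry(void *priv);

    static int store_calib_result(void *priv, void **out)
    {
            void *calib, *eeprom;

            calib = kzalloc(CALIB_SIZE, GFP_KERNEL);
            if (!calib)
                    return -ENOMEM;

            eeprom = eeprom_entry(priv);
            if (IS_ERR(eeprom)) {
                    kfree(calib);                   /* the cleanup the patch adds */
                    return PTR_ERR(eeprom);
            }

            memcpy(calib, eeprom, CALIB_SIZE);
            *out = calib;                           /* caller now owns the buffer */
            return 0;
    }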
diff --git a/drivers/net/wireless/iwmc3200wifi/netdev.c b/drivers/net/wireless/iwmc3200wifi/netdev.c index aea5ccf24ccf..bf294e41753b 100644 --- a/drivers/net/wireless/iwmc3200wifi/netdev.c +++ b/drivers/net/wireless/iwmc3200wifi/netdev.c | |||
@@ -106,10 +106,8 @@ void *iwm_if_alloc(int sizeof_bus, struct device *dev, | |||
106 | int ret = 0; | 106 | int ret = 0; |
107 | 107 | ||
108 | wdev = iwm_wdev_alloc(sizeof_bus, dev); | 108 | wdev = iwm_wdev_alloc(sizeof_bus, dev); |
109 | if (!wdev) { | 109 | if (IS_ERR(wdev)) |
110 | dev_err(dev, "no memory for wireless device instance\n"); | 110 | return wdev; |
111 | return ERR_PTR(-ENOMEM); | ||
112 | } | ||
113 | 111 | ||
114 | iwm = wdev_to_iwm(wdev); | 112 | iwm = wdev_to_iwm(wdev); |
115 | iwm->bus_ops = if_ops; | 113 | iwm->bus_ops = if_ops; |
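The netdev.c fix matches the caller to iwm_wdev_alloc()'s actual contract: the function reports failure with an ERR_PTR()-encoded pointer, not NULL, so the caller must test IS_ERR() and can simply return the pointer to propagate the error. A schematic sketch of that convention (struct and function names are made up):

    #include <linux/err.h>
    #include <linux/slab.h>

    struct my_wdev { int dummy; };                  /* stand-in for the real wireless_dev */

    static struct my_wdev *wdev_alloc(size_t extra)
    {
            struct my_wdev *wdev = kzalloc(sizeof(*wdev) + extra, GFP_KERNEL);

            if (!wdev)
                    return ERR_PTR(-ENOMEM);        /* the error rides in the pointer */
            return wdev;
    }

    static void *if_alloc(size_t extra)
    {
            struct my_wdev *wdev = wdev_alloc(extra);

            if (IS_ERR(wdev))                       /* a NULL test would miss this */
                    return wdev;                    /* pass the encoded error up */

            /* ... continue setting up the interface ... */
            return wdev;
    }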
diff --git a/drivers/net/wireless/libertas/11d.c b/drivers/net/wireless/libertas/11d.c index 9a5408e7d94a..5c6968101f0d 100644 --- a/drivers/net/wireless/libertas/11d.c +++ b/drivers/net/wireless/libertas/11d.c | |||
@@ -47,7 +47,7 @@ static u8 lbs_region_2_code(u8 *region) | |||
47 | { | 47 | { |
48 | u8 i; | 48 | u8 i; |
49 | 49 | ||
50 | for (i = 0; region[i] && i < COUNTRY_CODE_LEN; i++) | 50 | for (i = 0; i < COUNTRY_CODE_LEN && region[i]; i++) |
51 | region[i] = toupper(region[i]); | 51 | region[i] = toupper(region[i]); |
52 | 52 | ||
53 | for (i = 0; i < ARRAY_SIZE(region_code_mapping); i++) { | 53 | for (i = 0; i < ARRAY_SIZE(region_code_mapping); i++) { |
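The libertas 11d change only swaps the order of the two loop conditions, but that matters: && evaluates left to right, so the bounds test has to come first or region[i] can be read past the end of a country-code buffer that is not NUL-terminated. A standalone illustration of the idiom:

    #include <ctype.h>

    #define COUNTRY_CODE_LEN 3

    static void upcase_region(unsigned char *region)
    {
            int i;

            /* safe: i is range-checked before region[i] is ever evaluated */
            for (i = 0; i < COUNTRY_CODE_LEN && region[i]; i++)
                    region[i] = toupper(region[i]);
    }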
diff --git a/drivers/net/wireless/libertas/hostcmd.h b/drivers/net/wireless/libertas/hostcmd.h index 0a2e29140add..c8a1998d4744 100644 --- a/drivers/net/wireless/libertas/hostcmd.h +++ b/drivers/net/wireless/libertas/hostcmd.h | |||
@@ -56,8 +56,8 @@ struct rxpd { | |||
56 | u8 bss_type; | 56 | u8 bss_type; |
57 | /* BSS number */ | 57 | /* BSS number */ |
58 | u8 bss_num; | 58 | u8 bss_num; |
59 | } bss; | 59 | } __attribute__ ((packed)) bss; |
60 | } u; | 60 | } __attribute__ ((packed)) u; |
61 | 61 | ||
62 | /* SNR */ | 62 | /* SNR */ |
63 | u8 snr; | 63 | u8 snr; |
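In the rxpd layout above, packing only the outer structure is not enough: an unpacked inner struct or union keeps its natural alignment and size, which can shift later fields or grow the structure relative to the byte stream the firmware produces. A standalone illustration with made-up field names:

    #include <stdio.h>
    #include <stdint.h>

    struct wire_hdr {
            uint8_t flags;
            struct {
                    uint16_t seq;                   /* naturally 2-byte aligned ...       */
                    uint8_t  frag;
            } __attribute__ ((packed)) ctl;         /* ... unless packed like its parent  */
            uint8_t tail;
    } __attribute__ ((packed));

    int main(void)
    {
            /* prints 5 (1 + 3 + 1); drop the inner attribute and the inner
             * aggregate is padded to 4 bytes, giving 6 on typical ABIs */
            printf("sizeof(struct wire_hdr) = %zu\n", sizeof(struct wire_hdr));
            return 0;
    }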
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c index 601b54249677..6c95af3023cc 100644 --- a/drivers/net/wireless/libertas/scan.c +++ b/drivers/net/wireless/libertas/scan.c | |||
@@ -5,6 +5,7 @@ | |||
5 | * for sending scan commands to the firmware. | 5 | * for sending scan commands to the firmware. |
6 | */ | 6 | */ |
7 | #include <linux/types.h> | 7 | #include <linux/types.h> |
8 | #include <linux/kernel.h> | ||
8 | #include <linux/etherdevice.h> | 9 | #include <linux/etherdevice.h> |
9 | #include <linux/if_arp.h> | 10 | #include <linux/if_arp.h> |
10 | #include <asm/unaligned.h> | 11 | #include <asm/unaligned.h> |
@@ -876,7 +877,7 @@ static inline char *lbs_translate_scan(struct lbs_private *priv, | |||
876 | iwe.u.bitrate.disabled = 0; | 877 | iwe.u.bitrate.disabled = 0; |
877 | iwe.u.bitrate.value = 0; | 878 | iwe.u.bitrate.value = 0; |
878 | 879 | ||
879 | for (j = 0; bss->rates[j] && (j < sizeof(bss->rates)); j++) { | 880 | for (j = 0; j < ARRAY_SIZE(bss->rates) && bss->rates[j]; j++) { |
880 | /* Bit rate given in 500 kb/s units */ | 881 | /* Bit rate given in 500 kb/s units */ |
881 | iwe.u.bitrate.value = bss->rates[j] * 500000; | 882 | iwe.u.bitrate.value = bss->rates[j] * 500000; |
882 | current_val = iwe_stream_add_value(info, start, current_val, | 883 | current_val = iwe_stream_add_value(info, start, current_val, |
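Same bounds-first rewrite as in 11d.c, here using ARRAY_SIZE() for the element count, which is why the <linux/kernel.h> include is added. A small userspace equivalent of the loop, with an illustrative rate table:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    int main(void)
    {
            unsigned char rates[8] = { 2, 4, 11, 22 };      /* zero-terminated early */
            size_t j;

            /* bound first, then the element test, as in the patched loop */
            for (j = 0; j < ARRAY_SIZE(rates) && rates[j]; j++)
                    printf("rate %zu: %u kbit/s\n", j, rates[j] * 500u);
            return 0;
    }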
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c index a263d5c84c08..83967afe0821 100644 --- a/drivers/net/wireless/mwl8k.c +++ b/drivers/net/wireless/mwl8k.c | |||
@@ -261,7 +261,7 @@ struct mwl8k_vif { | |||
261 | */ | 261 | */ |
262 | }; | 262 | }; |
263 | 263 | ||
264 | #define MWL8K_VIF(_vif) (struct mwl8k_vif *)(&((_vif)->drv_priv)) | 264 | #define MWL8K_VIF(_vif) ((struct mwl8k_vif *)&((_vif)->drv_priv)) |
265 | 265 | ||
266 | static const struct ieee80211_channel mwl8k_channels[] = { | 266 | static const struct ieee80211_channel mwl8k_channels[] = { |
267 | { .center_freq = 2412, .hw_value = 1, }, | 267 | { .center_freq = 2412, .hw_value = 1, }, |
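The MWL8K_VIF() change adds the outer parentheses the expansion was missing: a cast binds less tightly than ->, so a use like MWL8K_VIF(vif)->field would otherwise apply the member access before the cast. A standalone illustration with made-up structure names:

    #include <stdio.h>

    struct outer { char drv_priv[16]; };
    struct inner { int id; };

    /* unsafe: PRIV_BAD(p)->id would parse as (struct inner *)((&p->drv_priv)->id) */
    #define PRIV_BAD(p)  (struct inner *)(&((p)->drv_priv))
    /* safe: the whole expansion is one parenthesised expression */
    #define PRIV_GOOD(p) ((struct inner *)&((p)->drv_priv))

    int main(void)
    {
            struct outer o = { { 0 } };

            PRIV_GOOD(&o)->id = 42;                 /* member access hits the cast pointer */
            printf("%d\n", PRIV_GOOD(&o)->id);
            return 0;
    }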
@@ -1012,6 +1012,8 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit) | |||
1012 | rmb(); | 1012 | rmb(); |
1013 | 1013 | ||
1014 | skb = rxq->rx_skb[rxq->rx_head]; | 1014 | skb = rxq->rx_skb[rxq->rx_head]; |
1015 | if (skb == NULL) | ||
1016 | break; | ||
1015 | rxq->rx_skb[rxq->rx_head] = NULL; | 1017 | rxq->rx_skb[rxq->rx_head] = NULL; |
1016 | 1018 | ||
1017 | rxq->rx_head = (rxq->rx_head + 1) % MWL8K_RX_DESCS; | 1019 | rxq->rx_head = (rxq->rx_head + 1) % MWL8K_RX_DESCS; |
@@ -1591,6 +1593,9 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd) | |||
1591 | timeout = wait_for_completion_timeout(&cmd_wait, | 1593 | timeout = wait_for_completion_timeout(&cmd_wait, |
1592 | msecs_to_jiffies(MWL8K_CMD_TIMEOUT_MS)); | 1594 | msecs_to_jiffies(MWL8K_CMD_TIMEOUT_MS)); |
1593 | 1595 | ||
1596 | pci_unmap_single(priv->pdev, dma_addr, dma_size, | ||
1597 | PCI_DMA_BIDIRECTIONAL); | ||
1598 | |||
1594 | result = &cmd->result; | 1599 | result = &cmd->result; |
1595 | if (!timeout) { | 1600 | if (!timeout) { |
1596 | spin_lock_irq(&priv->fw_lock); | 1601 | spin_lock_irq(&priv->fw_lock); |
@@ -1610,8 +1615,6 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd) | |||
1610 | *result); | 1615 | *result); |
1611 | } | 1616 | } |
1612 | 1617 | ||
1613 | pci_unmap_single(priv->pdev, dma_addr, dma_size, | ||
1614 | PCI_DMA_BIDIRECTIONAL); | ||
1615 | return rc; | 1618 | return rc; |
1616 | } | 1619 | } |
1617 | 1620 | ||
@@ -1654,18 +1657,18 @@ static int mwl8k_cmd_get_hw_spec(struct ieee80211_hw *hw) | |||
1654 | memset(cmd->perm_addr, 0xff, sizeof(cmd->perm_addr)); | 1657 | memset(cmd->perm_addr, 0xff, sizeof(cmd->perm_addr)); |
1655 | cmd->ps_cookie = cpu_to_le32(priv->cookie_dma); | 1658 | cmd->ps_cookie = cpu_to_le32(priv->cookie_dma); |
1656 | cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rx_desc_dma); | 1659 | cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rx_desc_dma); |
1657 | cmd->num_tx_queues = MWL8K_TX_QUEUES; | 1660 | cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES); |
1658 | for (i = 0; i < MWL8K_TX_QUEUES; i++) | 1661 | for (i = 0; i < MWL8K_TX_QUEUES; i++) |
1659 | cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].tx_desc_dma); | 1662 | cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].tx_desc_dma); |
1660 | cmd->num_tx_desc_per_queue = MWL8K_TX_DESCS; | 1663 | cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS); |
1661 | cmd->total_rx_desc = MWL8K_RX_DESCS; | 1664 | cmd->total_rx_desc = cpu_to_le32(MWL8K_RX_DESCS); |
1662 | 1665 | ||
1663 | rc = mwl8k_post_cmd(hw, &cmd->header); | 1666 | rc = mwl8k_post_cmd(hw, &cmd->header); |
1664 | 1667 | ||
1665 | if (!rc) { | 1668 | if (!rc) { |
1666 | SET_IEEE80211_PERM_ADDR(hw, cmd->perm_addr); | 1669 | SET_IEEE80211_PERM_ADDR(hw, cmd->perm_addr); |
1667 | priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs); | 1670 | priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs); |
1668 | priv->fw_rev = cmd->fw_rev; | 1671 | priv->fw_rev = le32_to_cpu(cmd->fw_rev); |
1669 | priv->hw_rev = cmd->hw_rev; | 1672 | priv->hw_rev = cmd->hw_rev; |
1670 | priv->region_code = le16_to_cpu(cmd->region_code); | 1673 | priv->region_code = le16_to_cpu(cmd->region_code); |
1671 | } | 1674 | } |
@@ -3216,15 +3219,19 @@ static int mwl8k_configure_filter_wt(struct work_struct *wt) | |||
3216 | struct dev_addr_list *mclist = worker->mclist; | 3219 | struct dev_addr_list *mclist = worker->mclist; |
3217 | 3220 | ||
3218 | struct mwl8k_priv *priv = hw->priv; | 3221 | struct mwl8k_priv *priv = hw->priv; |
3219 | struct mwl8k_vif *mv_vif; | ||
3220 | int rc = 0; | 3222 | int rc = 0; |
3221 | 3223 | ||
3222 | if (changed_flags & FIF_BCN_PRBRESP_PROMISC) { | 3224 | if (changed_flags & FIF_BCN_PRBRESP_PROMISC) { |
3223 | if (*total_flags & FIF_BCN_PRBRESP_PROMISC) | 3225 | if (*total_flags & FIF_BCN_PRBRESP_PROMISC) |
3224 | rc = mwl8k_cmd_set_pre_scan(hw); | 3226 | rc = mwl8k_cmd_set_pre_scan(hw); |
3225 | else { | 3227 | else { |
3226 | mv_vif = MWL8K_VIF(priv->vif); | 3228 | u8 *bssid; |
3227 | rc = mwl8k_cmd_set_post_scan(hw, mv_vif->bssid); | 3229 | |
3230 | bssid = "\x00\x00\x00\x00\x00\x00"; | ||
3231 | if (priv->vif != NULL) | ||
3232 | bssid = MWL8K_VIF(priv->vif)->bssid; | ||
3233 | |||
3234 | rc = mwl8k_cmd_set_post_scan(hw, bssid); | ||
3228 | } | 3235 | } |
3229 | } | 3236 | } |
3230 | 3237 | ||
@@ -3726,6 +3733,8 @@ static void __devexit mwl8k_remove(struct pci_dev *pdev) | |||
3726 | 3733 | ||
3727 | ieee80211_stop_queues(hw); | 3734 | ieee80211_stop_queues(hw); |
3728 | 3735 | ||
3736 | ieee80211_unregister_hw(hw); | ||
3737 | |||
3729 | /* Remove tx reclaim tasklet */ | 3738 | /* Remove tx reclaim tasklet */ |
3730 | tasklet_kill(&priv->tx_reclaim_task); | 3739 | tasklet_kill(&priv->tx_reclaim_task); |
3731 | 3740 | ||
@@ -3739,8 +3748,6 @@ static void __devexit mwl8k_remove(struct pci_dev *pdev) | |||
3739 | for (i = 0; i < MWL8K_TX_QUEUES; i++) | 3748 | for (i = 0; i < MWL8K_TX_QUEUES; i++) |
3740 | mwl8k_txq_reclaim(hw, i, 1); | 3749 | mwl8k_txq_reclaim(hw, i, 1); |
3741 | 3750 | ||
3742 | ieee80211_unregister_hw(hw); | ||
3743 | |||
3744 | for (i = 0; i < MWL8K_TX_QUEUES; i++) | 3751 | for (i = 0; i < MWL8K_TX_QUEUES; i++) |
3745 | mwl8k_txq_deinit(hw, i); | 3752 | mwl8k_txq_deinit(hw, i); |
3746 | 3753 | ||
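Several of the mwl8k hunks are byte-order fixes: fields the firmware defines as little-endian have to be written with cpu_to_le32() and read back with le16_to_cpu()/le32_to_cpu(), otherwise big-endian hosts exchange byte-swapped values (the pci_unmap_single() move likewise hands the DMA buffer back to the CPU before its result field is read). A schematic sketch of the conversion pattern, using a cut-down stand-in command structure rather than the driver's real one:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct hwspec_cmd {                             /* stand-in, not the mwl8k layout */
            __le32 num_tx_queues;
            __le16 region_code;
            __le32 fw_rev;
    };

    static void fill_hwspec(struct hwspec_cmd *cmd)
    {
            cmd->num_tx_queues = cpu_to_le32(4);    /* host -> device byte order */
    }

    static void parse_hwspec(const struct hwspec_cmd *cmd, u16 *region, u32 *fw_rev)
    {
            *region = le16_to_cpu(cmd->region_code);        /* device -> host */
            *fw_rev = le32_to_cpu(cmd->fw_rev);
    }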
diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c index 632fac86a308..b3946272c72e 100644 --- a/drivers/net/wireless/orinoco/hw.c +++ b/drivers/net/wireless/orinoco/hw.c | |||
@@ -70,7 +70,7 @@ int orinoco_hw_get_tkip_iv(struct orinoco_private *priv, int key, u8 *tsc) | |||
70 | int err = 0; | 70 | int err = 0; |
71 | u8 tsc_arr[4][IW_ENCODE_SEQ_MAX_SIZE]; | 71 | u8 tsc_arr[4][IW_ENCODE_SEQ_MAX_SIZE]; |
72 | 72 | ||
73 | if ((key < 0) || (key > 4)) | 73 | if ((key < 0) || (key >= 4)) |
74 | return -EINVAL; | 74 | return -EINVAL; |
75 | 75 | ||
76 | err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_TKIP_IV, | 76 | err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_TKIP_IV, |
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h index a498dde024e1..49c9e2c1433d 100644 --- a/drivers/net/wireless/rt2x00/rt2x00.h +++ b/drivers/net/wireless/rt2x00/rt2x00.h | |||
@@ -849,13 +849,15 @@ struct rt2x00_dev { | |||
849 | static inline void rt2x00_rf_read(struct rt2x00_dev *rt2x00dev, | 849 | static inline void rt2x00_rf_read(struct rt2x00_dev *rt2x00dev, |
850 | const unsigned int word, u32 *data) | 850 | const unsigned int word, u32 *data) |
851 | { | 851 | { |
852 | *data = rt2x00dev->rf[word]; | 852 | BUG_ON(word < 1 || word > rt2x00dev->ops->rf_size / sizeof(u32)); |
853 | *data = rt2x00dev->rf[word - 1]; | ||
853 | } | 854 | } |
854 | 855 | ||
855 | static inline void rt2x00_rf_write(struct rt2x00_dev *rt2x00dev, | 856 | static inline void rt2x00_rf_write(struct rt2x00_dev *rt2x00dev, |
856 | const unsigned int word, u32 data) | 857 | const unsigned int word, u32 data) |
857 | { | 858 | { |
858 | rt2x00dev->rf[word] = data; | 859 | BUG_ON(word < 1 || word > rt2x00dev->ops->rf_size / sizeof(u32)); |
860 | rt2x00dev->rf[word - 1] = data; | ||
859 | } | 861 | } |
860 | 862 | ||
861 | /* | 863 | /* |
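The rt2x00 accessors now treat the word argument as the 1-based RF register number the callers use, validating it and subtracting one before touching the 0-based cache array. A schematic sketch of that mapping (the struct is a stand-in; the real code bounds the check against ops->rf_size):

    #include <linux/types.h>
    #include <linux/bug.h>

    struct rf_cache {
            u32 *rf;                        /* rf[0] holds word 1, rf[n-1] holds word n */
            unsigned int rf_words;          /* number of valid words */
    };

    static inline u32 rf_read(const struct rf_cache *c, unsigned int word)
    {
            BUG_ON(word < 1 || word > c->rf_words);
            return c->rf[word - 1];
    }

    static inline void rf_write(struct rf_cache *c, unsigned int word, u32 data)
    {
            BUG_ON(word < 1 || word > c->rf_words);
            c->rf[word - 1] = data;
    }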
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c index 294250e294dd..87a95588a8e3 100644 --- a/drivers/net/wireless/rtl818x/rtl8187_dev.c +++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c | |||
@@ -869,6 +869,9 @@ static int rtl8187b_init_hw(struct ieee80211_hw *dev) | |||
869 | priv->aifsn[3] = 3; /* AIFSN[AC_BE] */ | 869 | priv->aifsn[3] = 3; /* AIFSN[AC_BE] */ |
870 | rtl818x_iowrite8(priv, &priv->map->ACM_CONTROL, 0); | 870 | rtl818x_iowrite8(priv, &priv->map->ACM_CONTROL, 0); |
871 | 871 | ||
872 | /* ENEDCA flag must always be set, transmit issues? */ | ||
873 | rtl818x_iowrite8(priv, &priv->map->MSR, RTL818X_MSR_ENEDCA); | ||
874 | |||
872 | return 0; | 875 | return 0; |
873 | } | 876 | } |
874 | 877 | ||
@@ -1173,13 +1176,16 @@ static void rtl8187_bss_info_changed(struct ieee80211_hw *dev, | |||
1173 | rtl818x_iowrite8(priv, &priv->map->BSSID[i], | 1176 | rtl818x_iowrite8(priv, &priv->map->BSSID[i], |
1174 | info->bssid[i]); | 1177 | info->bssid[i]); |
1175 | 1178 | ||
1179 | if (priv->is_rtl8187b) | ||
1180 | reg = RTL818X_MSR_ENEDCA; | ||
1181 | else | ||
1182 | reg = 0; | ||
1183 | |||
1176 | if (is_valid_ether_addr(info->bssid)) { | 1184 | if (is_valid_ether_addr(info->bssid)) { |
1177 | reg = RTL818X_MSR_INFRA; | 1185 | reg |= RTL818X_MSR_INFRA; |
1178 | if (priv->is_rtl8187b) | ||
1179 | reg |= RTL818X_MSR_ENEDCA; | ||
1180 | rtl818x_iowrite8(priv, &priv->map->MSR, reg); | 1186 | rtl818x_iowrite8(priv, &priv->map->MSR, reg); |
1181 | } else { | 1187 | } else { |
1182 | reg = RTL818X_MSR_NO_LINK; | 1188 | reg |= RTL818X_MSR_NO_LINK; |
1183 | rtl818x_iowrite8(priv, &priv->map->MSR, reg); | 1189 | rtl818x_iowrite8(priv, &priv->map->MSR, reg); |
1184 | } | 1190 | } |
1185 | 1191 | ||
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c index 40b07b988224..3bd3c779fff3 100644 --- a/drivers/net/wireless/zd1211rw/zd_mac.c +++ b/drivers/net/wireless/zd1211rw/zd_mac.c | |||
@@ -698,7 +698,7 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length) | |||
698 | && !mac->pass_ctrl) | 698 | && !mac->pass_ctrl) |
699 | return 0; | 699 | return 0; |
700 | 700 | ||
701 | fc = *(__le16 *)buffer; | 701 | fc = get_unaligned((__le16*)buffer); |
702 | need_padding = ieee80211_is_data_qos(fc) ^ ieee80211_has_a4(fc); | 702 | need_padding = ieee80211_is_data_qos(fc) ^ ieee80211_has_a4(fc); |
703 | 703 | ||
704 | skb = dev_alloc_skb(length + (need_padding ? 2 : 0)); | 704 | skb = dev_alloc_skb(length + (need_padding ? 2 : 0)); |
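In zd_mac_rx() the frame-control field sits at the start of a receive buffer with no alignment guarantee, so the plain __le16 dereference is replaced with get_unaligned(), which is safe on architectures that fault on misaligned 16-bit loads. A schematic sketch of the same fetch (the function name is made up; the ieee80211_* helpers are the real mac80211 ones):

    #include <linux/ieee80211.h>
    #include <asm/unaligned.h>

    static bool rx_needs_padding(const u8 *buffer)
    {
            /* the 2-byte frame-control word may land on an odd address here */
            __le16 fc = get_unaligned((const __le16 *)buffer);

            return ieee80211_is_data_qos(fc) ^ ieee80211_has_a4(fc);
    }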
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c index a07580138e81..c2fd6187773f 100644 --- a/drivers/net/yellowfin.c +++ b/drivers/net/yellowfin.c | |||
@@ -346,7 +346,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); | |||
346 | static int yellowfin_open(struct net_device *dev); | 346 | static int yellowfin_open(struct net_device *dev); |
347 | static void yellowfin_timer(unsigned long data); | 347 | static void yellowfin_timer(unsigned long data); |
348 | static void yellowfin_tx_timeout(struct net_device *dev); | 348 | static void yellowfin_tx_timeout(struct net_device *dev); |
349 | static void yellowfin_init_ring(struct net_device *dev); | 349 | static int yellowfin_init_ring(struct net_device *dev); |
350 | static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev); | 350 | static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev); |
351 | static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance); | 351 | static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance); |
352 | static int yellowfin_rx(struct net_device *dev); | 352 | static int yellowfin_rx(struct net_device *dev); |
@@ -573,19 +573,24 @@ static int yellowfin_open(struct net_device *dev) | |||
573 | { | 573 | { |
574 | struct yellowfin_private *yp = netdev_priv(dev); | 574 | struct yellowfin_private *yp = netdev_priv(dev); |
575 | void __iomem *ioaddr = yp->base; | 575 | void __iomem *ioaddr = yp->base; |
576 | int i; | 576 | int i, ret; |
577 | 577 | ||
578 | /* Reset the chip. */ | 578 | /* Reset the chip. */ |
579 | iowrite32(0x80000000, ioaddr + DMACtrl); | 579 | iowrite32(0x80000000, ioaddr + DMACtrl); |
580 | 580 | ||
581 | i = request_irq(dev->irq, &yellowfin_interrupt, IRQF_SHARED, dev->name, dev); | 581 | ret = request_irq(dev->irq, &yellowfin_interrupt, IRQF_SHARED, dev->name, dev); |
582 | if (i) return i; | 582 | if (ret) |
583 | return ret; | ||
583 | 584 | ||
584 | if (yellowfin_debug > 1) | 585 | if (yellowfin_debug > 1) |
585 | printk(KERN_DEBUG "%s: yellowfin_open() irq %d.\n", | 586 | printk(KERN_DEBUG "%s: yellowfin_open() irq %d.\n", |
586 | dev->name, dev->irq); | 587 | dev->name, dev->irq); |
587 | 588 | ||
588 | yellowfin_init_ring(dev); | 589 | ret = yellowfin_init_ring(dev); |
590 | if (ret) { | ||
591 | free_irq(dev->irq, dev); | ||
592 | return ret; | ||
593 | } | ||
589 | 594 | ||
590 | iowrite32(yp->rx_ring_dma, ioaddr + RxPtr); | 595 | iowrite32(yp->rx_ring_dma, ioaddr + RxPtr); |
591 | iowrite32(yp->tx_ring_dma, ioaddr + TxPtr); | 596 | iowrite32(yp->tx_ring_dma, ioaddr + TxPtr); |
@@ -725,10 +730,10 @@ static void yellowfin_tx_timeout(struct net_device *dev) | |||
725 | } | 730 | } |
726 | 731 | ||
727 | /* Initialize the Rx and Tx rings, along with various 'dev' bits. */ | 732 | /* Initialize the Rx and Tx rings, along with various 'dev' bits. */ |
728 | static void yellowfin_init_ring(struct net_device *dev) | 733 | static int yellowfin_init_ring(struct net_device *dev) |
729 | { | 734 | { |
730 | struct yellowfin_private *yp = netdev_priv(dev); | 735 | struct yellowfin_private *yp = netdev_priv(dev); |
731 | int i; | 736 | int i, j; |
732 | 737 | ||
733 | yp->tx_full = 0; | 738 | yp->tx_full = 0; |
734 | yp->cur_rx = yp->cur_tx = 0; | 739 | yp->cur_rx = yp->cur_tx = 0; |
@@ -753,6 +758,11 @@ static void yellowfin_init_ring(struct net_device *dev) | |||
753 | yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev, | 758 | yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev, |
754 | skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE)); | 759 | skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE)); |
755 | } | 760 | } |
761 | if (i != RX_RING_SIZE) { | ||
762 | for (j = 0; j < i; j++) | ||
763 | dev_kfree_skb(yp->rx_skbuff[j]); | ||
764 | return -ENOMEM; | ||
765 | } | ||
756 | yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP); | 766 | yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP); |
757 | yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE); | 767 | yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE); |
758 | 768 | ||
@@ -769,8 +779,6 @@ static void yellowfin_init_ring(struct net_device *dev) | |||
769 | yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS); | 779 | yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS); |
770 | #else | 780 | #else |
771 | { | 781 | { |
772 | int j; | ||
773 | |||
774 | /* Tx ring needs a pair of descriptors, the second for the status. */ | 782 | /* Tx ring needs a pair of descriptors, the second for the status. */ |
775 | for (i = 0; i < TX_RING_SIZE; i++) { | 783 | for (i = 0; i < TX_RING_SIZE; i++) { |
776 | j = 2*i; | 784 | j = 2*i; |
@@ -805,7 +813,7 @@ static void yellowfin_init_ring(struct net_device *dev) | |||
805 | } | 813 | } |
806 | #endif | 814 | #endif |
807 | yp->tx_tail_desc = &yp->tx_status[0]; | 815 | yp->tx_tail_desc = &yp->tx_status[0]; |
808 | return; | 816 | return 0; |
809 | } | 817 | } |
810 | 818 | ||
811 | static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev) | 819 | static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev) |
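The yellowfin changes turn ring setup into a fallible step: yellowfin_init_ring() now reports -ENOMEM, freeing the receive skbs it did manage to allocate, and yellowfin_open() unwinds its own earlier request_irq() when that happens. A schematic sketch of the pattern (ring layout, sizes and names are stand-ins):

    #include <linux/interrupt.h>
    #include <linux/skbuff.h>

    #define RING_SIZE 16                            /* illustrative */

    struct ring { struct sk_buff *skb[RING_SIZE]; unsigned int buf_sz; };

    static int init_rx_ring(struct ring *r)
    {
            int i;

            for (i = 0; i < RING_SIZE; i++) {
                    r->skb[i] = dev_alloc_skb(r->buf_sz);
                    if (!r->skb[i])
                            break;
            }
            if (i != RING_SIZE) {                   /* partial success: undo it */
                    while (--i >= 0)
                            dev_kfree_skb(r->skb[i]);
                    return -ENOMEM;
            }
            return 0;
    }

    irqreturn_t my_interrupt(int irq, void *dev_id);        /* placeholder handler */

    static int my_open(struct ring *r, int irq, void *dev_id, const char *name)
    {
            int ret;

            ret = request_irq(irq, my_interrupt, IRQF_SHARED, name, dev_id);
            if (ret)
                    return ret;

            ret = init_rx_ring(r);
            if (ret)
                    free_irq(irq, dev_id);          /* unwind the step that succeeded */
            return ret;
    }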
diff --git a/drivers/net/zorro8390.c b/drivers/net/zorro8390.c index 37c84e3b8be0..81c753a617ab 100644 --- a/drivers/net/zorro8390.c +++ b/drivers/net/zorro8390.c | |||
@@ -120,6 +120,9 @@ static int __devinit zorro8390_init_one(struct zorro_dev *z, | |||
120 | for (i = ARRAY_SIZE(cards)-1; i >= 0; i--) | 120 | for (i = ARRAY_SIZE(cards)-1; i >= 0; i--) |
121 | if (z->id == cards[i].id) | 121 | if (z->id == cards[i].id) |
122 | break; | 122 | break; |
123 | if (i < 0) | ||
124 | return -ENODEV; | ||
125 | |||
123 | board = z->resource.start; | 126 | board = z->resource.start; |
124 | ioaddr = board+cards[i].offset; | 127 | ioaddr = board+cards[i].offset; |
125 | dev = alloc_ei_netdev(); | 128 | dev = alloc_ei_netdev(); |
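The zorro8390 check catches the case where the countdown search finds no matching card id and leaves i at -1, which the old code then used to index cards[], reading before the start of the table. A standalone illustration of the find-or-bail idiom (table contents are made up; the driver returns -ENODEV at that point):

    #include <stdio.h>

    struct card { unsigned int id; unsigned int offset; };

    static const struct card cards[] = {
            { 0x1234, 0x0000 },
            { 0x5678, 0x0800 },
    };

    static int find_card(unsigned int id)
    {
            int i;

            for (i = (int)(sizeof(cards) / sizeof(cards[0])) - 1; i >= 0; i--)
                    if (cards[i].id == id)
                            break;
            if (i < 0)                      /* fell off the front: no such board */
                    return -1;
            return i;
    }

    int main(void)
    {
            printf("index for 0x5678: %d\n", find_card(0x5678));
            printf("index for 0xdead: %d\n", find_card(0xdead));
            return 0;
    }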