author | Russell King <rmk@dyn-67.arm.linux.org.uk> | 2008-07-09 16:34:25 -0400
committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2008-07-09 16:34:25 -0400
commit | f974a8ec96571535ee07880a023bcce0e3f2c76b (patch)
tree | 5cf09207b1ad292a55275cd0b24999fa29b9dfe8 /drivers/net
parent | c0b8556f2f8146bd38324b14b1ce00f249ba8ed9 (diff)
parent | 4ed47896935573c8423d05bddda3f269d6e6c613 (diff)
Merge branch 'machtypes' into pxa-palm
Diffstat (limited to 'drivers/net')
126 files changed, 1803 insertions, 1227 deletions
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index e6c545fe5f58..b9d097c9f6bb 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -413,7 +413,7 @@ static int __devinit el3_pnp_probe(struct pnp_dev *pdev, | |||
413 | { | 413 | { |
414 | short i; | 414 | short i; |
415 | int ioaddr, irq, if_port; | 415 | int ioaddr, irq, if_port; |
416 | u16 phys_addr[3]; | 416 | __be16 phys_addr[3]; |
417 | struct net_device *dev = NULL; | 417 | struct net_device *dev = NULL; |
418 | int err; | 418 | int err; |
419 | 419 | ||
@@ -605,7 +605,7 @@ static int __init el3_mca_probe(struct device *device) | |||
605 | 605 | ||
606 | short i; | 606 | short i; |
607 | int ioaddr, irq, if_port; | 607 | int ioaddr, irq, if_port; |
608 | u16 phys_addr[3]; | 608 | __be16 phys_addr[3]; |
609 | struct net_device *dev = NULL; | 609 | struct net_device *dev = NULL; |
610 | u_char pos4, pos5; | 610 | u_char pos4, pos5; |
611 | struct mca_device *mdev = to_mca_device(device); | 611 | struct mca_device *mdev = to_mca_device(device); |
@@ -635,14 +635,13 @@ static int __init el3_mca_probe(struct device *device) | |||
635 | printk(KERN_DEBUG "3c529: irq %d ioaddr 0x%x ifport %d\n", irq, ioaddr, if_port); | 635 | printk(KERN_DEBUG "3c529: irq %d ioaddr 0x%x ifport %d\n", irq, ioaddr, if_port); |
636 | } | 636 | } |
637 | EL3WINDOW(0); | 637 | EL3WINDOW(0); |
638 | for (i = 0; i < 3; i++) { | 638 | for (i = 0; i < 3; i++) |
639 | phys_addr[i] = htons(read_eeprom(ioaddr, i)); | 639 | phys_addr[i] = htons(read_eeprom(ioaddr, i)); |
640 | } | ||
641 | 640 | ||
642 | dev = alloc_etherdev(sizeof (struct el3_private)); | 641 | dev = alloc_etherdev(sizeof (struct el3_private)); |
643 | if (dev == NULL) { | 642 | if (dev == NULL) { |
644 | release_region(ioaddr, EL3_IO_EXTENT); | 643 | release_region(ioaddr, EL3_IO_EXTENT); |
645 | return -ENOMEM; | 644 | return -ENOMEM; |
646 | } | 645 | } |
647 | 646 | ||
648 | netdev_boot_setup_check(dev); | 647 | netdev_boot_setup_check(dev); |
@@ -668,7 +667,7 @@ static int __init el3_eisa_probe (struct device *device) | |||
668 | { | 667 | { |
669 | short i; | 668 | short i; |
670 | int ioaddr, irq, if_port; | 669 | int ioaddr, irq, if_port; |
671 | u16 phys_addr[3]; | 670 | __be16 phys_addr[3]; |
672 | struct net_device *dev = NULL; | 671 | struct net_device *dev = NULL; |
673 | struct eisa_device *edev; | 672 | struct eisa_device *edev; |
674 | int err; | 673 | int err; |
@@ -1063,7 +1062,6 @@ el3_rx(struct net_device *dev) | |||
1063 | struct sk_buff *skb; | 1062 | struct sk_buff *skb; |
1064 | 1063 | ||
1065 | skb = dev_alloc_skb(pkt_len+5); | 1064 | skb = dev_alloc_skb(pkt_len+5); |
1066 | dev->stats.rx_bytes += pkt_len; | ||
1067 | if (el3_debug > 4) | 1065 | if (el3_debug > 4) |
1068 | printk("Receiving packet size %d status %4.4x.\n", | 1066 | printk("Receiving packet size %d status %4.4x.\n", |
1069 | pkt_len, rx_status); | 1067 | pkt_len, rx_status); |
@@ -1078,6 +1076,7 @@ el3_rx(struct net_device *dev) | |||
1078 | skb->protocol = eth_type_trans(skb,dev); | 1076 | skb->protocol = eth_type_trans(skb,dev); |
1079 | netif_rx(skb); | 1077 | netif_rx(skb); |
1080 | dev->last_rx = jiffies; | 1078 | dev->last_rx = jiffies; |
1079 | dev->stats.rx_bytes += pkt_len; | ||
1081 | dev->stats.rx_packets++; | 1080 | dev->stats.rx_packets++; |
1082 | continue; | 1081 | continue; |
1083 | } | 1082 | } |
diff --git a/drivers/net/7990.c b/drivers/net/7990.c
index 750a46f4bc58..ad6b8a5b6574 100644
--- a/drivers/net/7990.c
+++ b/drivers/net/7990.c
@@ -506,6 +506,7 @@ int lance_open (struct net_device *dev) | |||
506 | 506 | ||
507 | return res; | 507 | return res; |
508 | } | 508 | } |
509 | EXPORT_SYMBOL_GPL(lance_open); | ||
509 | 510 | ||
510 | int lance_close (struct net_device *dev) | 511 | int lance_close (struct net_device *dev) |
511 | { | 512 | { |
@@ -521,6 +522,7 @@ int lance_close (struct net_device *dev) | |||
521 | 522 | ||
522 | return 0; | 523 | return 0; |
523 | } | 524 | } |
525 | EXPORT_SYMBOL_GPL(lance_close); | ||
524 | 526 | ||
525 | void lance_tx_timeout(struct net_device *dev) | 527 | void lance_tx_timeout(struct net_device *dev) |
526 | { | 528 | { |
@@ -529,7 +531,7 @@ void lance_tx_timeout(struct net_device *dev) | |||
529 | dev->trans_start = jiffies; | 531 | dev->trans_start = jiffies; |
530 | netif_wake_queue (dev); | 532 | netif_wake_queue (dev); |
531 | } | 533 | } |
532 | 534 | EXPORT_SYMBOL_GPL(lance_tx_timeout); | |
533 | 535 | ||
534 | int lance_start_xmit (struct sk_buff *skb, struct net_device *dev) | 536 | int lance_start_xmit (struct sk_buff *skb, struct net_device *dev) |
535 | { | 537 | { |
@@ -586,6 +588,7 @@ int lance_start_xmit (struct sk_buff *skb, struct net_device *dev) | |||
586 | 588 | ||
587 | return 0; | 589 | return 0; |
588 | } | 590 | } |
591 | EXPORT_SYMBOL_GPL(lance_start_xmit); | ||
589 | 592 | ||
590 | /* taken from the depca driver via a2065.c */ | 593 | /* taken from the depca driver via a2065.c */ |
591 | static void lance_load_multicast (struct net_device *dev) | 594 | static void lance_load_multicast (struct net_device *dev) |
@@ -654,6 +657,7 @@ void lance_set_multicast (struct net_device *dev) | |||
654 | if (!stopped) | 657 | if (!stopped) |
655 | netif_start_queue (dev); | 658 | netif_start_queue (dev); |
656 | } | 659 | } |
660 | EXPORT_SYMBOL_GPL(lance_set_multicast); | ||
657 | 661 | ||
658 | #ifdef CONFIG_NET_POLL_CONTROLLER | 662 | #ifdef CONFIG_NET_POLL_CONTROLLER |
659 | void lance_poll(struct net_device *dev) | 663 | void lance_poll(struct net_device *dev) |
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 9f6cc8a56073..f4182cfffe9d 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1353,7 +1353,7 @@ config APRICOT | |||
1353 | 1353 | ||
1354 | config B44 | 1354 | config B44 |
1355 | tristate "Broadcom 440x/47xx ethernet support" | 1355 | tristate "Broadcom 440x/47xx ethernet support" |
1356 | depends on SSB_POSSIBLE | 1356 | depends on SSB_POSSIBLE && HAS_DMA |
1357 | select SSB | 1357 | select SSB |
1358 | select MII | 1358 | select MII |
1359 | help | 1359 | help |
@@ -2426,7 +2426,7 @@ config CHELSIO_T3 | |||
2426 | 2426 | ||
2427 | config EHEA | 2427 | config EHEA |
2428 | tristate "eHEA Ethernet support" | 2428 | tristate "eHEA Ethernet support" |
2429 | depends on IBMEBUS && INET && SPARSEMEM && MEMORY_HOTPLUG | 2429 | depends on IBMEBUS && INET && SPARSEMEM |
2430 | select INET_LRO | 2430 | select INET_LRO |
2431 | ---help--- | 2431 | ---help--- |
2432 | This driver supports the IBM pSeries eHEA ethernet adapter. | 2432 | This driver supports the IBM pSeries eHEA ethernet adapter. |
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 9c2394d49428..3c798ae5c343 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -471,7 +471,6 @@ static int atl1_get_permanent_address(struct atl1_hw *hw) | |||
471 | memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); | 471 | memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); |
472 | return 0; | 472 | return 0; |
473 | } | 473 | } |
474 | return 1; | ||
475 | } | 474 | } |
476 | 475 | ||
477 | /* see if SPI FLAGS exist ? */ | 476 | /* see if SPI FLAGS exist ? */ |
@@ -637,22 +636,6 @@ static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw) | |||
637 | } | 636 | } |
638 | 637 | ||
639 | /* | 638 | /* |
640 | * Force the PHY into power saving mode using vendor magic. | ||
641 | */ | ||
642 | #ifdef CONFIG_PM | ||
643 | static void atl1_phy_enter_power_saving(struct atl1_hw *hw) | ||
644 | { | ||
645 | atl1_write_phy_reg(hw, MII_DBG_ADDR, 0); | ||
646 | atl1_write_phy_reg(hw, MII_DBG_DATA, 0x124E); | ||
647 | atl1_write_phy_reg(hw, MII_DBG_ADDR, 2); | ||
648 | atl1_write_phy_reg(hw, MII_DBG_DATA, 0x3000); | ||
649 | atl1_write_phy_reg(hw, MII_DBG_ADDR, 3); | ||
650 | atl1_write_phy_reg(hw, MII_DBG_DATA, 0); | ||
651 | |||
652 | } | ||
653 | #endif | ||
654 | |||
655 | /* | ||
656 | * Resets the PHY and make all config validate | 639 | * Resets the PHY and make all config validate |
657 | * hw - Struct containing variables accessed by shared code | 640 | * hw - Struct containing variables accessed by shared code |
658 | * | 641 | * |
@@ -2023,6 +2006,7 @@ rrd_ok: | |||
2023 | /* Good Receive */ | 2006 | /* Good Receive */ |
2024 | pci_unmap_page(adapter->pdev, buffer_info->dma, | 2007 | pci_unmap_page(adapter->pdev, buffer_info->dma, |
2025 | buffer_info->length, PCI_DMA_FROMDEVICE); | 2008 | buffer_info->length, PCI_DMA_FROMDEVICE); |
2009 | buffer_info->dma = 0; | ||
2026 | skb = buffer_info->skb; | 2010 | skb = buffer_info->skb; |
2027 | length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size); | 2011 | length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size); |
2028 | 2012 | ||
@@ -2135,7 +2119,7 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, | |||
2135 | return -1; | 2119 | return -1; |
2136 | } | 2120 | } |
2137 | 2121 | ||
2138 | if (skb->protocol == ntohs(ETH_P_IP)) { | 2122 | if (skb->protocol == htons(ETH_P_IP)) { |
2139 | struct iphdr *iph = ip_hdr(skb); | 2123 | struct iphdr *iph = ip_hdr(skb); |
2140 | 2124 | ||
2141 | real_len = (((unsigned char *)iph - skb->data) + | 2125 | real_len = (((unsigned char *)iph - skb->data) + |
@@ -2859,7 +2843,6 @@ disable_wol: | |||
2859 | ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; | 2843 | ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; |
2860 | iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC); | 2844 | iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC); |
2861 | ioread32(hw->hw_addr + REG_PCIE_PHYMISC); | 2845 | ioread32(hw->hw_addr + REG_PCIE_PHYMISC); |
2862 | atl1_phy_enter_power_saving(hw); | ||
2863 | hw->phy_configured = false; | 2846 | hw->phy_configured = false; |
2864 | pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); | 2847 | pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); |
2865 | exit: | 2848 | exit: |
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 3634b5fd7919..7023d77bf380 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -1239,12 +1239,7 @@ static int au1000_rx(struct net_device *dev) | |||
1239 | */ | 1239 | */ |
1240 | static irqreturn_t au1000_interrupt(int irq, void *dev_id) | 1240 | static irqreturn_t au1000_interrupt(int irq, void *dev_id) |
1241 | { | 1241 | { |
1242 | struct net_device *dev = (struct net_device *) dev_id; | 1242 | struct net_device *dev = dev_id; |
1243 | |||
1244 | if (dev == NULL) { | ||
1245 | printk(KERN_ERR "%s: isr: null dev ptr\n", dev->name); | ||
1246 | return IRQ_RETVAL(1); | ||
1247 | } | ||
1248 | 1243 | ||
1249 | /* Handle RX interrupts first to minimize chance of overrun */ | 1244 | /* Handle RX interrupts first to minimize chance of overrun */ |
1250 | 1245 | ||
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 89c0018132ec..41443435ab1c 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/crc32.h> | 22 | #include <linux/crc32.h> |
23 | #include <linux/device.h> | 23 | #include <linux/device.h> |
24 | #include <linux/spinlock.h> | 24 | #include <linux/spinlock.h> |
25 | #include <linux/ethtool.h> | ||
26 | #include <linux/mii.h> | 25 | #include <linux/mii.h> |
27 | #include <linux/phy.h> | 26 | #include <linux/phy.h> |
28 | #include <linux/netdevice.h> | 27 | #include <linux/netdevice.h> |
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 4b46e68183e0..367b6d462708 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -5724,14 +5724,12 @@ bnx2_reset_task(struct work_struct *work) | |||
5724 | if (!netif_running(bp->dev)) | 5724 | if (!netif_running(bp->dev)) |
5725 | return; | 5725 | return; |
5726 | 5726 | ||
5727 | bp->in_reset_task = 1; | ||
5728 | bnx2_netif_stop(bp); | 5727 | bnx2_netif_stop(bp); |
5729 | 5728 | ||
5730 | bnx2_init_nic(bp); | 5729 | bnx2_init_nic(bp); |
5731 | 5730 | ||
5732 | atomic_set(&bp->intr_sem, 1); | 5731 | atomic_set(&bp->intr_sem, 1); |
5733 | bnx2_netif_start(bp); | 5732 | bnx2_netif_start(bp); |
5734 | bp->in_reset_task = 0; | ||
5735 | } | 5733 | } |
5736 | 5734 | ||
5737 | static void | 5735 | static void |
@@ -5907,12 +5905,7 @@ bnx2_close(struct net_device *dev) | |||
5907 | struct bnx2 *bp = netdev_priv(dev); | 5905 | struct bnx2 *bp = netdev_priv(dev); |
5908 | u32 reset_code; | 5906 | u32 reset_code; |
5909 | 5907 | ||
5910 | /* Calling flush_scheduled_work() may deadlock because | 5908 | cancel_work_sync(&bp->reset_task); |
5911 | * linkwatch_event() may be on the workqueue and it will try to get | ||
5912 | * the rtnl_lock which we are holding. | ||
5913 | */ | ||
5914 | while (bp->in_reset_task) | ||
5915 | msleep(1); | ||
5916 | 5909 | ||
5917 | bnx2_disable_int_sync(bp); | 5910 | bnx2_disable_int_sync(bp); |
5918 | bnx2_napi_disable(bp); | 5911 | bnx2_napi_disable(bp); |
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 1eaf5bb3d9c2..2377cc13bf61 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6656,7 +6656,6 @@ struct bnx2 { | |||
6656 | int current_interval; | 6656 | int current_interval; |
6657 | struct timer_list timer; | 6657 | struct timer_list timer; |
6658 | struct work_struct reset_task; | 6658 | struct work_struct reset_task; |
6659 | int in_reset_task; | ||
6660 | 6659 | ||
6661 | /* Used to synchronize phy accesses. */ | 6660 | /* Used to synchronize phy accesses. */ |
6662 | spinlock_t phy_lock; | 6661 | spinlock_t phy_lock; |
diff --git a/drivers/net/bnx2x.c b/drivers/net/bnx2x.c
index 7bdb5af35951..70cba64732ca 100644
--- a/drivers/net/bnx2x.c
+++ b/drivers/net/bnx2x.c
@@ -6,7 +6,8 @@ | |||
6 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
7 | * the Free Software Foundation. | 7 | * the Free Software Foundation. |
8 | * | 8 | * |
9 | * Written by: Eliezer Tamir <eliezert@broadcom.com> | 9 | * Maintained by: Eilon Greenstein <eilong@broadcom.com> |
10 | * Written by: Eliezer Tamir | ||
10 | * Based on code from Michael Chan's bnx2 driver | 11 | * Based on code from Michael Chan's bnx2 driver |
11 | * UDP CSUM errata workaround by Arik Gendelman | 12 | * UDP CSUM errata workaround by Arik Gendelman |
12 | * Slowpath rework by Vladislav Zolotarov | 13 | * Slowpath rework by Vladislav Zolotarov |
@@ -74,7 +75,7 @@ static char version[] __devinitdata = | |||
74 | "Broadcom NetXtreme II 5771X 10Gigabit Ethernet Driver " | 75 | "Broadcom NetXtreme II 5771X 10Gigabit Ethernet Driver " |
75 | DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; | 76 | DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; |
76 | 77 | ||
77 | MODULE_AUTHOR("Eliezer Tamir <eliezert@broadcom.com>"); | 78 | MODULE_AUTHOR("Eliezer Tamir"); |
78 | MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver"); | 79 | MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver"); |
79 | MODULE_LICENSE("GPL"); | 80 | MODULE_LICENSE("GPL"); |
80 | MODULE_VERSION(DRV_MODULE_VERSION); | 81 | MODULE_VERSION(DRV_MODULE_VERSION); |
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index 4f0c0d31e7c1..8e68d06510a6 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -6,7 +6,8 @@ | |||
6 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
7 | * the Free Software Foundation. | 7 | * the Free Software Foundation. |
8 | * | 8 | * |
9 | * Written by: Eliezer Tamir <eliezert@broadcom.com> | 9 | * Maintained by: Eilon Greenstein <eilong@broadcom.com> |
10 | * Written by: Eliezer Tamir | ||
10 | * Based on code from Michael Chan's bnx2 driver | 11 | * Based on code from Michael Chan's bnx2 driver |
11 | */ | 12 | */ |
12 | 13 | ||
diff --git a/drivers/net/bnx2x_init.h b/drivers/net/bnx2x_init.h
index dcaecc53bdb1..370686eef97c 100644
--- a/drivers/net/bnx2x_init.h
+++ b/drivers/net/bnx2x_init.h
@@ -6,7 +6,8 @@ | |||
6 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
7 | * the Free Software Foundation. | 7 | * the Free Software Foundation. |
8 | * | 8 | * |
9 | * Written by: Eliezer Tamir <eliezert@broadcom.com> | 9 | * Maintained by: Eilon Greenstein <eilong@broadcom.com> |
10 | * Written by: Eliezer Tamir | ||
10 | */ | 11 | */ |
11 | 12 | ||
12 | #ifndef BNX2X_INIT_H | 13 | #ifndef BNX2X_INIT_H |
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 68c41a00d93d..08f3d396bcd6 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1437,8 +1437,16 @@ int bond_create_sysfs(void) | |||
1437 | * configure multiple bonding devices. | 1437 | * configure multiple bonding devices. |
1438 | */ | 1438 | */ |
1439 | if (ret == -EEXIST) { | 1439 | if (ret == -EEXIST) { |
1440 | netdev_class = NULL; | 1440 | /* Is someone being kinky and naming a device bonding_master? */ |
1441 | return 0; | 1441 | if (__dev_get_by_name(&init_net, |
1442 | class_attr_bonding_masters.attr.name)) | ||
1443 | printk(KERN_ERR | ||
1444 | "network device named %s already exists in sysfs", | ||
1445 | class_attr_bonding_masters.attr.name); | ||
1446 | else { | ||
1447 | netdev_class = NULL; | ||
1448 | return 0; | ||
1449 | } | ||
1442 | } | 1450 | } |
1443 | 1451 | ||
1444 | return ret; | 1452 | return ret; |
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 93e13636f8dd..83768df27806 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -142,8 +142,8 @@ | |||
142 | 142 | ||
143 | #define DRV_MODULE_NAME "cassini" | 143 | #define DRV_MODULE_NAME "cassini" |
144 | #define PFX DRV_MODULE_NAME ": " | 144 | #define PFX DRV_MODULE_NAME ": " |
145 | #define DRV_MODULE_VERSION "1.5" | 145 | #define DRV_MODULE_VERSION "1.6" |
146 | #define DRV_MODULE_RELDATE "4 Jan 2008" | 146 | #define DRV_MODULE_RELDATE "21 May 2008" |
147 | 147 | ||
148 | #define CAS_DEF_MSG_ENABLE \ | 148 | #define CAS_DEF_MSG_ENABLE \ |
149 | (NETIF_MSG_DRV | \ | 149 | (NETIF_MSG_DRV | \ |
@@ -2136,9 +2136,12 @@ end_copy_pkt: | |||
2136 | if (addr) | 2136 | if (addr) |
2137 | cas_page_unmap(addr); | 2137 | cas_page_unmap(addr); |
2138 | } | 2138 | } |
2139 | skb->csum = csum_unfold(~csum); | ||
2140 | skb->ip_summed = CHECKSUM_COMPLETE; | ||
2141 | skb->protocol = eth_type_trans(skb, cp->dev); | 2139 | skb->protocol = eth_type_trans(skb, cp->dev); |
2140 | if (skb->protocol == htons(ETH_P_IP)) { | ||
2141 | skb->csum = csum_unfold(~csum); | ||
2142 | skb->ip_summed = CHECKSUM_COMPLETE; | ||
2143 | } else | ||
2144 | skb->ip_summed = CHECKSUM_NONE; | ||
2142 | return len; | 2145 | return len; |
2143 | } | 2146 | } |
2144 | 2147 | ||
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 2b5740b3d182..7f3f62e1b113 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/platform_device.h> | 38 | #include <linux/platform_device.h> |
39 | #include <linux/dma-mapping.h> | 39 | #include <linux/dma-mapping.h> |
40 | #include <asm/gpio.h> | 40 | #include <asm/gpio.h> |
41 | #include <asm/atomic.h> | ||
41 | 42 | ||
42 | MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>"); | 43 | MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>"); |
43 | MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)"); | 44 | MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)"); |
@@ -187,6 +188,7 @@ struct cpmac_desc { | |||
187 | #define CPMAC_EOQ 0x1000 | 188 | #define CPMAC_EOQ 0x1000 |
188 | struct sk_buff *skb; | 189 | struct sk_buff *skb; |
189 | struct cpmac_desc *next; | 190 | struct cpmac_desc *next; |
191 | struct cpmac_desc *prev; | ||
190 | dma_addr_t mapping; | 192 | dma_addr_t mapping; |
191 | dma_addr_t data_mapping; | 193 | dma_addr_t data_mapping; |
192 | }; | 194 | }; |
@@ -208,6 +210,7 @@ struct cpmac_priv { | |||
208 | struct work_struct reset_work; | 210 | struct work_struct reset_work; |
209 | struct platform_device *pdev; | 211 | struct platform_device *pdev; |
210 | struct napi_struct napi; | 212 | struct napi_struct napi; |
213 | atomic_t reset_pending; | ||
211 | }; | 214 | }; |
212 | 215 | ||
213 | static irqreturn_t cpmac_irq(int, void *); | 216 | static irqreturn_t cpmac_irq(int, void *); |
@@ -241,6 +244,16 @@ static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc) | |||
241 | printk("\n"); | 244 | printk("\n"); |
242 | } | 245 | } |
243 | 246 | ||
247 | static void cpmac_dump_all_desc(struct net_device *dev) | ||
248 | { | ||
249 | struct cpmac_priv *priv = netdev_priv(dev); | ||
250 | struct cpmac_desc *dump = priv->rx_head; | ||
251 | do { | ||
252 | cpmac_dump_desc(dev, dump); | ||
253 | dump = dump->next; | ||
254 | } while (dump != priv->rx_head); | ||
255 | } | ||
256 | |||
244 | static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb) | 257 | static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb) |
245 | { | 258 | { |
246 | int i; | 259 | int i; |
@@ -412,21 +425,42 @@ static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv, | |||
412 | static int cpmac_poll(struct napi_struct *napi, int budget) | 425 | static int cpmac_poll(struct napi_struct *napi, int budget) |
413 | { | 426 | { |
414 | struct sk_buff *skb; | 427 | struct sk_buff *skb; |
415 | struct cpmac_desc *desc; | 428 | struct cpmac_desc *desc, *restart; |
416 | int received = 0; | ||
417 | struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi); | 429 | struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi); |
430 | int received = 0, processed = 0; | ||
418 | 431 | ||
419 | spin_lock(&priv->rx_lock); | 432 | spin_lock(&priv->rx_lock); |
420 | if (unlikely(!priv->rx_head)) { | 433 | if (unlikely(!priv->rx_head)) { |
421 | if (netif_msg_rx_err(priv) && net_ratelimit()) | 434 | if (netif_msg_rx_err(priv) && net_ratelimit()) |
422 | printk(KERN_WARNING "%s: rx: polling, but no queue\n", | 435 | printk(KERN_WARNING "%s: rx: polling, but no queue\n", |
423 | priv->dev->name); | 436 | priv->dev->name); |
437 | spin_unlock(&priv->rx_lock); | ||
424 | netif_rx_complete(priv->dev, napi); | 438 | netif_rx_complete(priv->dev, napi); |
425 | return 0; | 439 | return 0; |
426 | } | 440 | } |
427 | 441 | ||
428 | desc = priv->rx_head; | 442 | desc = priv->rx_head; |
443 | restart = NULL; | ||
429 | while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) { | 444 | while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) { |
445 | processed++; | ||
446 | |||
447 | if ((desc->dataflags & CPMAC_EOQ) != 0) { | ||
448 | /* The last update to eoq->hw_next didn't happen | ||
449 | * soon enough, and the receiver stopped here. | ||
450 | *Remember this descriptor so we can restart | ||
451 | * the receiver after freeing some space. | ||
452 | */ | ||
453 | if (unlikely(restart)) { | ||
454 | if (netif_msg_rx_err(priv)) | ||
455 | printk(KERN_ERR "%s: poll found a" | ||
456 | " duplicate EOQ: %p and %p\n", | ||
457 | priv->dev->name, restart, desc); | ||
458 | goto fatal_error; | ||
459 | } | ||
460 | |||
461 | restart = desc->next; | ||
462 | } | ||
463 | |||
430 | skb = cpmac_rx_one(priv, desc); | 464 | skb = cpmac_rx_one(priv, desc); |
431 | if (likely(skb)) { | 465 | if (likely(skb)) { |
432 | netif_receive_skb(skb); | 466 | netif_receive_skb(skb); |
@@ -435,19 +469,90 @@ static int cpmac_poll(struct napi_struct *napi, int budget) | |||
435 | desc = desc->next; | 469 | desc = desc->next; |
436 | } | 470 | } |
437 | 471 | ||
472 | if (desc != priv->rx_head) { | ||
473 | /* We freed some buffers, but not the whole ring, | ||
474 | * add what we did free to the rx list */ | ||
475 | desc->prev->hw_next = (u32)0; | ||
476 | priv->rx_head->prev->hw_next = priv->rx_head->mapping; | ||
477 | } | ||
478 | |||
479 | /* Optimization: If we did not actually process an EOQ (perhaps because | ||
480 | * of quota limits), check to see if the tail of the queue has EOQ set. | ||
481 | * We should immediately restart in that case so that the receiver can | ||
482 | * restart and run in parallel with more packet processing. | ||
483 | * This lets us handle slightly larger bursts before running | ||
484 | * out of ring space (assuming dev->weight < ring_size) */ | ||
485 | |||
486 | if (!restart && | ||
487 | (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ)) | ||
488 | == CPMAC_EOQ && | ||
489 | (priv->rx_head->dataflags & CPMAC_OWN) != 0) { | ||
490 | /* reset EOQ so the poll loop (above) doesn't try to | ||
491 | * restart this when it eventually gets to this descriptor. | ||
492 | */ | ||
493 | priv->rx_head->prev->dataflags &= ~CPMAC_EOQ; | ||
494 | restart = priv->rx_head; | ||
495 | } | ||
496 | |||
497 | if (restart) { | ||
498 | priv->dev->stats.rx_errors++; | ||
499 | priv->dev->stats.rx_fifo_errors++; | ||
500 | if (netif_msg_rx_err(priv) && net_ratelimit()) | ||
501 | printk(KERN_WARNING "%s: rx dma ring overrun\n", | ||
502 | priv->dev->name); | ||
503 | |||
504 | if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) { | ||
505 | if (netif_msg_drv(priv)) | ||
506 | printk(KERN_ERR "%s: cpmac_poll is trying to " | ||
507 | "restart rx from a descriptor that's " | ||
508 | "not free: %p\n", | ||
509 | priv->dev->name, restart); | ||
510 | goto fatal_error; | ||
511 | } | ||
512 | |||
513 | cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping); | ||
514 | } | ||
515 | |||
438 | priv->rx_head = desc; | 516 | priv->rx_head = desc; |
439 | spin_unlock(&priv->rx_lock); | 517 | spin_unlock(&priv->rx_lock); |
440 | if (unlikely(netif_msg_rx_status(priv))) | 518 | if (unlikely(netif_msg_rx_status(priv))) |
441 | printk(KERN_DEBUG "%s: poll processed %d packets\n", | 519 | printk(KERN_DEBUG "%s: poll processed %d packets\n", |
442 | priv->dev->name, received); | 520 | priv->dev->name, received); |
443 | if (desc->dataflags & CPMAC_OWN) { | 521 | if (processed == 0) { |
522 | /* we ran out of packets to read, | ||
523 | * revert to interrupt-driven mode */ | ||
444 | netif_rx_complete(priv->dev, napi); | 524 | netif_rx_complete(priv->dev, napi); |
445 | cpmac_write(priv->regs, CPMAC_RX_PTR(0), (u32)desc->mapping); | ||
446 | cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); | 525 | cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); |
447 | return 0; | 526 | return 0; |
448 | } | 527 | } |
449 | 528 | ||
450 | return 1; | 529 | return 1; |
530 | |||
531 | fatal_error: | ||
532 | /* Something went horribly wrong. | ||
533 | * Reset hardware to try to recover rather than wedging. */ | ||
534 | |||
535 | if (netif_msg_drv(priv)) { | ||
536 | printk(KERN_ERR "%s: cpmac_poll is confused. " | ||
537 | "Resetting hardware\n", priv->dev->name); | ||
538 | cpmac_dump_all_desc(priv->dev); | ||
539 | printk(KERN_DEBUG "%s: RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n", | ||
540 | priv->dev->name, | ||
541 | cpmac_read(priv->regs, CPMAC_RX_PTR(0)), | ||
542 | cpmac_read(priv->regs, CPMAC_RX_ACK(0))); | ||
543 | } | ||
544 | |||
545 | spin_unlock(&priv->rx_lock); | ||
546 | netif_rx_complete(priv->dev, napi); | ||
547 | netif_stop_queue(priv->dev); | ||
548 | napi_disable(&priv->napi); | ||
549 | |||
550 | atomic_inc(&priv->reset_pending); | ||
551 | cpmac_hw_stop(priv->dev); | ||
552 | if (!schedule_work(&priv->reset_work)) | ||
553 | atomic_dec(&priv->reset_pending); | ||
554 | return 0; | ||
555 | |||
451 | } | 556 | } |
452 | 557 | ||
453 | static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev) | 558 | static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev) |
@@ -456,6 +561,9 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
456 | struct cpmac_desc *desc; | 561 | struct cpmac_desc *desc; |
457 | struct cpmac_priv *priv = netdev_priv(dev); | 562 | struct cpmac_priv *priv = netdev_priv(dev); |
458 | 563 | ||
564 | if (unlikely(atomic_read(&priv->reset_pending))) | ||
565 | return NETDEV_TX_BUSY; | ||
566 | |||
459 | if (unlikely(skb_padto(skb, ETH_ZLEN))) | 567 | if (unlikely(skb_padto(skb, ETH_ZLEN))) |
460 | return NETDEV_TX_OK; | 568 | return NETDEV_TX_OK; |
461 | 569 | ||
@@ -621,8 +729,10 @@ static void cpmac_clear_rx(struct net_device *dev) | |||
621 | desc->dataflags = CPMAC_OWN; | 729 | desc->dataflags = CPMAC_OWN; |
622 | dev->stats.rx_dropped++; | 730 | dev->stats.rx_dropped++; |
623 | } | 731 | } |
732 | desc->hw_next = desc->next->mapping; | ||
624 | desc = desc->next; | 733 | desc = desc->next; |
625 | } | 734 | } |
735 | priv->rx_head->prev->hw_next = 0; | ||
626 | } | 736 | } |
627 | 737 | ||
628 | static void cpmac_clear_tx(struct net_device *dev) | 738 | static void cpmac_clear_tx(struct net_device *dev) |
@@ -635,14 +745,14 @@ static void cpmac_clear_tx(struct net_device *dev) | |||
635 | priv->desc_ring[i].dataflags = 0; | 745 | priv->desc_ring[i].dataflags = 0; |
636 | if (priv->desc_ring[i].skb) { | 746 | if (priv->desc_ring[i].skb) { |
637 | dev_kfree_skb_any(priv->desc_ring[i].skb); | 747 | dev_kfree_skb_any(priv->desc_ring[i].skb); |
638 | if (netif_subqueue_stopped(dev, i)) | 748 | priv->desc_ring[i].skb = NULL; |
639 | netif_wake_subqueue(dev, i); | ||
640 | } | 749 | } |
641 | } | 750 | } |
642 | } | 751 | } |
643 | 752 | ||
644 | static void cpmac_hw_error(struct work_struct *work) | 753 | static void cpmac_hw_error(struct work_struct *work) |
645 | { | 754 | { |
755 | int i; | ||
646 | struct cpmac_priv *priv = | 756 | struct cpmac_priv *priv = |
647 | container_of(work, struct cpmac_priv, reset_work); | 757 | container_of(work, struct cpmac_priv, reset_work); |
648 | 758 | ||
@@ -651,8 +761,48 @@ static void cpmac_hw_error(struct work_struct *work) | |||
651 | spin_unlock(&priv->rx_lock); | 761 | spin_unlock(&priv->rx_lock); |
652 | cpmac_clear_tx(priv->dev); | 762 | cpmac_clear_tx(priv->dev); |
653 | cpmac_hw_start(priv->dev); | 763 | cpmac_hw_start(priv->dev); |
654 | napi_enable(&priv->napi); | 764 | barrier(); |
655 | netif_start_queue(priv->dev); | 765 | atomic_dec(&priv->reset_pending); |
766 | |||
767 | for (i = 0; i < CPMAC_QUEUES; i++) | ||
768 | netif_wake_subqueue(priv->dev, i); | ||
769 | netif_wake_queue(priv->dev); | ||
770 | cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3); | ||
771 | } | ||
772 | |||
773 | static void cpmac_check_status(struct net_device *dev) | ||
774 | { | ||
775 | struct cpmac_priv *priv = netdev_priv(dev); | ||
776 | |||
777 | u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS); | ||
778 | int rx_channel = (macstatus >> 8) & 7; | ||
779 | int rx_code = (macstatus >> 12) & 15; | ||
780 | int tx_channel = (macstatus >> 16) & 7; | ||
781 | int tx_code = (macstatus >> 20) & 15; | ||
782 | |||
783 | if (rx_code || tx_code) { | ||
784 | if (netif_msg_drv(priv) && net_ratelimit()) { | ||
785 | /* Can't find any documentation on what these | ||
786 | *error codes actually are. So just log them and hope.. | ||
787 | */ | ||
788 | if (rx_code) | ||
789 | printk(KERN_WARNING "%s: host error %d on rx " | ||
790 | "channel %d (macstatus %08x), resetting\n", | ||
791 | dev->name, rx_code, rx_channel, macstatus); | ||
792 | if (tx_code) | ||
793 | printk(KERN_WARNING "%s: host error %d on tx " | ||
794 | "channel %d (macstatus %08x), resetting\n", | ||
795 | dev->name, tx_code, tx_channel, macstatus); | ||
796 | } | ||
797 | |||
798 | netif_stop_queue(dev); | ||
799 | cpmac_hw_stop(dev); | ||
800 | if (schedule_work(&priv->reset_work)) | ||
801 | atomic_inc(&priv->reset_pending); | ||
802 | if (unlikely(netif_msg_hw(priv))) | ||
803 | cpmac_dump_regs(dev); | ||
804 | } | ||
805 | cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff); | ||
656 | } | 806 | } |
657 | 807 | ||
658 | static irqreturn_t cpmac_irq(int irq, void *dev_id) | 808 | static irqreturn_t cpmac_irq(int irq, void *dev_id) |
@@ -683,49 +833,32 @@ static irqreturn_t cpmac_irq(int irq, void *dev_id) | |||
683 | 833 | ||
684 | cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0); | 834 | cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0); |
685 | 835 | ||
686 | if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS))) { | 836 | if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS))) |
687 | if (netif_msg_drv(priv) && net_ratelimit()) | 837 | cpmac_check_status(dev); |
688 | printk(KERN_ERR "%s: hw error, resetting...\n", | ||
689 | dev->name); | ||
690 | netif_stop_queue(dev); | ||
691 | napi_disable(&priv->napi); | ||
692 | cpmac_hw_stop(dev); | ||
693 | schedule_work(&priv->reset_work); | ||
694 | if (unlikely(netif_msg_hw(priv))) | ||
695 | cpmac_dump_regs(dev); | ||
696 | } | ||
697 | 838 | ||
698 | return IRQ_HANDLED; | 839 | return IRQ_HANDLED; |
699 | } | 840 | } |
700 | 841 | ||
701 | static void cpmac_tx_timeout(struct net_device *dev) | 842 | static void cpmac_tx_timeout(struct net_device *dev) |
702 | { | 843 | { |
703 | struct cpmac_priv *priv = netdev_priv(dev); | ||
704 | int i; | 844 | int i; |
845 | struct cpmac_priv *priv = netdev_priv(dev); | ||
705 | 846 | ||
706 | spin_lock(&priv->lock); | 847 | spin_lock(&priv->lock); |
707 | dev->stats.tx_errors++; | 848 | dev->stats.tx_errors++; |
708 | spin_unlock(&priv->lock); | 849 | spin_unlock(&priv->lock); |
709 | if (netif_msg_tx_err(priv) && net_ratelimit()) | 850 | if (netif_msg_tx_err(priv) && net_ratelimit()) |
710 | printk(KERN_WARNING "%s: transmit timeout\n", dev->name); | 851 | printk(KERN_WARNING "%s: transmit timeout\n", dev->name); |
711 | /* | 852 | |
712 | * FIXME: waking up random queue is not the best thing to | 853 | atomic_inc(&priv->reset_pending); |
713 | * do... on the other hand why we got here at all? | 854 | barrier(); |
714 | */ | 855 | cpmac_clear_tx(dev); |
715 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | 856 | barrier(); |
857 | atomic_dec(&priv->reset_pending); | ||
858 | |||
859 | netif_wake_queue(priv->dev); | ||
716 | for (i = 0; i < CPMAC_QUEUES; i++) | 860 | for (i = 0; i < CPMAC_QUEUES; i++) |
717 | if (priv->desc_ring[i].skb) { | 861 | netif_wake_subqueue(dev, i); |
718 | priv->desc_ring[i].dataflags = 0; | ||
719 | dev_kfree_skb_any(priv->desc_ring[i].skb); | ||
720 | netif_wake_subqueue(dev, i); | ||
721 | break; | ||
722 | } | ||
723 | #else | ||
724 | priv->desc_ring[0].dataflags = 0; | ||
725 | if (priv->desc_ring[0].skb) | ||
726 | dev_kfree_skb_any(priv->desc_ring[0].skb); | ||
727 | netif_wake_queue(dev); | ||
728 | #endif | ||
729 | } | 862 | } |
730 | 863 | ||
731 | static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | 864 | static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) |
@@ -901,9 +1034,12 @@ static int cpmac_open(struct net_device *dev) | |||
901 | desc->buflen = CPMAC_SKB_SIZE; | 1034 | desc->buflen = CPMAC_SKB_SIZE; |
902 | desc->dataflags = CPMAC_OWN; | 1035 | desc->dataflags = CPMAC_OWN; |
903 | desc->next = &priv->rx_head[(i + 1) % priv->ring_size]; | 1036 | desc->next = &priv->rx_head[(i + 1) % priv->ring_size]; |
1037 | desc->next->prev = desc; | ||
904 | desc->hw_next = (u32)desc->next->mapping; | 1038 | desc->hw_next = (u32)desc->next->mapping; |
905 | } | 1039 | } |
906 | 1040 | ||
1041 | priv->rx_head->prev->hw_next = (u32)0; | ||
1042 | |||
907 | if ((res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, | 1043 | if ((res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, |
908 | dev->name, dev))) { | 1044 | dev->name, dev))) { |
909 | if (netif_msg_drv(priv)) | 1045 | if (netif_msg_drv(priv)) |
@@ -912,6 +1048,7 @@ static int cpmac_open(struct net_device *dev) | |||
912 | goto fail_irq; | 1048 | goto fail_irq; |
913 | } | 1049 | } |
914 | 1050 | ||
1051 | atomic_set(&priv->reset_pending, 0); | ||
915 | INIT_WORK(&priv->reset_work, cpmac_hw_error); | 1052 | INIT_WORK(&priv->reset_work, cpmac_hw_error); |
916 | cpmac_hw_start(dev); | 1053 | cpmac_hw_start(dev); |
917 | 1054 | ||
@@ -1007,21 +1144,10 @@ static int __devinit cpmac_probe(struct platform_device *pdev) | |||
1007 | 1144 | ||
1008 | if (phy_id == PHY_MAX_ADDR) { | 1145 | if (phy_id == PHY_MAX_ADDR) { |
1009 | if (external_switch || dumb_switch) { | 1146 | if (external_switch || dumb_switch) { |
1010 | struct fixed_phy_status status = {}; | 1147 | mdio_bus_id = 0; /* fixed phys bus */ |
1011 | 1148 | phy_id = pdev->id; | |
1012 | /* | ||
1013 | * FIXME: this should be in the platform code! | ||
1014 | * Since there is not platform code at all (that is, | ||
1015 | * no mainline users of that driver), place it here | ||
1016 | * for now. | ||
1017 | */ | ||
1018 | phy_id = 0; | ||
1019 | status.link = 1; | ||
1020 | status.duplex = 1; | ||
1021 | status.speed = 100; | ||
1022 | fixed_phy_add(PHY_POLL, phy_id, &status); | ||
1023 | } else { | 1149 | } else { |
1024 | printk(KERN_ERR "cpmac: no PHY present\n"); | 1150 | dev_err(&pdev->dev, "no PHY present\n"); |
1025 | return -ENODEV; | 1151 | return -ENODEV; |
1026 | } | 1152 | } |
1027 | } | 1153 | } |
@@ -1064,10 +1190,8 @@ static int __devinit cpmac_probe(struct platform_device *pdev) | |||
1064 | priv->msg_enable = netif_msg_init(debug_level, 0xff); | 1190 | priv->msg_enable = netif_msg_init(debug_level, 0xff); |
1065 | memcpy(dev->dev_addr, pdata->dev_addr, sizeof(dev->dev_addr)); | 1191 | memcpy(dev->dev_addr, pdata->dev_addr, sizeof(dev->dev_addr)); |
1066 | 1192 | ||
1067 | snprintf(priv->phy_name, BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id); | 1193 | priv->phy = phy_connect(dev, cpmac_mii.phy_map[phy_id]->dev.bus_id, |
1068 | 1194 | &cpmac_adjust_link, 0, PHY_INTERFACE_MODE_MII); | |
1069 | priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link, 0, | ||
1070 | PHY_INTERFACE_MODE_MII); | ||
1071 | if (IS_ERR(priv->phy)) { | 1195 | if (IS_ERR(priv->phy)) { |
1072 | if (netif_msg_drv(priv)) | 1196 | if (netif_msg_drv(priv)) |
1073 | printk(KERN_ERR "%s: Could not attach to PHY\n", | 1197 | printk(KERN_ERR "%s: Could not attach to PHY\n", |
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index 348371fda597..fba87abe78ee 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -1394,7 +1394,11 @@ net_open(struct net_device *dev) | |||
1394 | #endif | 1394 | #endif |
1395 | if (!result) { | 1395 | if (!result) { |
1396 | printk(KERN_ERR "%s: EEPROM is configured for unavailable media\n", dev->name); | 1396 | printk(KERN_ERR "%s: EEPROM is configured for unavailable media\n", dev->name); |
1397 | release_irq: | 1397 | release_dma: |
1398 | #if ALLOW_DMA | ||
1399 | free_dma(dev->dma); | ||
1400 | #endif | ||
1401 | release_irq: | ||
1398 | #if ALLOW_DMA | 1402 | #if ALLOW_DMA |
1399 | release_dma_buff(lp); | 1403 | release_dma_buff(lp); |
1400 | #endif | 1404 | #endif |
@@ -1442,12 +1446,12 @@ net_open(struct net_device *dev) | |||
1442 | if ((result = detect_bnc(dev)) != DETECTED_NONE) | 1446 | if ((result = detect_bnc(dev)) != DETECTED_NONE) |
1443 | break; | 1447 | break; |
1444 | printk(KERN_ERR "%s: no media detected\n", dev->name); | 1448 | printk(KERN_ERR "%s: no media detected\n", dev->name); |
1445 | goto release_irq; | 1449 | goto release_dma; |
1446 | } | 1450 | } |
1447 | switch(result) { | 1451 | switch(result) { |
1448 | case DETECTED_NONE: | 1452 | case DETECTED_NONE: |
1449 | printk(KERN_ERR "%s: no network cable attached to configured media\n", dev->name); | 1453 | printk(KERN_ERR "%s: no network cable attached to configured media\n", dev->name); |
1450 | goto release_irq; | 1454 | goto release_dma; |
1451 | case DETECTED_RJ45H: | 1455 | case DETECTED_RJ45H: |
1452 | printk(KERN_INFO "%s: using half-duplex 10Base-T (RJ-45)\n", dev->name); | 1456 | printk(KERN_INFO "%s: using half-duplex 10Base-T (RJ-45)\n", dev->name); |
1453 | break; | 1457 | break; |
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index d45bcd2660af..864295e081b6 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -903,7 +903,7 @@ dm9000_stop(struct net_device *ndev) | |||
903 | if (netif_msg_ifdown(db)) | 903 | if (netif_msg_ifdown(db)) |
904 | dev_dbg(db->dev, "shutting down %s\n", ndev->name); | 904 | dev_dbg(db->dev, "shutting down %s\n", ndev->name); |
905 | 905 | ||
906 | cancel_delayed_work(&db->phy_poll); | 906 | cancel_delayed_work_sync(&db->phy_poll); |
907 | 907 | ||
908 | netif_stop_queue(ndev); | 908 | netif_stop_queue(ndev); |
909 | netif_carrier_off(ndev); | 909 | netif_carrier_off(ndev); |
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 8cbb40f3a506..cab1835173cd 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -4201,8 +4201,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
4201 | struct e1000_adapter *adapter; | 4201 | struct e1000_adapter *adapter; |
4202 | struct e1000_hw *hw; | 4202 | struct e1000_hw *hw; |
4203 | const struct e1000_info *ei = e1000_info_tbl[ent->driver_data]; | 4203 | const struct e1000_info *ei = e1000_info_tbl[ent->driver_data]; |
4204 | unsigned long mmio_start, mmio_len; | 4204 | resource_size_t mmio_start, mmio_len; |
4205 | unsigned long flash_start, flash_len; | 4205 | resource_size_t flash_start, flash_len; |
4206 | 4206 | ||
4207 | static int cards_found; | 4207 | static int cards_found; |
4208 | int i, err, pci_using_dac; | 4208 | int i, err, pci_using_dac; |
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index d1b6d4e7495d..075fd547421e 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -1766,16 +1766,20 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa) | |||
1766 | mutex_lock(&ehea_bcmc_regs.lock); | 1766 | mutex_lock(&ehea_bcmc_regs.lock); |
1767 | 1767 | ||
1768 | /* Deregister old MAC in pHYP */ | 1768 | /* Deregister old MAC in pHYP */ |
1769 | ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC); | 1769 | if (port->state == EHEA_PORT_UP) { |
1770 | if (ret) | 1770 | ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC); |
1771 | goto out_upregs; | 1771 | if (ret) |
1772 | goto out_upregs; | ||
1773 | } | ||
1772 | 1774 | ||
1773 | port->mac_addr = cb0->port_mac_addr << 16; | 1775 | port->mac_addr = cb0->port_mac_addr << 16; |
1774 | 1776 | ||
1775 | /* Register new MAC in pHYP */ | 1777 | /* Register new MAC in pHYP */ |
1776 | ret = ehea_broadcast_reg_helper(port, H_REG_BCMC); | 1778 | if (port->state == EHEA_PORT_UP) { |
1777 | if (ret) | 1779 | ret = ehea_broadcast_reg_helper(port, H_REG_BCMC); |
1778 | goto out_upregs; | 1780 | if (ret) |
1781 | goto out_upregs; | ||
1782 | } | ||
1779 | 1783 | ||
1780 | ret = 0; | 1784 | ret = 0; |
1781 | 1785 | ||
@@ -2213,8 +2217,6 @@ static void ehea_vlan_rx_register(struct net_device *dev, | |||
2213 | goto out; | 2217 | goto out; |
2214 | } | 2218 | } |
2215 | 2219 | ||
2216 | memset(cb1->vlan_filter, 0, sizeof(cb1->vlan_filter)); | ||
2217 | |||
2218 | hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, | 2220 | hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, |
2219 | H_PORT_CB1, H_PORT_CB1_ALL, cb1); | 2221 | H_PORT_CB1, H_PORT_CB1_ALL, cb1); |
2220 | if (hret != H_SUCCESS) | 2222 | if (hret != H_SUCCESS) |
@@ -2603,7 +2605,8 @@ static int ehea_stop(struct net_device *dev) | |||
2603 | if (netif_msg_ifdown(port)) | 2605 | if (netif_msg_ifdown(port)) |
2604 | ehea_info("disabling port %s", dev->name); | 2606 | ehea_info("disabling port %s", dev->name); |
2605 | 2607 | ||
2606 | flush_scheduled_work(); | 2608 | cancel_work_sync(&port->reset_task); |
2609 | |||
2607 | mutex_lock(&port->port_lock); | 2610 | mutex_lock(&port->port_lock); |
2608 | netif_stop_queue(dev); | 2611 | netif_stop_queue(dev); |
2609 | port_napi_disable(port); | 2612 | port_napi_disable(port); |
@@ -3178,11 +3181,12 @@ out_err: | |||
3178 | 3181 | ||
3179 | static void ehea_shutdown_single_port(struct ehea_port *port) | 3182 | static void ehea_shutdown_single_port(struct ehea_port *port) |
3180 | { | 3183 | { |
3184 | struct ehea_adapter *adapter = port->adapter; | ||
3181 | unregister_netdev(port->netdev); | 3185 | unregister_netdev(port->netdev); |
3182 | ehea_unregister_port(port); | 3186 | ehea_unregister_port(port); |
3183 | kfree(port->mc_list); | 3187 | kfree(port->mc_list); |
3184 | free_netdev(port->netdev); | 3188 | free_netdev(port->netdev); |
3185 | port->adapter->active_ports--; | 3189 | adapter->active_ports--; |
3186 | } | 3190 | } |
3187 | 3191 | ||
3188 | static int ehea_setup_ports(struct ehea_adapter *adapter) | 3192 | static int ehea_setup_ports(struct ehea_adapter *adapter) |
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index 46a90e9ec563..c05cb159c772 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -400,26 +400,31 @@ enc28j60_packet_write(struct enc28j60_net *priv, int len, const u8 *data) | |||
400 | mutex_unlock(&priv->lock); | 400 | mutex_unlock(&priv->lock); |
401 | } | 401 | } |
402 | 402 | ||
403 | /* | 403 | static unsigned long msec20_to_jiffies; |
404 | * Wait until the PHY operation is complete. | 404 | |
405 | */ | 405 | static int poll_ready(struct enc28j60_net *priv, u8 reg, u8 mask, u8 val) |
406 | static int wait_phy_ready(struct enc28j60_net *priv) | ||
407 | { | 406 | { |
408 | unsigned long timeout = jiffies + 20 * HZ / 1000; | 407 | unsigned long timeout = jiffies + msec20_to_jiffies; |
409 | int ret = 1; | ||
410 | 408 | ||
411 | /* 20 msec timeout read */ | 409 | /* 20 msec timeout read */ |
412 | while (nolock_regb_read(priv, MISTAT) & MISTAT_BUSY) { | 410 | while ((nolock_regb_read(priv, reg) & mask) != val) { |
413 | if (time_after(jiffies, timeout)) { | 411 | if (time_after(jiffies, timeout)) { |
414 | if (netif_msg_drv(priv)) | 412 | if (netif_msg_drv(priv)) |
415 | printk(KERN_DEBUG DRV_NAME | 413 | dev_dbg(&priv->spi->dev, |
416 | ": PHY ready timeout!\n"); | 414 | "reg %02x ready timeout!\n", reg); |
417 | ret = 0; | 415 | return -ETIMEDOUT; |
418 | break; | ||
419 | } | 416 | } |
420 | cpu_relax(); | 417 | cpu_relax(); |
421 | } | 418 | } |
422 | return ret; | 419 | return 0; |
420 | } | ||
421 | |||
422 | /* | ||
423 | * Wait until the PHY operation is complete. | ||
424 | */ | ||
425 | static int wait_phy_ready(struct enc28j60_net *priv) | ||
426 | { | ||
427 | return poll_ready(priv, MISTAT, MISTAT_BUSY, 0) ? 0 : 1; | ||
423 | } | 428 | } |
424 | 429 | ||
425 | /* | 430 | /* |
@@ -594,6 +599,32 @@ static void nolock_txfifo_init(struct enc28j60_net *priv, u16 start, u16 end) | |||
594 | nolock_regw_write(priv, ETXNDL, end); | 599 | nolock_regw_write(priv, ETXNDL, end); |
595 | } | 600 | } |
596 | 601 | ||
602 | /* | ||
603 | * Low power mode shrinks power consumption about 100x, so we'd like | ||
604 | * the chip to be in that mode whenever it's inactive. (However, we | ||
605 | * can't stay in lowpower mode during suspend with WOL active.) | ||
606 | */ | ||
607 | static void enc28j60_lowpower(struct enc28j60_net *priv, bool is_low) | ||
608 | { | ||
609 | if (netif_msg_drv(priv)) | ||
610 | dev_dbg(&priv->spi->dev, "%s power...\n", | ||
611 | is_low ? "low" : "high"); | ||
612 | |||
613 | mutex_lock(&priv->lock); | ||
614 | if (is_low) { | ||
615 | nolock_reg_bfclr(priv, ECON1, ECON1_RXEN); | ||
616 | poll_ready(priv, ESTAT, ESTAT_RXBUSY, 0); | ||
617 | poll_ready(priv, ECON1, ECON1_TXRTS, 0); | ||
618 | /* ECON2_VRPS was set during initialization */ | ||
619 | nolock_reg_bfset(priv, ECON2, ECON2_PWRSV); | ||
620 | } else { | ||
621 | nolock_reg_bfclr(priv, ECON2, ECON2_PWRSV); | ||
622 | poll_ready(priv, ESTAT, ESTAT_CLKRDY, ESTAT_CLKRDY); | ||
623 | /* caller sets ECON1_RXEN */ | ||
624 | } | ||
625 | mutex_unlock(&priv->lock); | ||
626 | } | ||
627 | |||
597 | static int enc28j60_hw_init(struct enc28j60_net *priv) | 628 | static int enc28j60_hw_init(struct enc28j60_net *priv) |
598 | { | 629 | { |
599 | u8 reg; | 630 | u8 reg; |
@@ -612,8 +643,8 @@ static int enc28j60_hw_init(struct enc28j60_net *priv) | |||
612 | priv->tx_retry_count = 0; | 643 | priv->tx_retry_count = 0; |
613 | priv->max_pk_counter = 0; | 644 | priv->max_pk_counter = 0; |
614 | priv->rxfilter = RXFILTER_NORMAL; | 645 | priv->rxfilter = RXFILTER_NORMAL; |
615 | /* enable address auto increment */ | 646 | /* enable address auto increment and voltage regulator powersave */ |
616 | nolock_regb_write(priv, ECON2, ECON2_AUTOINC); | 647 | nolock_regb_write(priv, ECON2, ECON2_AUTOINC | ECON2_VRPS); |
617 | 648 | ||
618 | nolock_rxfifo_init(priv, RXSTART_INIT, RXEND_INIT); | 649 | nolock_rxfifo_init(priv, RXSTART_INIT, RXEND_INIT); |
619 | nolock_txfifo_init(priv, TXSTART_INIT, TXEND_INIT); | 650 | nolock_txfifo_init(priv, TXSTART_INIT, TXEND_INIT); |
@@ -690,7 +721,7 @@ static int enc28j60_hw_init(struct enc28j60_net *priv) | |||
690 | 721 | ||
691 | static void enc28j60_hw_enable(struct enc28j60_net *priv) | 722 | static void enc28j60_hw_enable(struct enc28j60_net *priv) |
692 | { | 723 | { |
693 | /* enable interrutps */ | 724 | /* enable interrupts */ |
694 | if (netif_msg_hw(priv)) | 725 | if (netif_msg_hw(priv)) |
695 | printk(KERN_DEBUG DRV_NAME ": %s() enabling interrupts.\n", | 726 | printk(KERN_DEBUG DRV_NAME ": %s() enabling interrupts.\n", |
696 | __FUNCTION__); | 727 | __FUNCTION__); |
@@ -726,15 +757,12 @@ enc28j60_setlink(struct net_device *ndev, u8 autoneg, u16 speed, u8 duplex) | |||
726 | int ret = 0; | 757 | int ret = 0; |
727 | 758 | ||
728 | if (!priv->hw_enable) { | 759 | if (!priv->hw_enable) { |
729 | if (autoneg == AUTONEG_DISABLE && speed == SPEED_10) { | 760 | /* link is in low power mode now; duplex setting |
761 | * will take effect on next enc28j60_hw_init(). | ||
762 | */ | ||
763 | if (autoneg == AUTONEG_DISABLE && speed == SPEED_10) | ||
730 | priv->full_duplex = (duplex == DUPLEX_FULL); | 764 | priv->full_duplex = (duplex == DUPLEX_FULL); |
731 | if (!enc28j60_hw_init(priv)) { | 765 | else { |
732 | if (netif_msg_drv(priv)) | ||
733 | dev_err(&ndev->dev, | ||
734 | "hw_reset() failed\n"); | ||
735 | ret = -EINVAL; | ||
736 | } | ||
737 | } else { | ||
738 | if (netif_msg_link(priv)) | 766 | if (netif_msg_link(priv)) |
739 | dev_warn(&ndev->dev, | 767 | dev_warn(&ndev->dev, |
740 | "unsupported link setting\n"); | 768 | "unsupported link setting\n"); |
@@ -1307,7 +1335,8 @@ static int enc28j60_net_open(struct net_device *dev) | |||
1307 | } | 1335 | } |
1308 | return -EADDRNOTAVAIL; | 1336 | return -EADDRNOTAVAIL; |
1309 | } | 1337 | } |
1310 | /* Reset the hardware here */ | 1338 | /* Reset the hardware here (and take it out of low power mode) */ |
1339 | enc28j60_lowpower(priv, false); | ||
1311 | enc28j60_hw_disable(priv); | 1340 | enc28j60_hw_disable(priv); |
1312 | if (!enc28j60_hw_init(priv)) { | 1341 | if (!enc28j60_hw_init(priv)) { |
1313 | if (netif_msg_ifup(priv)) | 1342 | if (netif_msg_ifup(priv)) |
@@ -1337,6 +1366,7 @@ static int enc28j60_net_close(struct net_device *dev) | |||
1337 | printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __FUNCTION__); | 1366 | printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __FUNCTION__); |
1338 | 1367 | ||
1339 | enc28j60_hw_disable(priv); | 1368 | enc28j60_hw_disable(priv); |
1369 | enc28j60_lowpower(priv, true); | ||
1340 | netif_stop_queue(dev); | 1370 | netif_stop_queue(dev); |
1341 | 1371 | ||
1342 | return 0; | 1372 | return 0; |
@@ -1537,6 +1567,8 @@ static int __devinit enc28j60_probe(struct spi_device *spi) | |||
1537 | dev->watchdog_timeo = TX_TIMEOUT; | 1567 | dev->watchdog_timeo = TX_TIMEOUT; |
1538 | SET_ETHTOOL_OPS(dev, &enc28j60_ethtool_ops); | 1568 | SET_ETHTOOL_OPS(dev, &enc28j60_ethtool_ops); |
1539 | 1569 | ||
1570 | enc28j60_lowpower(priv, true); | ||
1571 | |||
1540 | ret = register_netdev(dev); | 1572 | ret = register_netdev(dev); |
1541 | if (ret) { | 1573 | if (ret) { |
1542 | if (netif_msg_probe(priv)) | 1574 | if (netif_msg_probe(priv)) |
@@ -1556,7 +1588,7 @@ error_alloc: | |||
1556 | return ret; | 1588 | return ret; |
1557 | } | 1589 | } |
1558 | 1590 | ||
1559 | static int enc28j60_remove(struct spi_device *spi) | 1591 | static int __devexit enc28j60_remove(struct spi_device *spi) |
1560 | { | 1592 | { |
1561 | struct enc28j60_net *priv = dev_get_drvdata(&spi->dev); | 1593 | struct enc28j60_net *priv = dev_get_drvdata(&spi->dev); |
1562 | 1594 | ||
@@ -1573,15 +1605,16 @@ static int enc28j60_remove(struct spi_device *spi) | |||
1573 | static struct spi_driver enc28j60_driver = { | 1605 | static struct spi_driver enc28j60_driver = { |
1574 | .driver = { | 1606 | .driver = { |
1575 | .name = DRV_NAME, | 1607 | .name = DRV_NAME, |
1576 | .bus = &spi_bus_type, | ||
1577 | .owner = THIS_MODULE, | 1608 | .owner = THIS_MODULE, |
1578 | }, | 1609 | }, |
1579 | .probe = enc28j60_probe, | 1610 | .probe = enc28j60_probe, |
1580 | .remove = __devexit_p(enc28j60_remove), | 1611 | .remove = __devexit_p(enc28j60_remove), |
1581 | }; | 1612 | }; |
1582 | 1613 | ||
1583 | static int __init enc28j60_init(void) | 1614 | static int __init enc28j60_init(void) |
1584 | { | 1615 | { |
1616 | msec20_to_jiffies = msecs_to_jiffies(20); | ||
1617 | |||
1585 | return spi_register_driver(&enc28j60_driver); | 1618 | return spi_register_driver(&enc28j60_driver); |
1586 | } | 1619 | } |
1587 | 1620 | ||
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index 5f9c42e7a7f1..329edd9c08fc 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -78,7 +78,7 @@ module_param_array_named(mac, mpc52xx_fec_mac_addr, byte, NULL, 0); | |||
78 | MODULE_PARM_DESC(mac, "six hex digits, ie. 0x1,0x2,0xc0,0x01,0xba,0xbe"); | 78 | MODULE_PARM_DESC(mac, "six hex digits, ie. 0x1,0x2,0xc0,0x01,0xba,0xbe"); |
79 | 79 | ||
80 | #define MPC52xx_MESSAGES_DEFAULT ( NETIF_MSG_DRV | NETIF_MSG_PROBE | \ | 80 | #define MPC52xx_MESSAGES_DEFAULT ( NETIF_MSG_DRV | NETIF_MSG_PROBE | \ |
81 | NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFDOWN ) | 81 | NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP) |
82 | static int debug = -1; /* the above default */ | 82 | static int debug = -1; /* the above default */ |
83 | module_param(debug, int, 0); | 83 | module_param(debug, int, 0); |
84 | MODULE_PARM_DESC(debug, "debugging messages level"); | 84 | MODULE_PARM_DESC(debug, "debugging messages level"); |
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 35f66d4a4595..2cb244763292 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -3273,6 +3273,20 @@ static void nv_link_irq(struct net_device *dev) | |||
3273 | dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name); | 3273 | dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name); |
3274 | } | 3274 | } |
3275 | 3275 | ||
3276 | static void nv_msi_workaround(struct fe_priv *np) | ||
3277 | { | ||
3278 | |||
3279 | /* Need to toggle the msi irq mask within the ethernet device, | ||
3280 | * otherwise, future interrupts will not be detected. | ||
3281 | */ | ||
3282 | if (np->msi_flags & NV_MSI_ENABLED) { | ||
3283 | u8 __iomem *base = np->base; | ||
3284 | |||
3285 | writel(0, base + NvRegMSIIrqMask); | ||
3286 | writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); | ||
3287 | } | ||
3288 | } | ||
3289 | |||
3276 | static irqreturn_t nv_nic_irq(int foo, void *data) | 3290 | static irqreturn_t nv_nic_irq(int foo, void *data) |
3277 | { | 3291 | { |
3278 | struct net_device *dev = (struct net_device *) data; | 3292 | struct net_device *dev = (struct net_device *) data; |
@@ -3295,6 +3309,8 @@ static irqreturn_t nv_nic_irq(int foo, void *data) | |||
3295 | if (!(events & np->irqmask)) | 3309 | if (!(events & np->irqmask)) |
3296 | break; | 3310 | break; |
3297 | 3311 | ||
3312 | nv_msi_workaround(np); | ||
3313 | |||
3298 | spin_lock(&np->lock); | 3314 | spin_lock(&np->lock); |
3299 | nv_tx_done(dev); | 3315 | nv_tx_done(dev); |
3300 | spin_unlock(&np->lock); | 3316 | spin_unlock(&np->lock); |
@@ -3410,6 +3426,8 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data) | |||
3410 | if (!(events & np->irqmask)) | 3426 | if (!(events & np->irqmask)) |
3411 | break; | 3427 | break; |
3412 | 3428 | ||
3429 | nv_msi_workaround(np); | ||
3430 | |||
3413 | spin_lock(&np->lock); | 3431 | spin_lock(&np->lock); |
3414 | nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); | 3432 | nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); |
3415 | spin_unlock(&np->lock); | 3433 | spin_unlock(&np->lock); |
@@ -3750,6 +3768,8 @@ static irqreturn_t nv_nic_irq_test(int foo, void *data) | |||
3750 | if (!(events & NVREG_IRQ_TIMER)) | 3768 | if (!(events & NVREG_IRQ_TIMER)) |
3751 | return IRQ_RETVAL(0); | 3769 | return IRQ_RETVAL(0); |
3752 | 3770 | ||
3771 | nv_msi_workaround(np); | ||
3772 | |||
3753 | spin_lock(&np->lock); | 3773 | spin_lock(&np->lock); |
3754 | np->intr_test = 1; | 3774 | np->intr_test = 1; |
3755 | spin_unlock(&np->lock); | 3775 | spin_unlock(&np->lock); |
@@ -5823,6 +5843,7 @@ static int nv_resume(struct pci_dev *pdev) | |||
5823 | writel(txreg, base + NvRegTransmitPoll); | 5843 | writel(txreg, base + NvRegTransmitPoll); |
5824 | 5844 | ||
5825 | rc = nv_open(dev); | 5845 | rc = nv_open(dev); |
5846 | nv_set_multicast(dev); | ||
5826 | out: | 5847 | out: |
5827 | return rc; | 5848 | return rc; |
5828 | } | 5849 | } |
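The new nv_msi_workaround() is called from every interrupt path touched above (nv_nic_irq, nv_nic_irq_optimized and nv_nic_irq_test), so the re-arm logic lives in one place; nv_resume() additionally reprograms the RX filter via nv_set_multicast(). A minimal sketch of the re-arm pattern, with the register names taken from the hunk and the surrounding structure purely illustrative:

        /* Toggle the device-side MSI mask so a source that stayed asserted
         * generates a fresh message; without this, later interrupts can be
         * lost when MSI is enabled. */
        static void msi_rearm_sketch(u8 __iomem *base)
        {
                writel(0, base + NvRegMSIIrqMask);                          /* mask   */
                writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); /* unmask */
        }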
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c index 67b4b0728fce..a5baaf59ff66 100644 --- a/drivers/net/fs_enet/fs_enet-main.c +++ b/drivers/net/fs_enet/fs_enet-main.c | |||
@@ -1093,7 +1093,7 @@ err: | |||
1093 | if (registered) | 1093 | if (registered) |
1094 | unregister_netdev(ndev); | 1094 | unregister_netdev(ndev); |
1095 | 1095 | ||
1096 | if (fep != NULL) { | 1096 | if (fep && fep->ops) { |
1097 | (*fep->ops->free_bd)(ndev); | 1097 | (*fep->ops->free_bd)(ndev); |
1098 | (*fep->ops->cleanup_data)(ndev); | 1098 | (*fep->ops->cleanup_data)(ndev); |
1099 | } | 1099 | } |
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c index dde9c7e6408a..00bc7fbb6b37 100644 --- a/drivers/net/hamradio/baycom_epp.c +++ b/drivers/net/hamradio/baycom_epp.c | |||
@@ -959,7 +959,7 @@ static int epp_close(struct net_device *dev) | |||
959 | unsigned char tmp[1]; | 959 | unsigned char tmp[1]; |
960 | 960 | ||
961 | bc->work_running = 0; | 961 | bc->work_running = 0; |
962 | flush_scheduled_work(); | 962 | cancel_delayed_work_sync(&bc->run_work); |
963 | bc->stat = EPP_DCDBIT; | 963 | bc->stat = EPP_DCDBIT; |
964 | tmp[0] = 0; | 964 | tmp[0] = 0; |
965 | pp->ops->epp_write_addr(pp, tmp, 1, 0); | 965 | pp->ops->epp_write_addr(pp, tmp, 1, 0); |
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c index f90515935833..45ae9d1191d7 100644 --- a/drivers/net/hamradio/scc.c +++ b/drivers/net/hamradio/scc.c | |||
@@ -1340,9 +1340,10 @@ static unsigned int scc_set_param(struct scc_channel *scc, unsigned int cmd, uns | |||
1340 | case PARAM_RTS: | 1340 | case PARAM_RTS: |
1341 | if ( !(scc->wreg[R5] & RTS) ) | 1341 | if ( !(scc->wreg[R5] & RTS) ) |
1342 | { | 1342 | { |
1343 | if (arg != TX_OFF) | 1343 | if (arg != TX_OFF) { |
1344 | scc_key_trx(scc, TX_ON); | 1344 | scc_key_trx(scc, TX_ON); |
1345 | scc_start_tx_timer(scc, t_txdelay, scc->kiss.txdelay); | 1345 | scc_start_tx_timer(scc, t_txdelay, scc->kiss.txdelay); |
1346 | } | ||
1346 | } else { | 1347 | } else { |
1347 | if (arg == TX_OFF) | 1348 | if (arg == TX_OFF) |
1348 | { | 1349 | { |
diff --git a/drivers/net/ibm_newemac/Kconfig b/drivers/net/ibm_newemac/Kconfig index 0d3e7380bad0..70a3272ee998 100644 --- a/drivers/net/ibm_newemac/Kconfig +++ b/drivers/net/ibm_newemac/Kconfig | |||
@@ -1,6 +1,7 @@ | |||
1 | config IBM_NEW_EMAC | 1 | config IBM_NEW_EMAC |
2 | tristate "IBM EMAC Ethernet support" | 2 | tristate "IBM EMAC Ethernet support" |
3 | depends on PPC_DCR && PPC_MERGE | 3 | depends on PPC_DCR && PPC_MERGE |
4 | select CRC32 | ||
4 | help | 5 | help |
5 | This driver supports the IBM EMAC family of Ethernet controllers | 6 | This driver supports the IBM EMAC family of Ethernet controllers |
6 | typically found on 4xx embedded PowerPC chips, but also on the | 7 | typically found on 4xx embedded PowerPC chips, but also on the |
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c index 9b358f61ed7f..679a0826780e 100644 --- a/drivers/net/ipg.c +++ b/drivers/net/ipg.c | |||
@@ -577,12 +577,12 @@ static void ipg_nic_set_multicast_list(struct net_device *dev) | |||
577 | /* NIC to be configured in promiscuous mode. */ | 577 | /* NIC to be configured in promiscuous mode. */ |
578 | receivemode = IPG_RM_RECEIVEALLFRAMES; | 578 | receivemode = IPG_RM_RECEIVEALLFRAMES; |
579 | } else if ((dev->flags & IFF_ALLMULTI) || | 579 | } else if ((dev->flags & IFF_ALLMULTI) || |
580 | (dev->flags & IFF_MULTICAST & | 580 | ((dev->flags & IFF_MULTICAST) && |
581 | (dev->mc_count > IPG_MULTICAST_HASHTABLE_SIZE))) { | 581 | (dev->mc_count > IPG_MULTICAST_HASHTABLE_SIZE))) { |
582 | /* NIC to be configured to receive all multicast | 582 | /* NIC to be configured to receive all multicast |
583 | * frames. */ | 583 | * frames. */ |
584 | receivemode |= IPG_RM_RECEIVEMULTICAST; | 584 | receivemode |= IPG_RM_RECEIVEMULTICAST; |
585 | } else if (dev->flags & IFF_MULTICAST & (dev->mc_count > 0)) { | 585 | } else if ((dev->flags & IFF_MULTICAST) && (dev->mc_count > 0)) { |
586 | /* NIC to be configured to receive selected | 586 | /* NIC to be configured to receive selected |
587 | * multicast addresses. */ | 587 | * multicast addresses. */ |
588 | receivemode |= IPG_RM_RECEIVEMULTICASTHASH; | 588 | receivemode |= IPG_RM_RECEIVEMULTICASTHASH; |
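The old conditions mixed a bitwise AND with a boolean: IFF_MULTICAST & (dev->mc_count > N) reduces the flag to the low bit of the comparison, so these multicast tests always evaluated to zero. A small user-space illustration (the threshold 16 is arbitrary, not the driver's hash-table size):

        #include <stdio.h>

        #define IFF_MULTICAST 0x1000    /* same value as in <linux/if.h> */

        int main(void)
        {
                int flags = IFF_MULTICAST;
                int mc_count = 40, limit = 16;  /* illustrative numbers */

                /* broken: 0x1000 & 0x1000 & 1 == 0, the flag information is lost */
                printf("bitwise: %d\n", flags & IFF_MULTICAST & (mc_count > limit));
                /* fixed: flag test and count test evaluated separately */
                printf("logical: %d\n", (flags & IFF_MULTICAST) && (mc_count > limit));
                return 0;
        }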
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig index ce816ba9c40d..e6317557a531 100644 --- a/drivers/net/irda/Kconfig +++ b/drivers/net/irda/Kconfig | |||
@@ -329,6 +329,7 @@ config PXA_FICP | |||
329 | config MCS_FIR | 329 | config MCS_FIR |
330 | tristate "MosChip MCS7780 IrDA-USB dongle" | 330 | tristate "MosChip MCS7780 IrDA-USB dongle" |
331 | depends on IRDA && USB && EXPERIMENTAL | 331 | depends on IRDA && USB && EXPERIMENTAL |
332 | select CRC32 | ||
332 | help | 333 | help |
333 | Say Y or M here if you want to build support for the MosChip | 334 | Say Y or M here if you want to build support for the MosChip |
334 | MCS7780 IrDA-USB bridge device driver. | 335 | MCS7780 IrDA-USB bridge device driver. |
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c index 9081234ab458..6f50ed7b183f 100644 --- a/drivers/net/irda/irda-usb.c +++ b/drivers/net/irda/irda-usb.c | |||
@@ -1120,7 +1120,7 @@ static int stir421x_patch_device(struct irda_usb_cb *self) | |||
1120 | } | 1120 | } |
1121 | } | 1121 | } |
1122 | 1122 | ||
1123 | if (self->usbdev->descriptor.bcdDevice == fw_version) { | 1123 | if (self->usbdev->descriptor.bcdDevice == cpu_to_le16(fw_version)) { |
1124 | /* | 1124 | /* |
1125 | * If we're here, we've found a correct patch | 1125 | * If we're here, we've found a correct patch |
1126 | * The actual image starts after the "STMP" keyword | 1126 | * The actual image starts after the "STMP" keyword |
diff --git a/drivers/net/irda/irda-usb.h b/drivers/net/irda/irda-usb.h index e846c38224a3..a0ca9c1fe196 100644 --- a/drivers/net/irda/irda-usb.h +++ b/drivers/net/irda/irda-usb.h | |||
@@ -117,11 +117,11 @@ | |||
117 | struct irda_class_desc { | 117 | struct irda_class_desc { |
118 | __u8 bLength; | 118 | __u8 bLength; |
119 | __u8 bDescriptorType; | 119 | __u8 bDescriptorType; |
120 | __u16 bcdSpecRevision; | 120 | __le16 bcdSpecRevision; |
121 | __u8 bmDataSize; | 121 | __u8 bmDataSize; |
122 | __u8 bmWindowSize; | 122 | __u8 bmWindowSize; |
123 | __u8 bmMinTurnaroundTime; | 123 | __u8 bmMinTurnaroundTime; |
124 | __u16 wBaudRate; | 124 | __le16 wBaudRate; |
125 | __u8 bmAdditionalBOFs; | 125 | __u8 bmAdditionalBOFs; |
126 | __u8 bIrdaRateSniff; | 126 | __u8 bIrdaRateSniff; |
127 | __u8 bMaxUnicastList; | 127 | __u8 bMaxUnicastList; |
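Both irda-usb hunks apply the same rule: multi-byte USB descriptor fields are little-endian on the wire, so they are declared __le16 and compared in wire order. A minimal sketch (the helper name is made up; cpu_to_le16()/le16_to_cpu() are the standard byte-order helpers):

        static bool fw_matches_device(__le16 bcdDevice, u16 fw_version)
        {
                /* convert the host-order value once and compare in wire order */
                return bcdDevice == cpu_to_le16(fw_version);
                /* equivalently: le16_to_cpu(bcdDevice) == fw_version */
        }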
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c index 6321b059ce13..2f38e847e2cd 100644 --- a/drivers/net/ixgbe/ixgbe_82598.c +++ b/drivers/net/ixgbe/ixgbe_82598.c | |||
@@ -58,8 +58,8 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw); | |||
58 | 58 | ||
59 | static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) | 59 | static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) |
60 | { | 60 | { |
61 | hw->mac.num_rx_queues = IXGBE_82598_MAX_TX_QUEUES; | 61 | hw->mac.num_rx_queues = IXGBE_82598_MAX_RX_QUEUES; |
62 | hw->mac.num_tx_queues = IXGBE_82598_MAX_RX_QUEUES; | 62 | hw->mac.num_tx_queues = IXGBE_82598_MAX_TX_QUEUES; |
63 | hw->mac.num_rx_addrs = IXGBE_82598_RAR_ENTRIES; | 63 | hw->mac.num_rx_addrs = IXGBE_82598_RAR_ENTRIES; |
64 | 64 | ||
65 | /* PHY ops are filled in by default properly for Fiber only */ | 65 | /* PHY ops are filled in by default properly for Fiber only */ |
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index c91b12ea26ad..e0d76c75aea0 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c | |||
@@ -75,7 +75,7 @@ | |||
75 | #include "myri10ge_mcp.h" | 75 | #include "myri10ge_mcp.h" |
76 | #include "myri10ge_mcp_gen_header.h" | 76 | #include "myri10ge_mcp_gen_header.h" |
77 | 77 | ||
78 | #define MYRI10GE_VERSION_STR "1.3.2-1.287" | 78 | #define MYRI10GE_VERSION_STR "1.3.99-1.347" |
79 | 79 | ||
80 | MODULE_DESCRIPTION("Myricom 10G driver (10GbE)"); | 80 | MODULE_DESCRIPTION("Myricom 10G driver (10GbE)"); |
81 | MODULE_AUTHOR("Maintainer: help@myri.com"); | 81 | MODULE_AUTHOR("Maintainer: help@myri.com"); |
@@ -631,7 +631,7 @@ static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp) | |||
631 | return status; | 631 | return status; |
632 | } | 632 | } |
633 | 633 | ||
634 | int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp) | 634 | static int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp) |
635 | { | 635 | { |
636 | struct myri10ge_cmd cmd; | 636 | struct myri10ge_cmd cmd; |
637 | int status; | 637 | int status; |
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h index 8cb29f5b1038..da4c4fb97064 100644 --- a/drivers/net/netxen/netxen_nic.h +++ b/drivers/net/netxen/netxen_nic.h | |||
@@ -776,7 +776,6 @@ struct netxen_hardware_context { | |||
776 | 776 | ||
777 | u8 revision_id; | 777 | u8 revision_id; |
778 | u16 board_type; | 778 | u16 board_type; |
779 | u16 max_ports; | ||
780 | struct netxen_board_info boardcfg; | 779 | struct netxen_board_info boardcfg; |
781 | u32 xg_linkup; | 780 | u32 xg_linkup; |
782 | u32 qg_linksup; | 781 | u32 qg_linksup; |
@@ -863,6 +862,7 @@ struct netxen_adapter { | |||
863 | unsigned char mac_addr[ETH_ALEN]; | 862 | unsigned char mac_addr[ETH_ALEN]; |
864 | int mtu; | 863 | int mtu; |
865 | int portnum; | 864 | int portnum; |
865 | u8 physical_port; | ||
866 | 866 | ||
867 | struct work_struct watchdog_task; | 867 | struct work_struct watchdog_task; |
868 | struct timer_list watchdog_timer; | 868 | struct timer_list watchdog_timer; |
@@ -1034,7 +1034,6 @@ int netxen_rom_se(struct netxen_adapter *adapter, int addr); | |||
1034 | 1034 | ||
1035 | /* Functions from netxen_nic_isr.c */ | 1035 | /* Functions from netxen_nic_isr.c */ |
1036 | void netxen_initialize_adapter_sw(struct netxen_adapter *adapter); | 1036 | void netxen_initialize_adapter_sw(struct netxen_adapter *adapter); |
1037 | void netxen_initialize_adapter_hw(struct netxen_adapter *adapter); | ||
1038 | void *netxen_alloc(struct pci_dev *pdev, size_t sz, dma_addr_t * ptr, | 1037 | void *netxen_alloc(struct pci_dev *pdev, size_t sz, dma_addr_t * ptr, |
1039 | struct pci_dev **used_dev); | 1038 | struct pci_dev **used_dev); |
1040 | void netxen_initialize_adapter_ops(struct netxen_adapter *adapter); | 1039 | void netxen_initialize_adapter_ops(struct netxen_adapter *adapter); |
@@ -1077,20 +1076,6 @@ static const struct netxen_brdinfo netxen_boards[] = { | |||
1077 | 1076 | ||
1078 | #define NUM_SUPPORTED_BOARDS ARRAY_SIZE(netxen_boards) | 1077 | #define NUM_SUPPORTED_BOARDS ARRAY_SIZE(netxen_boards) |
1079 | 1078 | ||
1080 | static inline void get_brd_port_by_type(u32 type, int *ports) | ||
1081 | { | ||
1082 | int i, found = 0; | ||
1083 | for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) { | ||
1084 | if (netxen_boards[i].brdtype == type) { | ||
1085 | *ports = netxen_boards[i].ports; | ||
1086 | found = 1; | ||
1087 | break; | ||
1088 | } | ||
1089 | } | ||
1090 | if (!found) | ||
1091 | *ports = 0; | ||
1092 | } | ||
1093 | |||
1094 | static inline void get_brd_name_by_type(u32 type, char *name) | 1079 | static inline void get_brd_name_by_type(u32 type, char *name) |
1095 | { | 1080 | { |
1096 | int i, found = 0; | 1081 | int i, found = 0; |
@@ -1169,5 +1154,4 @@ extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, | |||
1169 | 1154 | ||
1170 | extern struct ethtool_ops netxen_nic_ethtool_ops; | 1155 | extern struct ethtool_ops netxen_nic_ethtool_ops; |
1171 | 1156 | ||
1172 | extern int physical_port[]; /* physical port # from virtual port.*/ | ||
1173 | #endif /* __NETXEN_NIC_H_ */ | 1157 | #endif /* __NETXEN_NIC_H_ */ |
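The netxen changes in this series retire the module-global physical_port[] table in favour of the per-adapter field added here; netxen_nic_probe() seeds it from the firmware's virtual-to-physical mapping further down. A condensed sketch of the idea (struct and helper are simplified, not the driver's definitions):

        struct adapter_sketch {
                int portnum;            /* virtual port, derived from the PCI function */
                u8  physical_port;      /* physical port, possibly remapped by firmware */
        };

        static int sketch_physical_port(struct adapter_sketch *adapter)
        {
                /* old: return physical_port[adapter->portnum];  -- shared global */
                return adapter->physical_port;  /* new: per-device state */
        }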
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c index 6e98d830eefb..723487bf200c 100644 --- a/drivers/net/netxen/netxen_nic_ethtool.c +++ b/drivers/net/netxen/netxen_nic_ethtool.c | |||
@@ -369,7 +369,7 @@ netxen_nic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) | |||
369 | for (i = 3; niu_registers[mode].reg[i - 3] != -1; i++) { | 369 | for (i = 3; niu_registers[mode].reg[i - 3] != -1; i++) { |
370 | /* GB: port specific registers */ | 370 | /* GB: port specific registers */ |
371 | if (mode == 0 && i >= 19) | 371 | if (mode == 0 && i >= 19) |
372 | window = physical_port[adapter->portnum] * | 372 | window = adapter->physical_port * |
373 | NETXEN_NIC_PORT_WINDOW; | 373 | NETXEN_NIC_PORT_WINDOW; |
374 | 374 | ||
375 | NETXEN_NIC_LOCKED_READ_REG(niu_registers[mode]. | 375 | NETXEN_NIC_LOCKED_READ_REG(niu_registers[mode]. |
@@ -527,7 +527,7 @@ netxen_nic_get_pauseparam(struct net_device *dev, | |||
527 | { | 527 | { |
528 | struct netxen_adapter *adapter = netdev_priv(dev); | 528 | struct netxen_adapter *adapter = netdev_priv(dev); |
529 | __u32 val; | 529 | __u32 val; |
530 | int port = physical_port[adapter->portnum]; | 530 | int port = adapter->physical_port; |
531 | 531 | ||
532 | if (adapter->ahw.board_type == NETXEN_NIC_GBE) { | 532 | if (adapter->ahw.board_type == NETXEN_NIC_GBE) { |
533 | if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS)) | 533 | if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS)) |
@@ -573,7 +573,7 @@ netxen_nic_set_pauseparam(struct net_device *dev, | |||
573 | { | 573 | { |
574 | struct netxen_adapter *adapter = netdev_priv(dev); | 574 | struct netxen_adapter *adapter = netdev_priv(dev); |
575 | __u32 val; | 575 | __u32 val; |
576 | int port = physical_port[adapter->portnum]; | 576 | int port = adapter->physical_port; |
577 | /* read mode */ | 577 | /* read mode */ |
578 | if (adapter->ahw.board_type == NETXEN_NIC_GBE) { | 578 | if (adapter->ahw.board_type == NETXEN_NIC_GBE) { |
579 | if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS)) | 579 | if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS)) |
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c index af7356468251..c43d06b8de9b 100644 --- a/drivers/net/netxen/netxen_nic_hw.c +++ b/drivers/net/netxen/netxen_nic_hw.c | |||
@@ -396,11 +396,8 @@ int netxen_nic_hw_resources(struct netxen_adapter *adapter) | |||
396 | } | 396 | } |
397 | adapter->intr_scheme = readl( | 397 | adapter->intr_scheme = readl( |
398 | NETXEN_CRB_NORMALIZE(adapter, CRB_NIC_CAPABILITIES_FW)); | 398 | NETXEN_CRB_NORMALIZE(adapter, CRB_NIC_CAPABILITIES_FW)); |
399 | printk(KERN_NOTICE "%s: FW capabilities:0x%x\n", netxen_nic_driver_name, | ||
400 | adapter->intr_scheme); | ||
401 | adapter->msi_mode = readl( | 399 | adapter->msi_mode = readl( |
402 | NETXEN_CRB_NORMALIZE(adapter, CRB_NIC_MSI_MODE_FW)); | 400 | NETXEN_CRB_NORMALIZE(adapter, CRB_NIC_MSI_MODE_FW)); |
403 | DPRINTK(INFO, "Receive Peg ready too. starting stuff\n"); | ||
404 | 401 | ||
405 | addr = netxen_alloc(adapter->ahw.pdev, | 402 | addr = netxen_alloc(adapter->ahw.pdev, |
406 | sizeof(struct netxen_ring_ctx) + | 403 | sizeof(struct netxen_ring_ctx) + |
@@ -408,8 +405,6 @@ int netxen_nic_hw_resources(struct netxen_adapter *adapter) | |||
408 | (dma_addr_t *) & adapter->ctx_desc_phys_addr, | 405 | (dma_addr_t *) & adapter->ctx_desc_phys_addr, |
409 | &adapter->ctx_desc_pdev); | 406 | &adapter->ctx_desc_pdev); |
410 | 407 | ||
411 | printk(KERN_INFO "ctx_desc_phys_addr: 0x%llx\n", | ||
412 | (unsigned long long) adapter->ctx_desc_phys_addr); | ||
413 | if (addr == NULL) { | 408 | if (addr == NULL) { |
414 | DPRINTK(ERR, "bad return from pci_alloc_consistent\n"); | 409 | DPRINTK(ERR, "bad return from pci_alloc_consistent\n"); |
415 | err = -ENOMEM; | 410 | err = -ENOMEM; |
@@ -429,8 +424,6 @@ int netxen_nic_hw_resources(struct netxen_adapter *adapter) | |||
429 | adapter->max_tx_desc_count, | 424 | adapter->max_tx_desc_count, |
430 | (dma_addr_t *) & hw->cmd_desc_phys_addr, | 425 | (dma_addr_t *) & hw->cmd_desc_phys_addr, |
431 | &adapter->ahw.cmd_desc_pdev); | 426 | &adapter->ahw.cmd_desc_pdev); |
432 | printk(KERN_INFO "cmd_desc_phys_addr: 0x%llx\n", | ||
433 | (unsigned long long) hw->cmd_desc_phys_addr); | ||
434 | 427 | ||
435 | if (addr == NULL) { | 428 | if (addr == NULL) { |
436 | DPRINTK(ERR, "bad return from pci_alloc_consistent\n"); | 429 | DPRINTK(ERR, "bad return from pci_alloc_consistent\n"); |
@@ -1032,15 +1025,15 @@ int netxen_nic_get_board_info(struct netxen_adapter *adapter) | |||
1032 | int netxen_nic_set_mtu_gb(struct netxen_adapter *adapter, int new_mtu) | 1025 | int netxen_nic_set_mtu_gb(struct netxen_adapter *adapter, int new_mtu) |
1033 | { | 1026 | { |
1034 | netxen_nic_write_w0(adapter, | 1027 | netxen_nic_write_w0(adapter, |
1035 | NETXEN_NIU_GB_MAX_FRAME_SIZE( | 1028 | NETXEN_NIU_GB_MAX_FRAME_SIZE(adapter->physical_port), |
1036 | physical_port[adapter->portnum]), new_mtu); | 1029 | new_mtu); |
1037 | return 0; | 1030 | return 0; |
1038 | } | 1031 | } |
1039 | 1032 | ||
1040 | int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu) | 1033 | int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu) |
1041 | { | 1034 | { |
1042 | new_mtu += NETXEN_NIU_HDRSIZE + NETXEN_NIU_TLRSIZE; | 1035 | new_mtu += NETXEN_NIU_HDRSIZE + NETXEN_NIU_TLRSIZE; |
1043 | if (physical_port[adapter->portnum] == 0) | 1036 | if (adapter->physical_port == 0) |
1044 | netxen_nic_write_w0(adapter, NETXEN_NIU_XGE_MAX_FRAME_SIZE, | 1037 | netxen_nic_write_w0(adapter, NETXEN_NIU_XGE_MAX_FRAME_SIZE, |
1045 | new_mtu); | 1038 | new_mtu); |
1046 | else | 1039 | else |
@@ -1051,7 +1044,7 @@ int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu) | |||
1051 | 1044 | ||
1052 | void netxen_nic_init_niu_gb(struct netxen_adapter *adapter) | 1045 | void netxen_nic_init_niu_gb(struct netxen_adapter *adapter) |
1053 | { | 1046 | { |
1054 | netxen_niu_gbe_init_port(adapter, physical_port[adapter->portnum]); | 1047 | netxen_niu_gbe_init_port(adapter, adapter->physical_port); |
1055 | } | 1048 | } |
1056 | 1049 | ||
1057 | void | 1050 | void |
@@ -1127,7 +1120,6 @@ void netxen_nic_set_link_parameters(struct netxen_adapter *adapter) | |||
1127 | 1120 | ||
1128 | void netxen_nic_flash_print(struct netxen_adapter *adapter) | 1121 | void netxen_nic_flash_print(struct netxen_adapter *adapter) |
1129 | { | 1122 | { |
1130 | int valid = 1; | ||
1131 | u32 fw_major = 0; | 1123 | u32 fw_major = 0; |
1132 | u32 fw_minor = 0; | 1124 | u32 fw_minor = 0; |
1133 | u32 fw_build = 0; | 1125 | u32 fw_build = 0; |
@@ -1137,70 +1129,62 @@ void netxen_nic_flash_print(struct netxen_adapter *adapter) | |||
1137 | __le32 *ptr32; | 1129 | __le32 *ptr32; |
1138 | 1130 | ||
1139 | struct netxen_board_info *board_info = &(adapter->ahw.boardcfg); | 1131 | struct netxen_board_info *board_info = &(adapter->ahw.boardcfg); |
1140 | if (board_info->magic != NETXEN_BDINFO_MAGIC) { | 1132 | |
1141 | printk | 1133 | adapter->driver_mismatch = 0; |
1142 | ("NetXen Unknown board config, Read 0x%x expected as 0x%x\n", | 1134 | |
1143 | board_info->magic, NETXEN_BDINFO_MAGIC); | 1135 | ptr32 = (u32 *)&serial_num; |
1144 | valid = 0; | 1136 | addr = NETXEN_USER_START + |
1145 | } | 1137 | offsetof(struct netxen_new_user_info, serial_num); |
1146 | if (board_info->header_version != NETXEN_BDINFO_VERSION) { | 1138 | for (i = 0; i < 8; i++) { |
1147 | printk("NetXen Unknown board config version." | 1139 | if (netxen_rom_fast_read(adapter, addr, ptr32) == -1) { |
1148 | " Read %x, expected %x\n", | 1140 | printk("%s: ERROR reading %s board userarea.\n", |
1149 | board_info->header_version, NETXEN_BDINFO_VERSION); | 1141 | netxen_nic_driver_name, |
1150 | valid = 0; | 1142 | netxen_nic_driver_name); |
1151 | } | 1143 | adapter->driver_mismatch = 1; |
1152 | if (valid) { | 1144 | return; |
1153 | ptr32 = (u32 *)&serial_num; | ||
1154 | addr = NETXEN_USER_START + | ||
1155 | offsetof(struct netxen_new_user_info, serial_num); | ||
1156 | for (i = 0; i < 8; i++) { | ||
1157 | if (netxen_rom_fast_read(adapter, addr, ptr32) == -1) { | ||
1158 | printk("%s: ERROR reading %s board userarea.\n", | ||
1159 | netxen_nic_driver_name, | ||
1160 | netxen_nic_driver_name); | ||
1161 | return; | ||
1162 | } | ||
1163 | ptr32++; | ||
1164 | addr += sizeof(u32); | ||
1165 | } | 1145 | } |
1146 | ptr32++; | ||
1147 | addr += sizeof(u32); | ||
1148 | } | ||
1149 | |||
1150 | fw_major = readl(NETXEN_CRB_NORMALIZE(adapter, | ||
1151 | NETXEN_FW_VERSION_MAJOR)); | ||
1152 | fw_minor = readl(NETXEN_CRB_NORMALIZE(adapter, | ||
1153 | NETXEN_FW_VERSION_MINOR)); | ||
1154 | fw_build = | ||
1155 | readl(NETXEN_CRB_NORMALIZE(adapter, NETXEN_FW_VERSION_SUB)); | ||
1166 | 1156 | ||
1157 | if (adapter->portnum == 0) { | ||
1167 | get_brd_name_by_type(board_info->board_type, brd_name); | 1158 | get_brd_name_by_type(board_info->board_type, brd_name); |
1168 | 1159 | ||
1169 | printk("NetXen %s Board S/N %s Chip id 0x%x\n", | 1160 | printk("NetXen %s Board S/N %s Chip id 0x%x\n", |
1170 | brd_name, serial_num, board_info->chip_id); | 1161 | brd_name, serial_num, board_info->chip_id); |
1171 | 1162 | printk("NetXen Firmware version %d.%d.%d\n", fw_major, | |
1172 | printk("NetXen %s Board #%d, Chip id 0x%x\n", | 1163 | fw_minor, fw_build); |
1173 | board_info->board_type == 0x0b ? "XGB" : "GBE", | ||
1174 | board_info->board_num, board_info->chip_id); | ||
1175 | fw_major = readl(NETXEN_CRB_NORMALIZE(adapter, | ||
1176 | NETXEN_FW_VERSION_MAJOR)); | ||
1177 | fw_minor = readl(NETXEN_CRB_NORMALIZE(adapter, | ||
1178 | NETXEN_FW_VERSION_MINOR)); | ||
1179 | fw_build = | ||
1180 | readl(NETXEN_CRB_NORMALIZE(adapter, NETXEN_FW_VERSION_SUB)); | ||
1181 | |||
1182 | printk("NetXen Firmware version %d.%d.%d\n", fw_major, fw_minor, | ||
1183 | fw_build); | ||
1184 | } | 1164 | } |
1165 | |||
1185 | if (fw_major != _NETXEN_NIC_LINUX_MAJOR) { | 1166 | if (fw_major != _NETXEN_NIC_LINUX_MAJOR) { |
1186 | printk(KERN_ERR "The mismatch in driver version and firmware " | ||
1187 | "version major number\n" | ||
1188 | "Driver version major number = %d \t" | ||
1189 | "Firmware version major number = %d \n", | ||
1190 | _NETXEN_NIC_LINUX_MAJOR, fw_major); | ||
1191 | adapter->driver_mismatch = 1; | 1167 | adapter->driver_mismatch = 1; |
1192 | } | 1168 | } |
1193 | if (fw_minor != _NETXEN_NIC_LINUX_MINOR && | 1169 | if (fw_minor != _NETXEN_NIC_LINUX_MINOR && |
1194 | fw_minor != (_NETXEN_NIC_LINUX_MINOR + 1)) { | 1170 | fw_minor != (_NETXEN_NIC_LINUX_MINOR + 1)) { |
1195 | printk(KERN_ERR "The mismatch in driver version and firmware " | ||
1196 | "version minor number\n" | ||
1197 | "Driver version minor number = %d \t" | ||
1198 | "Firmware version minor number = %d \n", | ||
1199 | _NETXEN_NIC_LINUX_MINOR, fw_minor); | ||
1200 | adapter->driver_mismatch = 1; | 1171 | adapter->driver_mismatch = 1; |
1201 | } | 1172 | } |
1202 | if (adapter->driver_mismatch) | 1173 | if (adapter->driver_mismatch) { |
1203 | printk(KERN_INFO "Use the driver with version no %d.%d.xxx\n", | 1174 | printk(KERN_ERR "%s: driver and firmware version mismatch\n", |
1204 | fw_major, fw_minor); | 1175 | adapter->netdev->name); |
1176 | return; | ||
1177 | } | ||
1178 | |||
1179 | switch (adapter->ahw.board_type) { | ||
1180 | case NETXEN_NIC_GBE: | ||
1181 | dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n", | ||
1182 | adapter->netdev->name); | ||
1183 | break; | ||
1184 | case NETXEN_NIC_XGBE: | ||
1185 | dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n", | ||
1186 | adapter->netdev->name); | ||
1187 | break; | ||
1188 | } | ||
1205 | } | 1189 | } |
1206 | 1190 | ||
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index 45fa33e0cb90..70d1b22ced22 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c | |||
@@ -203,21 +203,6 @@ void netxen_initialize_adapter_sw(struct netxen_adapter *adapter) | |||
203 | } | 203 | } |
204 | } | 204 | } |
205 | 205 | ||
206 | void netxen_initialize_adapter_hw(struct netxen_adapter *adapter) | ||
207 | { | ||
208 | int ports = 0; | ||
209 | struct netxen_board_info *board_info = &(adapter->ahw.boardcfg); | ||
210 | |||
211 | if (netxen_nic_get_board_info(adapter) != 0) | ||
212 | printk("%s: Error getting board config info.\n", | ||
213 | netxen_nic_driver_name); | ||
214 | get_brd_port_by_type(board_info->board_type, &ports); | ||
215 | if (ports == 0) | ||
216 | printk(KERN_ERR "%s: Unknown board type\n", | ||
217 | netxen_nic_driver_name); | ||
218 | adapter->ahw.max_ports = ports; | ||
219 | } | ||
220 | |||
221 | void netxen_initialize_adapter_ops(struct netxen_adapter *adapter) | 206 | void netxen_initialize_adapter_ops(struct netxen_adapter *adapter) |
222 | { | 207 | { |
223 | switch (adapter->ahw.board_type) { | 208 | switch (adapter->ahw.board_type) { |
@@ -765,18 +750,13 @@ int netxen_flash_unlock(struct netxen_adapter *adapter) | |||
765 | 750 | ||
766 | int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose) | 751 | int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose) |
767 | { | 752 | { |
768 | int addr, val, status; | 753 | int addr, val; |
769 | int n, i; | 754 | int n, i; |
770 | int init_delay = 0; | 755 | int init_delay = 0; |
771 | struct crb_addr_pair *buf; | 756 | struct crb_addr_pair *buf; |
772 | u32 off; | 757 | u32 off; |
773 | 758 | ||
774 | /* resetall */ | 759 | /* resetall */ |
775 | status = netxen_nic_get_board_info(adapter); | ||
776 | if (status) | ||
777 | printk("%s: netxen_pinit_from_rom: Error getting board info\n", | ||
778 | netxen_nic_driver_name); | ||
779 | |||
780 | netxen_crb_writelit_adapter(adapter, NETXEN_ROMUSB_GLB_SW_RESET, | 760 | netxen_crb_writelit_adapter(adapter, NETXEN_ROMUSB_GLB_SW_RESET, |
781 | NETXEN_ROMBUS_RESET); | 761 | NETXEN_ROMBUS_RESET); |
782 | 762 | ||
@@ -860,10 +840,10 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose) | |||
860 | netxen_nic_pci_change_crbwindow(adapter, 1); | 840 | netxen_nic_pci_change_crbwindow(adapter, 1); |
861 | } | 841 | } |
862 | if (init_delay == 1) { | 842 | if (init_delay == 1) { |
863 | msleep(2000); | 843 | msleep(1000); |
864 | init_delay = 0; | 844 | init_delay = 0; |
865 | } | 845 | } |
866 | msleep(20); | 846 | msleep(1); |
867 | } | 847 | } |
868 | kfree(buf); | 848 | kfree(buf); |
869 | 849 | ||
@@ -938,12 +918,28 @@ int netxen_initialize_adapter_offload(struct netxen_adapter *adapter) | |||
938 | 918 | ||
939 | void netxen_free_adapter_offload(struct netxen_adapter *adapter) | 919 | void netxen_free_adapter_offload(struct netxen_adapter *adapter) |
940 | { | 920 | { |
921 | int i; | ||
922 | |||
941 | if (adapter->dummy_dma.addr) { | 923 | if (adapter->dummy_dma.addr) { |
942 | pci_free_consistent(adapter->ahw.pdev, | 924 | i = 100; |
925 | do { | ||
926 | if (dma_watchdog_shutdown_request(adapter) == 1) | ||
927 | break; | ||
928 | msleep(50); | ||
929 | if (dma_watchdog_shutdown_poll_result(adapter) == 1) | ||
930 | break; | ||
931 | } while (--i); | ||
932 | |||
933 | if (i) { | ||
934 | pci_free_consistent(adapter->ahw.pdev, | ||
943 | NETXEN_HOST_DUMMY_DMA_SIZE, | 935 | NETXEN_HOST_DUMMY_DMA_SIZE, |
944 | adapter->dummy_dma.addr, | 936 | adapter->dummy_dma.addr, |
945 | adapter->dummy_dma.phys_addr); | 937 | adapter->dummy_dma.phys_addr); |
946 | adapter->dummy_dma.addr = NULL; | 938 | adapter->dummy_dma.addr = NULL; |
939 | } else { | ||
940 | printk(KERN_ERR "%s: dma_watchdog_shutdown failed\n", | ||
941 | adapter->netdev->name); | ||
942 | } | ||
947 | } | 943 | } |
948 | } | 944 | } |
949 | 945 | ||
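netxen_free_adapter_offload() now refuses to free the dummy-DMA buffer until the on-card DMA watchdog acknowledges shutdown, bounded to 100 polls. The shape of that loop as a standalone helper (the dma_watchdog_* calls are the driver's own; the return-code handling is illustrative):

        static int sketch_stop_dma_watchdog(struct netxen_adapter *adapter)
        {
                int i = 100;                            /* bounded retry */

                do {
                        if (dma_watchdog_shutdown_request(adapter) == 1)
                                return 0;               /* watchdog already stopped */
                        msleep(50);
                        if (dma_watchdog_shutdown_poll_result(adapter) == 1)
                                return 0;               /* shutdown completed */
                } while (--i);

                return -ETIMEDOUT;                      /* caller must not free the buffer */
        }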
diff --git a/drivers/net/netxen/netxen_nic_isr.c b/drivers/net/netxen/netxen_nic_isr.c index f487615f4063..96cec41f9019 100644 --- a/drivers/net/netxen/netxen_nic_isr.c +++ b/drivers/net/netxen/netxen_nic_isr.c | |||
@@ -145,7 +145,7 @@ static void netxen_nic_isr_other(struct netxen_adapter *adapter) | |||
145 | 145 | ||
146 | /* verify the offset */ | 146 | /* verify the offset */ |
147 | val = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_XG_STATE)); | 147 | val = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_XG_STATE)); |
148 | val = val >> physical_port[adapter->portnum]; | 148 | val = val >> adapter->physical_port; |
149 | if (val == adapter->ahw.qg_linksup) | 149 | if (val == adapter->ahw.qg_linksup) |
150 | return; | 150 | return; |
151 | 151 | ||
@@ -199,7 +199,7 @@ void netxen_nic_xgbe_handle_phy_intr(struct netxen_adapter *adapter) | |||
199 | 199 | ||
200 | /* WINDOW = 1 */ | 200 | /* WINDOW = 1 */ |
201 | val = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_XG_STATE)); | 201 | val = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_XG_STATE)); |
202 | val >>= (physical_port[adapter->portnum] * 8); | 202 | val >>= (adapter->physical_port * 8); |
203 | val &= 0xff; | 203 | val &= 0xff; |
204 | 204 | ||
205 | if (adapter->ahw.xg_linkup == 1 && val != XG_LINK_UP) { | 205 | if (adapter->ahw.xg_linkup == 1 && val != XG_LINK_UP) { |
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 7144c255ce54..6797ed069f1f 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c | |||
@@ -70,17 +70,15 @@ static void netxen_nic_poll_controller(struct net_device *netdev); | |||
70 | static irqreturn_t netxen_intr(int irq, void *data); | 70 | static irqreturn_t netxen_intr(int irq, void *data); |
71 | static irqreturn_t netxen_msi_intr(int irq, void *data); | 71 | static irqreturn_t netxen_msi_intr(int irq, void *data); |
72 | 72 | ||
73 | int physical_port[] = {0, 1, 2, 3}; | ||
74 | |||
75 | /* PCI Device ID Table */ | 73 | /* PCI Device ID Table */ |
76 | static struct pci_device_id netxen_pci_tbl[] __devinitdata = { | 74 | static struct pci_device_id netxen_pci_tbl[] __devinitdata = { |
77 | {PCI_DEVICE(0x4040, 0x0001)}, | 75 | {PCI_DEVICE(0x4040, 0x0001), PCI_DEVICE_CLASS(0x020000, ~0)}, |
78 | {PCI_DEVICE(0x4040, 0x0002)}, | 76 | {PCI_DEVICE(0x4040, 0x0002), PCI_DEVICE_CLASS(0x020000, ~0)}, |
79 | {PCI_DEVICE(0x4040, 0x0003)}, | 77 | {PCI_DEVICE(0x4040, 0x0003), PCI_DEVICE_CLASS(0x020000, ~0)}, |
80 | {PCI_DEVICE(0x4040, 0x0004)}, | 78 | {PCI_DEVICE(0x4040, 0x0004), PCI_DEVICE_CLASS(0x020000, ~0)}, |
81 | {PCI_DEVICE(0x4040, 0x0005)}, | 79 | {PCI_DEVICE(0x4040, 0x0005), PCI_DEVICE_CLASS(0x020000, ~0)}, |
82 | {PCI_DEVICE(0x4040, 0x0024)}, | 80 | {PCI_DEVICE(0x4040, 0x0024), PCI_DEVICE_CLASS(0x020000, ~0)}, |
83 | {PCI_DEVICE(0x4040, 0x0025)}, | 81 | {PCI_DEVICE(0x4040, 0x0025), PCI_DEVICE_CLASS(0x020000, ~0)}, |
84 | {0,} | 82 | {0,} |
85 | }; | 83 | }; |
86 | 84 | ||
@@ -288,10 +286,11 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
288 | int pci_func_id = PCI_FUNC(pdev->devfn); | 286 | int pci_func_id = PCI_FUNC(pdev->devfn); |
289 | DECLARE_MAC_BUF(mac); | 287 | DECLARE_MAC_BUF(mac); |
290 | 288 | ||
291 | printk(KERN_INFO "%s \n", netxen_nic_driver_string); | 289 | if (pci_func_id == 0) |
290 | printk(KERN_INFO "%s \n", netxen_nic_driver_string); | ||
292 | 291 | ||
293 | if (pdev->class != 0x020000) { | 292 | if (pdev->class != 0x020000) { |
294 | printk(KERN_ERR"NetXen function %d, class %x will not " | 293 | printk(KERN_DEBUG "NetXen function %d, class %x will not " |
295 | "be enabled.\n",pci_func_id, pdev->class); | 294 | "be enabled.\n",pci_func_id, pdev->class); |
296 | return -ENODEV; | 295 | return -ENODEV; |
297 | } | 296 | } |
@@ -450,8 +449,12 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
450 | */ | 449 | */ |
451 | adapter->curr_window = 255; | 450 | adapter->curr_window = 255; |
452 | 451 | ||
453 | /* initialize the adapter */ | 452 | if (netxen_nic_get_board_info(adapter) != 0) { |
454 | netxen_initialize_adapter_hw(adapter); | 453 | printk("%s: Error getting board config info.\n", |
454 | netxen_nic_driver_name); | ||
455 | err = -EIO; | ||
456 | goto err_out_iounmap; | ||
457 | } | ||
455 | 458 | ||
456 | /* | 459 | /* |
457 | * Adapter in our case is quad port so initialize it before | 460 | * Adapter in our case is quad port so initialize it before |
@@ -530,17 +533,15 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
530 | netxen_initialize_adapter_sw(adapter); /* initialize the buffers in adapter */ | 533 | netxen_initialize_adapter_sw(adapter); /* initialize the buffers in adapter */ |
531 | 534 | ||
532 | /* Mezz cards have PCI function 0,2,3 enabled */ | 535 | /* Mezz cards have PCI function 0,2,3 enabled */ |
533 | if ((adapter->ahw.boardcfg.board_type == NETXEN_BRDTYPE_P2_SB31_10G_IMEZ) | 536 | switch (adapter->ahw.boardcfg.board_type) { |
534 | && (pci_func_id >= 2)) | 537 | case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ: |
538 | case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ: | ||
539 | if (pci_func_id >= 2) | ||
535 | adapter->portnum = pci_func_id - 2; | 540 | adapter->portnum = pci_func_id - 2; |
536 | 541 | break; | |
537 | #ifdef CONFIG_IA64 | 542 | default: |
538 | if(adapter->portnum == 0) { | 543 | break; |
539 | netxen_pinit_from_rom(adapter, 0); | ||
540 | udelay(500); | ||
541 | netxen_load_firmware(adapter); | ||
542 | } | 544 | } |
543 | #endif | ||
544 | 545 | ||
545 | init_timer(&adapter->watchdog_timer); | 546 | init_timer(&adapter->watchdog_timer); |
546 | adapter->ahw.xg_linkup = 0; | 547 | adapter->ahw.xg_linkup = 0; |
@@ -613,11 +614,18 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
613 | err = -ENODEV; | 614 | err = -ENODEV; |
614 | goto err_out_free_dev; | 615 | goto err_out_free_dev; |
615 | } | 616 | } |
617 | } else { | ||
618 | writel(0, NETXEN_CRB_NORMALIZE(adapter, | ||
619 | CRB_CMDPEG_STATE)); | ||
620 | netxen_pinit_from_rom(adapter, 0); | ||
621 | msleep(1); | ||
622 | netxen_load_firmware(adapter); | ||
623 | netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE); | ||
616 | } | 624 | } |
617 | 625 | ||
618 | /* clear the register for future unloads/loads */ | 626 | /* clear the register for future unloads/loads */ |
619 | writel(0, NETXEN_CRB_NORMALIZE(adapter, NETXEN_CAM_RAM(0x1fc))); | 627 | writel(0, NETXEN_CRB_NORMALIZE(adapter, NETXEN_CAM_RAM(0x1fc))); |
620 | printk(KERN_INFO "State: 0x%0x\n", | 628 | dev_info(&pdev->dev, "cmdpeg state: 0x%0x\n", |
621 | readl(NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE))); | 629 | readl(NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE))); |
622 | 630 | ||
623 | /* | 631 | /* |
@@ -639,9 +647,10 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
639 | /* | 647 | /* |
640 | * See if the firmware gave us a virtual-physical port mapping. | 648 | * See if the firmware gave us a virtual-physical port mapping. |
641 | */ | 649 | */ |
650 | adapter->physical_port = adapter->portnum; | ||
642 | i = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_V2P(adapter->portnum))); | 651 | i = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_V2P(adapter->portnum))); |
643 | if (i != 0x55555555) | 652 | if (i != 0x55555555) |
644 | physical_port[adapter->portnum] = i; | 653 | adapter->physical_port = i; |
645 | 654 | ||
646 | netif_carrier_off(netdev); | 655 | netif_carrier_off(netdev); |
647 | netif_stop_queue(netdev); | 656 | netif_stop_queue(netdev); |
@@ -654,22 +663,9 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
654 | goto err_out_free_dev; | 663 | goto err_out_free_dev; |
655 | } | 664 | } |
656 | 665 | ||
666 | netxen_nic_flash_print(adapter); | ||
657 | pci_set_drvdata(pdev, adapter); | 667 | pci_set_drvdata(pdev, adapter); |
658 | 668 | ||
659 | switch (adapter->ahw.board_type) { | ||
660 | case NETXEN_NIC_GBE: | ||
661 | printk(KERN_INFO "%s: QUAD GbE board initialized\n", | ||
662 | netxen_nic_driver_name); | ||
663 | break; | ||
664 | |||
665 | case NETXEN_NIC_XGBE: | ||
666 | printk(KERN_INFO "%s: XGbE board initialized\n", | ||
667 | netxen_nic_driver_name); | ||
668 | break; | ||
669 | } | ||
670 | |||
671 | adapter->driver_mismatch = 0; | ||
672 | |||
673 | return 0; | 669 | return 0; |
674 | 670 | ||
675 | err_out_free_dev: | 671 | err_out_free_dev: |
@@ -760,55 +756,8 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev) | |||
760 | 756 | ||
761 | vfree(adapter->cmd_buf_arr); | 757 | vfree(adapter->cmd_buf_arr); |
762 | 758 | ||
763 | if (adapter->portnum == 0) { | 759 | if (adapter->portnum == 0) |
764 | if (init_firmware_done) { | 760 | netxen_free_adapter_offload(adapter); |
765 | i = 100; | ||
766 | do { | ||
767 | if (dma_watchdog_shutdown_request(adapter) == 1) | ||
768 | break; | ||
769 | msleep(100); | ||
770 | if (dma_watchdog_shutdown_poll_result(adapter) == 1) | ||
771 | break; | ||
772 | } while (--i); | ||
773 | |||
774 | if (i == 0) | ||
775 | printk(KERN_ERR "%s: dma_watchdog_shutdown failed\n", | ||
776 | netdev->name); | ||
777 | |||
778 | /* clear the register for future unloads/loads */ | ||
779 | writel(0, NETXEN_CRB_NORMALIZE(adapter, NETXEN_CAM_RAM(0x1fc))); | ||
780 | printk(KERN_INFO "State: 0x%0x\n", | ||
781 | readl(NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE))); | ||
782 | |||
783 | /* leave the hw in the same state as reboot */ | ||
784 | writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE)); | ||
785 | netxen_pinit_from_rom(adapter, 0); | ||
786 | msleep(1); | ||
787 | netxen_load_firmware(adapter); | ||
788 | netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE); | ||
789 | } | ||
790 | |||
791 | /* clear the register for future unloads/loads */ | ||
792 | writel(0, NETXEN_CRB_NORMALIZE(adapter, NETXEN_CAM_RAM(0x1fc))); | ||
793 | printk(KERN_INFO "State: 0x%0x\n", | ||
794 | readl(NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE))); | ||
795 | |||
796 | i = 100; | ||
797 | do { | ||
798 | if (dma_watchdog_shutdown_request(adapter) == 1) | ||
799 | break; | ||
800 | msleep(100); | ||
801 | if (dma_watchdog_shutdown_poll_result(adapter) == 1) | ||
802 | break; | ||
803 | } while (--i); | ||
804 | |||
805 | if (i) { | ||
806 | netxen_free_adapter_offload(adapter); | ||
807 | } else { | ||
808 | printk(KERN_ERR "%s: dma_watchdog_shutdown failed\n", | ||
809 | netdev->name); | ||
810 | } | ||
811 | } | ||
812 | 761 | ||
813 | if (adapter->irq) | 762 | if (adapter->irq) |
814 | free_irq(adapter->irq, adapter); | 763 | free_irq(adapter->irq, adapter); |
@@ -840,13 +789,15 @@ static int netxen_nic_open(struct net_device *netdev) | |||
840 | irq_handler_t handler; | 789 | irq_handler_t handler; |
841 | unsigned long flags = IRQF_SAMPLE_RANDOM; | 790 | unsigned long flags = IRQF_SAMPLE_RANDOM; |
842 | 791 | ||
792 | if (adapter->driver_mismatch) | ||
793 | return -EIO; | ||
794 | |||
843 | if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) { | 795 | if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) { |
844 | err = netxen_init_firmware(adapter); | 796 | err = netxen_init_firmware(adapter); |
845 | if (err != 0) { | 797 | if (err != 0) { |
846 | printk(KERN_ERR "Failed to init firmware\n"); | 798 | printk(KERN_ERR "Failed to init firmware\n"); |
847 | return -EIO; | 799 | return -EIO; |
848 | } | 800 | } |
849 | netxen_nic_flash_print(adapter); | ||
850 | 801 | ||
851 | /* setup all the resources for the Phantom... */ | 802 | /* setup all the resources for the Phantom... */ |
852 | /* this include the descriptors for rcv, tx, and status */ | 803 | /* this include the descriptors for rcv, tx, and status */ |
@@ -895,14 +846,12 @@ static int netxen_nic_open(struct net_device *netdev) | |||
895 | if (adapter->set_mtu) | 846 | if (adapter->set_mtu) |
896 | adapter->set_mtu(adapter, netdev->mtu); | 847 | adapter->set_mtu(adapter, netdev->mtu); |
897 | 848 | ||
898 | if (!adapter->driver_mismatch) | 849 | mod_timer(&adapter->watchdog_timer, jiffies); |
899 | mod_timer(&adapter->watchdog_timer, jiffies); | ||
900 | 850 | ||
901 | napi_enable(&adapter->napi); | 851 | napi_enable(&adapter->napi); |
902 | netxen_nic_enable_int(adapter); | 852 | netxen_nic_enable_int(adapter); |
903 | 853 | ||
904 | if (!adapter->driver_mismatch) | 854 | netif_start_queue(netdev); |
905 | netif_start_queue(netdev); | ||
906 | 855 | ||
907 | return 0; | 856 | return 0; |
908 | } | 857 | } |
diff --git a/drivers/net/netxen/netxen_nic_niu.c b/drivers/net/netxen/netxen_nic_niu.c index 1c852a76c80d..a3bc7cc67a6f 100644 --- a/drivers/net/netxen/netxen_nic_niu.c +++ b/drivers/net/netxen/netxen_nic_niu.c | |||
@@ -94,7 +94,7 @@ int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long reg, | |||
94 | long timeout = 0; | 94 | long timeout = 0; |
95 | long result = 0; | 95 | long result = 0; |
96 | long restore = 0; | 96 | long restore = 0; |
97 | long phy = physical_port[adapter->portnum]; | 97 | long phy = adapter->physical_port; |
98 | __u32 address; | 98 | __u32 address; |
99 | __u32 command; | 99 | __u32 command; |
100 | __u32 status; | 100 | __u32 status; |
@@ -190,7 +190,7 @@ int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter, long reg, | |||
190 | long timeout = 0; | 190 | long timeout = 0; |
191 | long result = 0; | 191 | long result = 0; |
192 | long restore = 0; | 192 | long restore = 0; |
193 | long phy = physical_port[adapter->portnum]; | 193 | long phy = adapter->physical_port; |
194 | __u32 address; | 194 | __u32 address; |
195 | __u32 command; | 195 | __u32 command; |
196 | __u32 status; | 196 | __u32 status; |
@@ -456,7 +456,7 @@ int netxen_niu_gbe_init_port(struct netxen_adapter *adapter, int port) | |||
456 | 456 | ||
457 | int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port) | 457 | int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port) |
458 | { | 458 | { |
459 | u32 portnum = physical_port[adapter->portnum]; | 459 | u32 portnum = adapter->physical_port; |
460 | 460 | ||
461 | netxen_crb_writelit_adapter(adapter, | 461 | netxen_crb_writelit_adapter(adapter, |
462 | NETXEN_NIU_XGE_CONFIG_1+(0x10000*portnum), 0x1447); | 462 | NETXEN_NIU_XGE_CONFIG_1+(0x10000*portnum), 0x1447); |
@@ -573,7 +573,7 @@ static int netxen_niu_macaddr_get(struct netxen_adapter *adapter, | |||
573 | { | 573 | { |
574 | u32 stationhigh; | 574 | u32 stationhigh; |
575 | u32 stationlow; | 575 | u32 stationlow; |
576 | int phy = physical_port[adapter->portnum]; | 576 | int phy = adapter->physical_port; |
577 | u8 val[8]; | 577 | u8 val[8]; |
578 | 578 | ||
579 | if (addr == NULL) | 579 | if (addr == NULL) |
@@ -604,7 +604,7 @@ int netxen_niu_macaddr_set(struct netxen_adapter *adapter, | |||
604 | { | 604 | { |
605 | u8 temp[4]; | 605 | u8 temp[4]; |
606 | u32 val; | 606 | u32 val; |
607 | int phy = physical_port[adapter->portnum]; | 607 | int phy = adapter->physical_port; |
608 | unsigned char mac_addr[6]; | 608 | unsigned char mac_addr[6]; |
609 | int i; | 609 | int i; |
610 | DECLARE_MAC_BUF(mac); | 610 | DECLARE_MAC_BUF(mac); |
@@ -724,7 +724,7 @@ int netxen_niu_enable_gbe_port(struct netxen_adapter *adapter, | |||
724 | int netxen_niu_disable_gbe_port(struct netxen_adapter *adapter) | 724 | int netxen_niu_disable_gbe_port(struct netxen_adapter *adapter) |
725 | { | 725 | { |
726 | __u32 mac_cfg0; | 726 | __u32 mac_cfg0; |
727 | u32 port = physical_port[adapter->portnum]; | 727 | u32 port = adapter->physical_port; |
728 | 728 | ||
729 | if (port > NETXEN_NIU_MAX_GBE_PORTS) | 729 | if (port > NETXEN_NIU_MAX_GBE_PORTS) |
730 | return -EINVAL; | 730 | return -EINVAL; |
@@ -740,7 +740,7 @@ int netxen_niu_disable_gbe_port(struct netxen_adapter *adapter) | |||
740 | int netxen_niu_disable_xg_port(struct netxen_adapter *adapter) | 740 | int netxen_niu_disable_xg_port(struct netxen_adapter *adapter) |
741 | { | 741 | { |
742 | __u32 mac_cfg; | 742 | __u32 mac_cfg; |
743 | u32 port = physical_port[adapter->portnum]; | 743 | u32 port = adapter->physical_port; |
744 | 744 | ||
745 | if (port > NETXEN_NIU_MAX_XG_PORTS) | 745 | if (port > NETXEN_NIU_MAX_XG_PORTS) |
746 | return -EINVAL; | 746 | return -EINVAL; |
@@ -757,7 +757,7 @@ int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter, | |||
757 | netxen_niu_prom_mode_t mode) | 757 | netxen_niu_prom_mode_t mode) |
758 | { | 758 | { |
759 | __u32 reg; | 759 | __u32 reg; |
760 | u32 port = physical_port[adapter->portnum]; | 760 | u32 port = adapter->physical_port; |
761 | 761 | ||
762 | if (port > NETXEN_NIU_MAX_GBE_PORTS) | 762 | if (port > NETXEN_NIU_MAX_GBE_PORTS) |
763 | return -EINVAL; | 763 | return -EINVAL; |
@@ -814,7 +814,7 @@ int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter, | |||
814 | int netxen_niu_xg_macaddr_set(struct netxen_adapter *adapter, | 814 | int netxen_niu_xg_macaddr_set(struct netxen_adapter *adapter, |
815 | netxen_ethernet_macaddr_t addr) | 815 | netxen_ethernet_macaddr_t addr) |
816 | { | 816 | { |
817 | int phy = physical_port[adapter->portnum]; | 817 | int phy = adapter->physical_port; |
818 | u8 temp[4]; | 818 | u8 temp[4]; |
819 | u32 val; | 819 | u32 val; |
820 | 820 | ||
@@ -867,7 +867,7 @@ int netxen_niu_xg_macaddr_set(struct netxen_adapter *adapter, | |||
867 | int netxen_niu_xg_macaddr_get(struct netxen_adapter *adapter, | 867 | int netxen_niu_xg_macaddr_get(struct netxen_adapter *adapter, |
868 | netxen_ethernet_macaddr_t * addr) | 868 | netxen_ethernet_macaddr_t * addr) |
869 | { | 869 | { |
870 | int phy = physical_port[adapter->portnum]; | 870 | int phy = adapter->physical_port; |
871 | u32 stationhigh; | 871 | u32 stationhigh; |
872 | u32 stationlow; | 872 | u32 stationlow; |
873 | u8 val[8]; | 873 | u8 val[8]; |
@@ -896,7 +896,7 @@ int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter, | |||
896 | netxen_niu_prom_mode_t mode) | 896 | netxen_niu_prom_mode_t mode) |
897 | { | 897 | { |
898 | __u32 reg; | 898 | __u32 reg; |
899 | u32 port = physical_port[adapter->portnum]; | 899 | u32 port = adapter->physical_port; |
900 | 900 | ||
901 | if (port > NETXEN_NIU_MAX_XG_PORTS) | 901 | if (port > NETXEN_NIU_MAX_XG_PORTS) |
902 | return -EINVAL; | 902 | return -EINVAL; |
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c index 8f328a03847b..a550c9bd126f 100644 --- a/drivers/net/pcmcia/fmvj18x_cs.c +++ b/drivers/net/pcmcia/fmvj18x_cs.c | |||
@@ -391,7 +391,9 @@ static int fmvj18x_config(struct pcmcia_device *link) | |||
391 | cardtype = CONTEC; | 391 | cardtype = CONTEC; |
392 | break; | 392 | break; |
393 | case MANFID_FUJITSU: | 393 | case MANFID_FUJITSU: |
394 | if (link->card_id == PRODID_FUJITSU_MBH10302) | 394 | if (link->conf.ConfigBase == 0x0fe0) |
395 | cardtype = MBH10302; | ||
396 | else if (link->card_id == PRODID_FUJITSU_MBH10302) | ||
395 | /* RATOC REX-5588/9822/4886's PRODID are 0004(=MBH10302), | 397 | /* RATOC REX-5588/9822/4886's PRODID are 0004(=MBH10302), |
396 | but these are MBH10304 based card. */ | 398 | but these are MBH10304 based card. */ |
397 | cardtype = MBH10304; | 399 | cardtype = MBH10304; |
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c index d041f831a18d..f6c4698ce738 100644 --- a/drivers/net/pcmcia/xirc2ps_cs.c +++ b/drivers/net/pcmcia/xirc2ps_cs.c | |||
@@ -1461,22 +1461,25 @@ static void | |||
1461 | set_multicast_list(struct net_device *dev) | 1461 | set_multicast_list(struct net_device *dev) |
1462 | { | 1462 | { |
1463 | unsigned int ioaddr = dev->base_addr; | 1463 | unsigned int ioaddr = dev->base_addr; |
1464 | unsigned value; | ||
1464 | 1465 | ||
1465 | SelectPage(0x42); | 1466 | SelectPage(0x42); |
1467 | value = GetByte(XIRCREG42_SWC1) & 0xC0; | ||
1468 | |||
1466 | if (dev->flags & IFF_PROMISC) { /* snoop */ | 1469 | if (dev->flags & IFF_PROMISC) { /* snoop */ |
1467 | PutByte(XIRCREG42_SWC1, 0x06); /* set MPE and PME */ | 1470 | PutByte(XIRCREG42_SWC1, value | 0x06); /* set MPE and PME */ |
1468 | } else if (dev->mc_count > 9 || (dev->flags & IFF_ALLMULTI)) { | 1471 | } else if (dev->mc_count > 9 || (dev->flags & IFF_ALLMULTI)) { |
1469 | PutByte(XIRCREG42_SWC1, 0x02); /* set MPE */ | 1472 | PutByte(XIRCREG42_SWC1, value | 0x02); /* set MPE */ |
1470 | } else if (dev->mc_count) { | 1473 | } else if (dev->mc_count) { |
1471 | /* the chip can filter 9 addresses perfectly */ | 1474 | /* the chip can filter 9 addresses perfectly */ |
1472 | PutByte(XIRCREG42_SWC1, 0x01); | 1475 | PutByte(XIRCREG42_SWC1, value | 0x01); |
1473 | SelectPage(0x40); | 1476 | SelectPage(0x40); |
1474 | PutByte(XIRCREG40_CMD0, Offline); | 1477 | PutByte(XIRCREG40_CMD0, Offline); |
1475 | set_addresses(dev); | 1478 | set_addresses(dev); |
1476 | SelectPage(0x40); | 1479 | SelectPage(0x40); |
1477 | PutByte(XIRCREG40_CMD0, EnableRecv | Online); | 1480 | PutByte(XIRCREG40_CMD0, EnableRecv | Online); |
1478 | } else { /* standard usage */ | 1481 | } else { /* standard usage */ |
1479 | PutByte(XIRCREG42_SWC1, 0x00); | 1482 | PutByte(XIRCREG42_SWC1, value | 0x00); |
1480 | } | 1483 | } |
1481 | SelectPage(0); | 1484 | SelectPage(0); |
1482 | } | 1485 | } |
@@ -1722,6 +1725,7 @@ do_reset(struct net_device *dev, int full) | |||
1722 | 1725 | ||
1723 | /* enable receiver and put the mac online */ | 1726 | /* enable receiver and put the mac online */ |
1724 | if (full) { | 1727 | if (full) { |
1728 | set_multicast_list(dev); | ||
1725 | SelectPage(0x40); | 1729 | SelectPage(0x40); |
1726 | PutByte(XIRCREG40_CMD0, EnableRecv | Online); | 1730 | PutByte(XIRCREG40_CMD0, EnableRecv | Online); |
1727 | } | 1731 | } |
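The SWC1 writes above are now read-modify-write: the top two bits (0xC0) read back from the register are preserved and only the mode bits change, and do_reset() reapplies the multicast mode on a full reset. A generic illustration of the preserve-then-set pattern (the helper is hypothetical; the driver itself goes through its SelectPage/GetByte/PutByte macros):

        static void rmw_set_bits(unsigned int port, u8 keep_mask, u8 new_bits)
        {
                u8 value = inb(port) & keep_mask;       /* keep the reserved bits */

                outb(value | new_bits, port);           /* write back with new mode */
        }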
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c index a1c454dbc164..1c89b97f4e09 100644 --- a/drivers/net/pcnet32.c +++ b/drivers/net/pcnet32.c | |||
@@ -325,7 +325,7 @@ static int pcnet32_get_regs_len(struct net_device *dev); | |||
325 | static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, | 325 | static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, |
326 | void *ptr); | 326 | void *ptr); |
327 | static void pcnet32_purge_tx_ring(struct net_device *dev); | 327 | static void pcnet32_purge_tx_ring(struct net_device *dev); |
328 | static int pcnet32_alloc_ring(struct net_device *dev, char *name); | 328 | static int pcnet32_alloc_ring(struct net_device *dev, const char *name); |
329 | static void pcnet32_free_ring(struct net_device *dev); | 329 | static void pcnet32_free_ring(struct net_device *dev); |
330 | static void pcnet32_check_media(struct net_device *dev, int verbose); | 330 | static void pcnet32_check_media(struct net_device *dev, int verbose); |
331 | 331 | ||
@@ -1983,7 +1983,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) | |||
1983 | } | 1983 | } |
1984 | 1984 | ||
1985 | /* if any allocation fails, caller must also call pcnet32_free_ring */ | 1985 | /* if any allocation fails, caller must also call pcnet32_free_ring */ |
1986 | static int pcnet32_alloc_ring(struct net_device *dev, char *name) | 1986 | static int pcnet32_alloc_ring(struct net_device *dev, const char *name) |
1987 | { | 1987 | { |
1988 | struct pcnet32_private *lp = netdev_priv(dev); | 1988 | struct pcnet32_private *lp = netdev_priv(dev); |
1989 | 1989 | ||
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 6bf9e76b0a00..6eb2d31d1e34 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig | |||
@@ -5,7 +5,7 @@ | |||
5 | menuconfig PHYLIB | 5 | menuconfig PHYLIB |
6 | tristate "PHY Device support and infrastructure" | 6 | tristate "PHY Device support and infrastructure" |
7 | depends on !S390 | 7 | depends on !S390 |
8 | depends on NET_ETHERNET && (BROKEN || !S390) | 8 | depends on NET_ETHERNET |
9 | help | 9 | help |
10 | Ethernet controllers are usually attached to PHY | 10 | Ethernet controllers are usually attached to PHY |
11 | devices. This option provides infrastructure for | 11 | devices. This option provides infrastructure for |
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index ac3c01d28fdf..16a0e7de5888 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c | |||
@@ -207,6 +207,7 @@ int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id) | |||
207 | 207 | ||
208 | return 0; | 208 | return 0; |
209 | } | 209 | } |
210 | EXPORT_SYMBOL(get_phy_id); | ||
210 | 211 | ||
211 | /** | 212 | /** |
212 | * get_phy_device - reads the specified PHY device and returns its @phy_device struct | 213 | * get_phy_device - reads the specified PHY device and returns its @phy_device struct |
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c index 58a26a47af29..fc6f4b8c64b3 100644 --- a/drivers/net/pppoe.c +++ b/drivers/net/pppoe.c | |||
@@ -341,12 +341,6 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb) | |||
341 | struct pppox_sock *relay_po; | 341 | struct pppox_sock *relay_po; |
342 | 342 | ||
343 | if (sk->sk_state & PPPOX_BOUND) { | 343 | if (sk->sk_state & PPPOX_BOUND) { |
344 | struct pppoe_hdr *ph = pppoe_hdr(skb); | ||
345 | int len = ntohs(ph->length); | ||
346 | skb_pull_rcsum(skb, sizeof(struct pppoe_hdr)); | ||
347 | if (pskb_trim_rcsum(skb, len)) | ||
348 | goto abort_kfree; | ||
349 | |||
350 | ppp_input(&po->chan, skb); | 344 | ppp_input(&po->chan, skb); |
351 | } else if (sk->sk_state & PPPOX_RELAY) { | 345 | } else if (sk->sk_state & PPPOX_RELAY) { |
352 | relay_po = get_item_by_addr(&po->pppoe_relay); | 346 | relay_po = get_item_by_addr(&po->pppoe_relay); |
@@ -357,7 +351,6 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb) | |||
357 | if ((sk_pppox(relay_po)->sk_state & PPPOX_CONNECTED) == 0) | 351 | if ((sk_pppox(relay_po)->sk_state & PPPOX_CONNECTED) == 0) |
358 | goto abort_put; | 352 | goto abort_put; |
359 | 353 | ||
360 | skb_pull(skb, sizeof(struct pppoe_hdr)); | ||
361 | if (!__pppoe_xmit(sk_pppox(relay_po), skb)) | 354 | if (!__pppoe_xmit(sk_pppox(relay_po), skb)) |
362 | goto abort_put; | 355 | goto abort_put; |
363 | } else { | 356 | } else { |
@@ -388,6 +381,7 @@ static int pppoe_rcv(struct sk_buff *skb, | |||
388 | { | 381 | { |
389 | struct pppoe_hdr *ph; | 382 | struct pppoe_hdr *ph; |
390 | struct pppox_sock *po; | 383 | struct pppox_sock *po; |
384 | int len; | ||
391 | 385 | ||
392 | if (!(skb = skb_share_check(skb, GFP_ATOMIC))) | 386 | if (!(skb = skb_share_check(skb, GFP_ATOMIC))) |
393 | goto out; | 387 | goto out; |
@@ -399,10 +393,21 @@ static int pppoe_rcv(struct sk_buff *skb, | |||
399 | goto drop; | 393 | goto drop; |
400 | 394 | ||
401 | ph = pppoe_hdr(skb); | 395 | ph = pppoe_hdr(skb); |
396 | len = ntohs(ph->length); | ||
397 | |||
398 | skb_pull_rcsum(skb, sizeof(*ph)); | ||
399 | if (skb->len < len) | ||
400 | goto drop; | ||
402 | 401 | ||
403 | po = get_item(ph->sid, eth_hdr(skb)->h_source, dev->ifindex); | 402 | po = get_item(ph->sid, eth_hdr(skb)->h_source, dev->ifindex); |
404 | if (po != NULL) | 403 | if (!po) |
405 | return sk_receive_skb(sk_pppox(po), skb, 0); | 404 | goto drop; |
405 | |||
406 | if (pskb_trim_rcsum(skb, len)) | ||
407 | goto drop; | ||
408 | |||
409 | return sk_receive_skb(sk_pppox(po), skb, 0); | ||
410 | |||
406 | drop: | 411 | drop: |
407 | kfree_skb(skb); | 412 | kfree_skb(skb); |
408 | out: | 413 | out: |
@@ -427,12 +432,12 @@ static int pppoe_disc_rcv(struct sk_buff *skb, | |||
427 | if (dev_net(dev) != &init_net) | 432 | if (dev_net(dev) != &init_net) |
428 | goto abort; | 433 | goto abort; |
429 | 434 | ||
430 | if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) | ||
431 | goto abort; | ||
432 | |||
433 | if (!(skb = skb_share_check(skb, GFP_ATOMIC))) | 435 | if (!(skb = skb_share_check(skb, GFP_ATOMIC))) |
434 | goto out; | 436 | goto out; |
435 | 437 | ||
438 | if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) | ||
439 | goto abort; | ||
440 | |||
436 | ph = pppoe_hdr(skb); | 441 | ph = pppoe_hdr(skb); |
437 | if (ph->code != PADT_CODE) | 442 | if (ph->code != PADT_CODE) |
438 | goto abort; | 443 | goto abort; |
@@ -937,12 +942,10 @@ static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
937 | m->msg_namelen = 0; | 942 | m->msg_namelen = 0; |
938 | 943 | ||
939 | if (skb) { | 944 | if (skb) { |
940 | struct pppoe_hdr *ph = pppoe_hdr(skb); | 945 | total_len = min_t(size_t, total_len, skb->len); |
941 | const int len = ntohs(ph->length); | 946 | error = skb_copy_datagram_iovec(skb, 0, m->msg_iov, total_len); |
942 | |||
943 | error = memcpy_toiovec(m->msg_iov, (unsigned char *) &ph->tag[0], len); | ||
944 | if (error == 0) | 947 | if (error == 0) |
945 | error = len; | 948 | error = total_len; |
946 | } | 949 | } |
947 | 950 | ||
948 | kfree_skb(skb); | 951 | kfree_skb(skb); |
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c index 79359919335b..f9298827a76c 100644 --- a/drivers/net/pppol2tp.c +++ b/drivers/net/pppol2tp.c | |||
@@ -240,12 +240,15 @@ static inline struct pppol2tp_session *pppol2tp_sock_to_session(struct sock *sk) | |||
240 | if (sk == NULL) | 240 | if (sk == NULL) |
241 | return NULL; | 241 | return NULL; |
242 | 242 | ||
243 | sock_hold(sk); | ||
243 | session = (struct pppol2tp_session *)(sk->sk_user_data); | 244 | session = (struct pppol2tp_session *)(sk->sk_user_data); |
244 | if (session == NULL) | 245 | if (session == NULL) { |
245 | return NULL; | 246 | sock_put(sk); |
247 | goto out; | ||
248 | } | ||
246 | 249 | ||
247 | BUG_ON(session->magic != L2TP_SESSION_MAGIC); | 250 | BUG_ON(session->magic != L2TP_SESSION_MAGIC); |
248 | 251 | out: | |
249 | return session; | 252 | return session; |
250 | } | 253 | } |
251 | 254 | ||
@@ -256,12 +259,15 @@ static inline struct pppol2tp_tunnel *pppol2tp_sock_to_tunnel(struct sock *sk) | |||
256 | if (sk == NULL) | 259 | if (sk == NULL) |
257 | return NULL; | 260 | return NULL; |
258 | 261 | ||
262 | sock_hold(sk); | ||
259 | tunnel = (struct pppol2tp_tunnel *)(sk->sk_user_data); | 263 | tunnel = (struct pppol2tp_tunnel *)(sk->sk_user_data); |
260 | if (tunnel == NULL) | 264 | if (tunnel == NULL) { |
261 | return NULL; | 265 | sock_put(sk); |
266 | goto out; | ||
267 | } | ||
262 | 268 | ||
263 | BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC); | 269 | BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC); |
264 | 270 | out: | |
265 | return tunnel; | 271 | return tunnel; |
266 | } | 272 | } |
267 | 273 | ||
@@ -716,12 +722,14 @@ discard: | |||
716 | session->stats.rx_errors++; | 722 | session->stats.rx_errors++; |
717 | kfree_skb(skb); | 723 | kfree_skb(skb); |
718 | sock_put(session->sock); | 724 | sock_put(session->sock); |
725 | sock_put(sock); | ||
719 | 726 | ||
720 | return 0; | 727 | return 0; |
721 | 728 | ||
722 | error: | 729 | error: |
723 | /* Put UDP header back */ | 730 | /* Put UDP header back */ |
724 | __skb_push(skb, sizeof(struct udphdr)); | 731 | __skb_push(skb, sizeof(struct udphdr)); |
732 | sock_put(sock); | ||
725 | 733 | ||
726 | no_tunnel: | 734 | no_tunnel: |
727 | return 1; | 735 | return 1; |
@@ -745,10 +753,13 @@ static int pppol2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) | |||
745 | "%s: received %d bytes\n", tunnel->name, skb->len); | 753 | "%s: received %d bytes\n", tunnel->name, skb->len); |
746 | 754 | ||
747 | if (pppol2tp_recv_core(sk, skb)) | 755 | if (pppol2tp_recv_core(sk, skb)) |
748 | goto pass_up; | 756 | goto pass_up_put; |
749 | 757 | ||
758 | sock_put(sk); | ||
750 | return 0; | 759 | return 0; |
751 | 760 | ||
761 | pass_up_put: | ||
762 | sock_put(sk); | ||
752 | pass_up: | 763 | pass_up: |
753 | return 1; | 764 | return 1; |
754 | } | 765 | } |
@@ -772,14 +783,18 @@ static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
772 | err = 0; | 783 | err = 0; |
773 | skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, | 784 | skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, |
774 | flags & MSG_DONTWAIT, &err); | 785 | flags & MSG_DONTWAIT, &err); |
775 | if (skb) { | 786 | if (!skb) |
776 | err = memcpy_toiovec(msg->msg_iov, (unsigned char *) skb->data, | 787 | goto end; |
777 | skb->len); | 788 | |
778 | if (err < 0) | 789 | if (len > skb->len) |
779 | goto do_skb_free; | 790 | len = skb->len; |
780 | err = skb->len; | 791 | else if (len < skb->len) |
781 | } | 792 | msg->msg_flags |= MSG_TRUNC; |
782 | do_skb_free: | 793 | |
794 | err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len); | ||
795 | if (likely(err == 0)) | ||
796 | err = len; | ||
797 | |||
783 | kfree_skb(skb); | 798 | kfree_skb(skb); |
784 | end: | 799 | end: |
785 | return err; | 800 | return err; |
@@ -858,7 +873,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh | |||
858 | 873 | ||
859 | tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock); | 874 | tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock); |
860 | if (tunnel == NULL) | 875 | if (tunnel == NULL) |
861 | goto error; | 876 | goto error_put_sess; |
862 | 877 | ||
863 | /* What header length is configured for this session? */ | 878 | /* What header length is configured for this session? */ |
864 | hdr_len = pppol2tp_l2tp_header_len(session); | 879 | hdr_len = pppol2tp_l2tp_header_len(session); |
@@ -870,7 +885,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh | |||
870 | sizeof(ppph) + total_len, | 885 | sizeof(ppph) + total_len, |
871 | 0, GFP_KERNEL); | 886 | 0, GFP_KERNEL); |
872 | if (!skb) | 887 | if (!skb) |
873 | goto error; | 888 | goto error_put_sess_tun; |
874 | 889 | ||
875 | /* Reserve space for headers. */ | 890 | /* Reserve space for headers. */ |
876 | skb_reserve(skb, NET_SKB_PAD); | 891 | skb_reserve(skb, NET_SKB_PAD); |
@@ -900,7 +915,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh | |||
900 | error = memcpy_fromiovec(skb->data, m->msg_iov, total_len); | 915 | error = memcpy_fromiovec(skb->data, m->msg_iov, total_len); |
901 | if (error < 0) { | 916 | if (error < 0) { |
902 | kfree_skb(skb); | 917 | kfree_skb(skb); |
903 | goto error; | 918 | goto error_put_sess_tun; |
904 | } | 919 | } |
905 | skb_put(skb, total_len); | 920 | skb_put(skb, total_len); |
906 | 921 | ||
@@ -947,10 +962,33 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh | |||
947 | session->stats.tx_errors++; | 962 | session->stats.tx_errors++; |
948 | } | 963 | } |
949 | 964 | ||
965 | return error; | ||
966 | |||
967 | error_put_sess_tun: | ||
968 | sock_put(session->tunnel_sock); | ||
969 | error_put_sess: | ||
970 | sock_put(sk); | ||
950 | error: | 971 | error: |
951 | return error; | 972 | return error; |
952 | } | 973 | } |
953 | 974 | ||
975 | /* Automatically called when the skb is freed. | ||
976 | */ | ||
977 | static void pppol2tp_sock_wfree(struct sk_buff *skb) | ||
978 | { | ||
979 | sock_put(skb->sk); | ||
980 | } | ||
981 | |||
982 | /* For data skbs that we transmit, we associate with the tunnel socket | ||
983 | * but don't do accounting. | ||
984 | */ | ||
985 | static inline void pppol2tp_skb_set_owner_w(struct sk_buff *skb, struct sock *sk) | ||
986 | { | ||
987 | sock_hold(sk); | ||
988 | skb->sk = sk; | ||
989 | skb->destructor = pppol2tp_sock_wfree; | ||
990 | } | ||
991 | |||
954 | /* Transmit function called by generic PPP driver. Sends PPP frame | 992 | /* Transmit function called by generic PPP driver. Sends PPP frame |
955 | * over PPPoL2TP socket. | 993 | * over PPPoL2TP socket. |
956 | * | 994 | * |
@@ -980,6 +1018,8 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb) | |||
980 | __wsum csum = 0; | 1018 | __wsum csum = 0; |
981 | struct udphdr *uh; | 1019 | struct udphdr *uh; |
982 | unsigned int len; | 1020 | unsigned int len; |
1021 | int old_headroom; | ||
1022 | int new_headroom; | ||
983 | 1023 | ||
984 | if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) | 1024 | if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) |
985 | goto abort; | 1025 | goto abort; |
@@ -991,25 +1031,27 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb) | |||
991 | 1031 | ||
992 | sk_tun = session->tunnel_sock; | 1032 | sk_tun = session->tunnel_sock; |
993 | if (sk_tun == NULL) | 1033 | if (sk_tun == NULL) |
994 | goto abort; | 1034 | goto abort_put_sess; |
995 | tunnel = pppol2tp_sock_to_tunnel(sk_tun); | 1035 | tunnel = pppol2tp_sock_to_tunnel(sk_tun); |
996 | if (tunnel == NULL) | 1036 | if (tunnel == NULL) |
997 | goto abort; | 1037 | goto abort_put_sess; |
998 | 1038 | ||
999 | /* What header length is configured for this session? */ | 1039 | /* What header length is configured for this session? */ |
1000 | hdr_len = pppol2tp_l2tp_header_len(session); | 1040 | hdr_len = pppol2tp_l2tp_header_len(session); |
1001 | 1041 | ||
1002 | /* Check that there's enough headroom in the skb to insert IP, | 1042 | /* Check that there's enough headroom in the skb to insert IP, |
1003 | * UDP and L2TP and PPP headers. If not enough, expand it to | 1043 | * UDP and L2TP and PPP headers. If not enough, expand it to |
1004 | * make room. Note that a new skb (or a clone) is | 1044 | * make room. Adjust truesize. |
1005 | * allocated. If we return an error from this point on, make | ||
1006 | * sure we free the new skb but do not free the original skb | ||
1007 | * since that is done by the caller for the error case. | ||
1008 | */ | 1045 | */ |
1009 | headroom = NET_SKB_PAD + sizeof(struct iphdr) + | 1046 | headroom = NET_SKB_PAD + sizeof(struct iphdr) + |
1010 | sizeof(struct udphdr) + hdr_len + sizeof(ppph); | 1047 | sizeof(struct udphdr) + hdr_len + sizeof(ppph); |
1048 | old_headroom = skb_headroom(skb); | ||
1011 | if (skb_cow_head(skb, headroom)) | 1049 | if (skb_cow_head(skb, headroom)) |
1012 | goto abort; | 1050 | goto abort_put_sess_tun; |
1051 | |||
1052 | new_headroom = skb_headroom(skb); | ||
1053 | skb_orphan(skb); | ||
1054 | skb->truesize += new_headroom - old_headroom; | ||
1013 | 1055 | ||
1014 | /* Setup PPP header */ | 1056 | /* Setup PPP header */ |
1015 | __skb_push(skb, sizeof(ppph)); | 1057 | __skb_push(skb, sizeof(ppph)); |
@@ -1065,8 +1107,7 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb) | |||
1065 | /* Get routing info from the tunnel socket */ | 1107 | /* Get routing info from the tunnel socket */ |
1066 | dst_release(skb->dst); | 1108 | dst_release(skb->dst); |
1067 | skb->dst = dst_clone(__sk_dst_get(sk_tun)); | 1109 | skb->dst = dst_clone(__sk_dst_get(sk_tun)); |
1068 | skb_orphan(skb); | 1110 | pppol2tp_skb_set_owner_w(skb, sk_tun); |
1069 | skb->sk = sk_tun; | ||
1070 | 1111 | ||
1071 | /* Queue the packet to IP for output */ | 1112 | /* Queue the packet to IP for output */ |
1072 | len = skb->len; | 1113 | len = skb->len; |
@@ -1083,8 +1124,14 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb) | |||
1083 | session->stats.tx_errors++; | 1124 | session->stats.tx_errors++; |
1084 | } | 1125 | } |
1085 | 1126 | ||
1127 | sock_put(sk_tun); | ||
1128 | sock_put(sk); | ||
1086 | return 1; | 1129 | return 1; |
1087 | 1130 | ||
1131 | abort_put_sess_tun: | ||
1132 | sock_put(sk_tun); | ||
1133 | abort_put_sess: | ||
1134 | sock_put(sk); | ||
1088 | abort: | 1135 | abort: |
1089 | /* Free the original skb */ | 1136 | /* Free the original skb */ |
1090 | kfree_skb(skb); | 1137 | kfree_skb(skb); |
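Two patterns recur through the pppol2tp_xmit() hunks above: headroom is grown with skb_cow_head() and the reallocation is reflected in skb->truesize, and the tunnel socket is attached to the skb through a destructor so the reference taken here is dropped only when the skb is actually freed. A simplified sketch, with hypothetical sketch_* names and error handling reduced to a single return code:

static void sketch_sock_wfree(struct sk_buff *skb)
{
	sock_put(skb->sk);			/* balance the sock_hold() taken below */
}

static int sketch_prepare_xmit(struct sk_buff *skb, struct sock *sk_tun,
			       unsigned int headroom)
{
	int old_headroom = skb_headroom(skb);

	if (skb_cow_head(skb, headroom))	/* may reallocate the skb head */
		return -ENOMEM;

	/* account for whatever extra head space the reallocation added */
	skb->truesize += skb_headroom(skb) - old_headroom;

	skb_orphan(skb);			/* detach any previous owner */
	sock_hold(sk_tun);
	skb->sk = sk_tun;
	skb->destructor = sketch_sock_wfree;
	return 0;
}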
@@ -1188,7 +1235,7 @@ static void pppol2tp_tunnel_destruct(struct sock *sk) | |||
1188 | { | 1235 | { |
1189 | struct pppol2tp_tunnel *tunnel; | 1236 | struct pppol2tp_tunnel *tunnel; |
1190 | 1237 | ||
1191 | tunnel = pppol2tp_sock_to_tunnel(sk); | 1238 | tunnel = sk->sk_user_data; |
1192 | if (tunnel == NULL) | 1239 | if (tunnel == NULL) |
1193 | goto end; | 1240 | goto end; |
1194 | 1241 | ||
@@ -1227,10 +1274,12 @@ static void pppol2tp_session_destruct(struct sock *sk) | |||
1227 | if (sk->sk_user_data != NULL) { | 1274 | if (sk->sk_user_data != NULL) { |
1228 | struct pppol2tp_tunnel *tunnel; | 1275 | struct pppol2tp_tunnel *tunnel; |
1229 | 1276 | ||
1230 | session = pppol2tp_sock_to_session(sk); | 1277 | session = sk->sk_user_data; |
1231 | if (session == NULL) | 1278 | if (session == NULL) |
1232 | goto out; | 1279 | goto out; |
1233 | 1280 | ||
1281 | BUG_ON(session->magic != L2TP_SESSION_MAGIC); | ||
1282 | |||
1234 | /* Don't use pppol2tp_sock_to_tunnel() here to | 1283 | /* Don't use pppol2tp_sock_to_tunnel() here to |
1235 | * get the tunnel context because the tunnel | 1284 | * get the tunnel context because the tunnel |
1236 | * socket might have already been closed (its | 1285 | * socket might have already been closed (its |
@@ -1276,6 +1325,7 @@ out: | |||
1276 | static int pppol2tp_release(struct socket *sock) | 1325 | static int pppol2tp_release(struct socket *sock) |
1277 | { | 1326 | { |
1278 | struct sock *sk = sock->sk; | 1327 | struct sock *sk = sock->sk; |
1328 | struct pppol2tp_session *session; | ||
1279 | int error; | 1329 | int error; |
1280 | 1330 | ||
1281 | if (!sk) | 1331 | if (!sk) |
@@ -1293,9 +1343,18 @@ static int pppol2tp_release(struct socket *sock) | |||
1293 | sock_orphan(sk); | 1343 | sock_orphan(sk); |
1294 | sock->sk = NULL; | 1344 | sock->sk = NULL; |
1295 | 1345 | ||
1346 | session = pppol2tp_sock_to_session(sk); | ||
1347 | |||
1296 | /* Purge any queued data */ | 1348 | /* Purge any queued data */ |
1297 | skb_queue_purge(&sk->sk_receive_queue); | 1349 | skb_queue_purge(&sk->sk_receive_queue); |
1298 | skb_queue_purge(&sk->sk_write_queue); | 1350 | skb_queue_purge(&sk->sk_write_queue); |
1351 | if (session != NULL) { | ||
1352 | struct sk_buff *skb; | ||
1353 | while ((skb = skb_dequeue(&session->reorder_q))) { | ||
1354 | kfree_skb(skb); | ||
1355 | sock_put(sk); | ||
1356 | } | ||
1357 | } | ||
1299 | 1358 | ||
1300 | release_sock(sk); | 1359 | release_sock(sk); |
1301 | 1360 | ||
@@ -1598,7 +1657,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, | |||
1598 | 1657 | ||
1599 | error = ppp_register_channel(&po->chan); | 1658 | error = ppp_register_channel(&po->chan); |
1600 | if (error) | 1659 | if (error) |
1601 | goto end; | 1660 | goto end_put_tun; |
1602 | 1661 | ||
1603 | /* This is how we get the session context from the socket. */ | 1662 | /* This is how we get the session context from the socket. */ |
1604 | sk->sk_user_data = session; | 1663 | sk->sk_user_data = session; |
@@ -1618,6 +1677,8 @@ out_no_ppp: | |||
1618 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | 1677 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, |
1619 | "%s: created\n", session->name); | 1678 | "%s: created\n", session->name); |
1620 | 1679 | ||
1680 | end_put_tun: | ||
1681 | sock_put(tunnel_sock); | ||
1621 | end: | 1682 | end: |
1622 | release_sock(sk); | 1683 | release_sock(sk); |
1623 | 1684 | ||
@@ -1665,6 +1726,7 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr, | |||
1665 | *usockaddr_len = len; | 1726 | *usockaddr_len = len; |
1666 | 1727 | ||
1667 | error = 0; | 1728 | error = 0; |
1729 | sock_put(sock->sk); | ||
1668 | 1730 | ||
1669 | end: | 1731 | end: |
1670 | return error; | 1732 | return error; |
@@ -1903,14 +1965,17 @@ static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd, | |||
1903 | err = -EBADF; | 1965 | err = -EBADF; |
1904 | tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock); | 1966 | tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock); |
1905 | if (tunnel == NULL) | 1967 | if (tunnel == NULL) |
1906 | goto end; | 1968 | goto end_put_sess; |
1907 | 1969 | ||
1908 | err = pppol2tp_tunnel_ioctl(tunnel, cmd, arg); | 1970 | err = pppol2tp_tunnel_ioctl(tunnel, cmd, arg); |
1909 | goto end; | 1971 | sock_put(session->tunnel_sock); |
1972 | goto end_put_sess; | ||
1910 | } | 1973 | } |
1911 | 1974 | ||
1912 | err = pppol2tp_session_ioctl(session, cmd, arg); | 1975 | err = pppol2tp_session_ioctl(session, cmd, arg); |
1913 | 1976 | ||
1977 | end_put_sess: | ||
1978 | sock_put(sk); | ||
1914 | end: | 1979 | end: |
1915 | return err; | 1980 | return err; |
1916 | } | 1981 | } |
@@ -2056,14 +2121,17 @@ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname, | |||
2056 | err = -EBADF; | 2121 | err = -EBADF; |
2057 | tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock); | 2122 | tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock); |
2058 | if (tunnel == NULL) | 2123 | if (tunnel == NULL) |
2059 | goto end; | 2124 | goto end_put_sess; |
2060 | 2125 | ||
2061 | err = pppol2tp_tunnel_setsockopt(sk, tunnel, optname, val); | 2126 | err = pppol2tp_tunnel_setsockopt(sk, tunnel, optname, val); |
2127 | sock_put(session->tunnel_sock); | ||
2062 | } else | 2128 | } else |
2063 | err = pppol2tp_session_setsockopt(sk, session, optname, val); | 2129 | err = pppol2tp_session_setsockopt(sk, session, optname, val); |
2064 | 2130 | ||
2065 | err = 0; | 2131 | err = 0; |
2066 | 2132 | ||
2133 | end_put_sess: | ||
2134 | sock_put(sk); | ||
2067 | end: | 2135 | end: |
2068 | return err; | 2136 | return err; |
2069 | } | 2137 | } |
@@ -2178,20 +2246,24 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, | |||
2178 | err = -EBADF; | 2246 | err = -EBADF; |
2179 | tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock); | 2247 | tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock); |
2180 | if (tunnel == NULL) | 2248 | if (tunnel == NULL) |
2181 | goto end; | 2249 | goto end_put_sess; |
2182 | 2250 | ||
2183 | err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val); | 2251 | err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val); |
2252 | sock_put(session->tunnel_sock); | ||
2184 | } else | 2253 | } else |
2185 | err = pppol2tp_session_getsockopt(sk, session, optname, &val); | 2254 | err = pppol2tp_session_getsockopt(sk, session, optname, &val); |
2186 | 2255 | ||
2187 | err = -EFAULT; | 2256 | err = -EFAULT; |
2188 | if (put_user(len, (int __user *) optlen)) | 2257 | if (put_user(len, (int __user *) optlen)) |
2189 | goto end; | 2258 | goto end_put_sess; |
2190 | 2259 | ||
2191 | if (copy_to_user((void __user *) optval, &val, len)) | 2260 | if (copy_to_user((void __user *) optval, &val, len)) |
2192 | goto end; | 2261 | goto end_put_sess; |
2193 | 2262 | ||
2194 | err = 0; | 2263 | err = 0; |
2264 | |||
2265 | end_put_sess: | ||
2266 | sock_put(sk); | ||
2195 | end: | 2267 | end: |
2196 | return err; | 2268 | return err; |
2197 | } | 2269 | } |
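The common thread in the pppol2tp.c changes is a new reference-counting convention: pppol2tp_sock_to_session() and pppol2tp_sock_to_tunnel() now take a hold on the socket before dereferencing sk_user_data, so every caller that gets a non-NULL result owes a matching sock_put(), which is what the added error_put_sess/end_put_sess style labels pay off. A minimal sketch of the lookup side, with an illustrative name, mirroring the hunk near the top of the file:

static struct pppol2tp_session *sketch_sock_to_session(struct sock *sk)
{
	struct pppol2tp_session *session;

	if (sk == NULL)
		return NULL;

	sock_hold(sk);			/* pin sk for the duration of the lookup */
	session = sk->sk_user_data;
	if (session == NULL) {
		sock_put(sk);		/* nothing attached, drop the hold */
		return NULL;
	}

	return session;			/* caller must sock_put(sk) when done */
}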
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c index 169edc154928..858b191517b3 100644 --- a/drivers/net/r6040.c +++ b/drivers/net/r6040.c | |||
@@ -733,7 +733,7 @@ static void r6040_timer(unsigned long data) | |||
733 | } | 733 | } |
734 | 734 | ||
735 | /* Timer active again */ | 735 | /* Timer active again */ |
736 | mod_timer(&lp->timer, jiffies + round_jiffies(HZ)); | 736 | mod_timer(&lp->timer, round_jiffies(jiffies + HZ)); |
737 | } | 737 | } |
738 | 738 | ||
739 | /* Read/set MAC address routines */ | 739 | /* Read/set MAC address routines */ |
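The r6040 one-liner is an argument-order fix: round_jiffies() is meant to round an absolute expiry time so periodic timers fire near second boundaries, whereas the old code rounded the constant HZ and then added it to jiffies, defeating the batching. A hedged illustration of the intended idiom (sketch_rearm is a made-up name):

static void sketch_rearm(struct timer_list *t)
{
	/* rounds the absolute expiry, so wakeups coalesce on second edges */
	mod_timer(t, round_jiffies(jiffies + HZ));
}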
diff --git a/drivers/net/s2io-regs.h b/drivers/net/s2io-regs.h index 2109508c047a..f8274f8941ea 100644 --- a/drivers/net/s2io-regs.h +++ b/drivers/net/s2io-regs.h | |||
@@ -250,7 +250,7 @@ struct XENA_dev_config { | |||
250 | u64 tx_mat0_n[0x8]; | 250 | u64 tx_mat0_n[0x8]; |
251 | #define TX_MAT_SET(fifo, msi) vBIT(msi, (8 * fifo), 8) | 251 | #define TX_MAT_SET(fifo, msi) vBIT(msi, (8 * fifo), 8) |
252 | 252 | ||
253 | u8 unused_1[0x8]; | 253 | u64 xmsi_mask_reg; |
254 | u64 stat_byte_cnt; | 254 | u64 stat_byte_cnt; |
255 | #define STAT_BC(n) vBIT(n,4,12) | 255 | #define STAT_BC(n) vBIT(n,4,12) |
256 | 256 | ||
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index 523478ebfd69..b5c1e663417d 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c | |||
@@ -86,7 +86,7 @@ | |||
86 | #include "s2io.h" | 86 | #include "s2io.h" |
87 | #include "s2io-regs.h" | 87 | #include "s2io-regs.h" |
88 | 88 | ||
89 | #define DRV_VERSION "2.0.26.23" | 89 | #define DRV_VERSION "2.0.26.24" |
90 | 90 | ||
91 | /* S2io Driver name & version. */ | 91 | /* S2io Driver name & version. */ |
92 | static char s2io_driver_name[] = "Neterion"; | 92 | static char s2io_driver_name[] = "Neterion"; |
@@ -1113,9 +1113,10 @@ static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev) | |||
1113 | struct pci_dev *tdev = NULL; | 1113 | struct pci_dev *tdev = NULL; |
1114 | while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) { | 1114 | while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) { |
1115 | if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) { | 1115 | if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) { |
1116 | if (tdev->bus == s2io_pdev->bus->parent) | 1116 | if (tdev->bus == s2io_pdev->bus->parent) { |
1117 | pci_dev_put(tdev); | 1117 | pci_dev_put(tdev); |
1118 | return 1; | 1118 | return 1; |
1119 | } | ||
1119 | } | 1120 | } |
1120 | } | 1121 | } |
1121 | return 0; | 1122 | return 0; |
@@ -1219,15 +1220,33 @@ static int init_tti(struct s2io_nic *nic, int link) | |||
1219 | TTI_DATA1_MEM_TX_URNG_B(0x10) | | 1220 | TTI_DATA1_MEM_TX_URNG_B(0x10) | |
1220 | TTI_DATA1_MEM_TX_URNG_C(0x30) | | 1221 | TTI_DATA1_MEM_TX_URNG_C(0x30) | |
1221 | TTI_DATA1_MEM_TX_TIMER_AC_EN; | 1222 | TTI_DATA1_MEM_TX_TIMER_AC_EN; |
1222 | | 1223 | if (i == 0) |
1223 | if (use_continuous_tx_intrs && (link == LINK_UP)) | 1224 | if (use_continuous_tx_intrs && (link == LINK_UP)) |
1224 | val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN; | 1225 | val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN; |
1225 | writeq(val64, &bar0->tti_data1_mem); | 1226 | writeq(val64, &bar0->tti_data1_mem); |
1226 | 1227 | ||
1227 | val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) | | 1228 | if (nic->config.intr_type == MSI_X) { |
1228 | TTI_DATA2_MEM_TX_UFC_B(0x20) | | 1229 | val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) | |
1229 | TTI_DATA2_MEM_TX_UFC_C(0x40) | | 1230 | TTI_DATA2_MEM_TX_UFC_B(0x100) | |
1230 | TTI_DATA2_MEM_TX_UFC_D(0x80); | 1231 | TTI_DATA2_MEM_TX_UFC_C(0x200) | |
1232 | TTI_DATA2_MEM_TX_UFC_D(0x300); | ||
1233 | } else { | ||
1234 | if ((nic->config.tx_steering_type == | ||
1235 | TX_DEFAULT_STEERING) && | ||
1236 | (config->tx_fifo_num > 1) && | ||
1237 | (i >= nic->udp_fifo_idx) && | ||
1238 | (i < (nic->udp_fifo_idx + | ||
1239 | nic->total_udp_fifos))) | ||
1240 | val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) | | ||
1241 | TTI_DATA2_MEM_TX_UFC_B(0x80) | | ||
1242 | TTI_DATA2_MEM_TX_UFC_C(0x100) | | ||
1243 | TTI_DATA2_MEM_TX_UFC_D(0x120); | ||
1244 | else | ||
1245 | val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) | | ||
1246 | TTI_DATA2_MEM_TX_UFC_B(0x20) | | ||
1247 | TTI_DATA2_MEM_TX_UFC_C(0x40) | | ||
1248 | TTI_DATA2_MEM_TX_UFC_D(0x80); | ||
1249 | } | ||
1231 | 1250 | ||
1232 | writeq(val64, &bar0->tti_data2_mem); | 1251 | writeq(val64, &bar0->tti_data2_mem); |
1233 | 1252 | ||
@@ -2813,6 +2832,15 @@ static void free_rx_buffers(struct s2io_nic *sp) | |||
2813 | } | 2832 | } |
2814 | } | 2833 | } |
2815 | 2834 | ||
2835 | static int s2io_chk_rx_buffers(struct ring_info *ring) | ||
2836 | { | ||
2837 | if (fill_rx_buffers(ring) == -ENOMEM) { | ||
2838 | DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name); | ||
2839 | DBG_PRINT(INFO_DBG, " in Rx Intr!!\n"); | ||
2840 | } | ||
2841 | return 0; | ||
2842 | } | ||
2843 | |||
2816 | /** | 2844 | /** |
2817 | * s2io_poll - Rx interrupt handler for NAPI support | 2845 | * s2io_poll - Rx interrupt handler for NAPI support |
2818 | * @napi : pointer to the napi structure. | 2846 | * @napi : pointer to the napi structure. |
@@ -2826,57 +2854,73 @@ static void free_rx_buffers(struct s2io_nic *sp) | |||
2826 | * 0 on success and 1 if there are No Rx packets to be processed. | 2854 | * 0 on success and 1 if there are No Rx packets to be processed. |
2827 | */ | 2855 | */ |
2828 | 2856 | ||
2829 | static int s2io_poll(struct napi_struct *napi, int budget) | 2857 | static int s2io_poll_msix(struct napi_struct *napi, int budget) |
2830 | { | 2858 | { |
2831 | struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi); | 2859 | struct ring_info *ring = container_of(napi, struct ring_info, napi); |
2832 | struct net_device *dev = nic->dev; | 2860 | struct net_device *dev = ring->dev; |
2833 | int pkt_cnt = 0, org_pkts_to_process; | ||
2834 | struct mac_info *mac_control; | ||
2835 | struct config_param *config; | 2861 | struct config_param *config; |
2862 | struct mac_info *mac_control; | ||
2863 | int pkts_processed = 0; | ||
2864 | u8 __iomem *addr = NULL; | ||
2865 | u8 val8 = 0; | ||
2866 | struct s2io_nic *nic = dev->priv; | ||
2836 | struct XENA_dev_config __iomem *bar0 = nic->bar0; | 2867 | struct XENA_dev_config __iomem *bar0 = nic->bar0; |
2837 | int i; | 2868 | int budget_org = budget; |
2838 | 2869 | ||
2839 | mac_control = &nic->mac_control; | ||
2840 | config = &nic->config; | 2870 | config = &nic->config; |
2871 | mac_control = &nic->mac_control; | ||
2841 | 2872 | ||
2842 | nic->pkts_to_process = budget; | 2873 | if (unlikely(!is_s2io_card_up(nic))) |
2843 | org_pkts_to_process = nic->pkts_to_process; | 2874 | return 0; |
2844 | 2875 | ||
2845 | writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); | 2876 | pkts_processed = rx_intr_handler(ring, budget); |
2846 | readl(&bar0->rx_traffic_int); | 2877 | s2io_chk_rx_buffers(ring); |
2847 | 2878 | ||
2848 | for (i = 0; i < config->rx_ring_num; i++) { | 2879 | if (pkts_processed < budget_org) { |
2849 | rx_intr_handler(&mac_control->rings[i]); | 2880 | netif_rx_complete(dev, napi); |
2850 | pkt_cnt = org_pkts_to_process - nic->pkts_to_process; | 2881 | /*Re Enable MSI-Rx Vector*/ |
2851 | if (!nic->pkts_to_process) { | 2882 | addr = (u8 __iomem *)&bar0->xmsi_mask_reg; |
2852 | /* Quota for the current iteration has been met */ | 2883 | addr += 7 - ring->ring_no; |
2853 | goto no_rx; | 2884 | val8 = (ring->ring_no == 0) ? 0x3f : 0xbf; |
2854 | } | 2885 | writeb(val8, addr); |
2886 | val8 = readb(addr); | ||
2855 | } | 2887 | } |
2888 | return pkts_processed; | ||
2889 | } | ||
2890 | static int s2io_poll_inta(struct napi_struct *napi, int budget) | ||
2891 | { | ||
2892 | struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi); | ||
2893 | struct ring_info *ring; | ||
2894 | struct net_device *dev = nic->dev; | ||
2895 | struct config_param *config; | ||
2896 | struct mac_info *mac_control; | ||
2897 | int pkts_processed = 0; | ||
2898 | int ring_pkts_processed, i; | ||
2899 | struct XENA_dev_config __iomem *bar0 = nic->bar0; | ||
2900 | int budget_org = budget; | ||
2856 | 2901 | ||
2857 | netif_rx_complete(dev, napi); | 2902 | config = &nic->config; |
2903 | mac_control = &nic->mac_control; | ||
2858 | 2904 | ||
2859 | for (i = 0; i < config->rx_ring_num; i++) { | 2905 | if (unlikely(!is_s2io_card_up(nic))) |
2860 | if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) { | 2906 | return 0; |
2861 | DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); | ||
2862 | DBG_PRINT(INFO_DBG, " in Rx Poll!!\n"); | ||
2863 | break; | ||
2864 | } | ||
2865 | } | ||
2866 | /* Re enable the Rx interrupts. */ | ||
2867 | writeq(0x0, &bar0->rx_traffic_mask); | ||
2868 | readl(&bar0->rx_traffic_mask); | ||
2869 | return pkt_cnt; | ||
2870 | 2907 | ||
2871 | no_rx: | ||
2872 | for (i = 0; i < config->rx_ring_num; i++) { | 2908 | for (i = 0; i < config->rx_ring_num; i++) { |
2873 | if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) { | 2909 | ring = &mac_control->rings[i]; |
2874 | DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); | 2910 | ring_pkts_processed = rx_intr_handler(ring, budget); |
2875 | DBG_PRINT(INFO_DBG, " in Rx Poll!!\n"); | 2911 | s2io_chk_rx_buffers(ring); |
2912 | pkts_processed += ring_pkts_processed; | ||
2913 | budget -= ring_pkts_processed; | ||
2914 | if (budget <= 0) | ||
2876 | break; | 2915 | break; |
2877 | } | ||
2878 | } | 2916 | } |
2879 | return pkt_cnt; | 2917 | if (pkts_processed < budget_org) { |
2918 | netif_rx_complete(dev, napi); | ||
2919 | /* Re enable the Rx interrupts for the ring */ | ||
2920 | writeq(0, &bar0->rx_traffic_mask); | ||
2921 | readl(&bar0->rx_traffic_mask); | ||
2922 | } | ||
2923 | return pkts_processed; | ||
2880 | } | 2924 | } |
2881 | 2925 | ||
2882 | #ifdef CONFIG_NET_POLL_CONTROLLER | 2926 | #ifdef CONFIG_NET_POLL_CONTROLLER |
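The rewritten poll path above splits into s2io_poll_msix() and s2io_poll_inta(): with MSI-X each RX ring has its own napi_struct, consumes at most the NAPI budget, and on completion re-enables only its own vector in xmsi_mask_reg. A rough outline of the per-ring variant, with the register write reduced to a comment and a hypothetical sketch_ name:

static int sketch_poll_msix(struct napi_struct *napi, int budget)
{
	struct ring_info *ring = container_of(napi, struct ring_info, napi);
	int done = rx_intr_handler(ring, budget);	/* packets consumed this pass */

	s2io_chk_rx_buffers(ring);			/* refill RX descriptors */
	if (done < budget) {
		netif_rx_complete(ring->dev, napi);	/* 2.6.26-era two-argument API */
		/* unmask this ring's MSI-X vector via bar0->xmsi_mask_reg here */
	}
	return done;
}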
@@ -2918,7 +2962,7 @@ static void s2io_netpoll(struct net_device *dev) | |||
2918 | 2962 | ||
2919 | /* check for received packet and indicate up to network */ | 2963 | /* check for received packet and indicate up to network */ |
2920 | for (i = 0; i < config->rx_ring_num; i++) | 2964 | for (i = 0; i < config->rx_ring_num; i++) |
2921 | rx_intr_handler(&mac_control->rings[i]); | 2965 | rx_intr_handler(&mac_control->rings[i], 0); |
2922 | 2966 | ||
2923 | for (i = 0; i < config->rx_ring_num; i++) { | 2967 | for (i = 0; i < config->rx_ring_num; i++) { |
2924 | if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) { | 2968 | if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) { |
@@ -2934,7 +2978,8 @@ static void s2io_netpoll(struct net_device *dev) | |||
2934 | 2978 | ||
2935 | /** | 2979 | /** |
2936 | * rx_intr_handler - Rx interrupt handler | 2980 | * rx_intr_handler - Rx interrupt handler |
2937 | * @nic: device private variable. | 2981 | * @ring_info: per ring structure. |
2982 | * @budget: budget for napi processing. | ||
2938 | * Description: | 2983 | * Description: |
2939 | * If the interrupt is because of a received frame or if the | 2984 | * If the interrupt is because of a received frame or if the |
2940 | * receive ring contains fresh as yet un-processed frames,this function is | 2985 | * receive ring contains fresh as yet un-processed frames,this function is |
@@ -2942,15 +2987,15 @@ static void s2io_netpoll(struct net_device *dev) | |||
2942 | * stopped and sends the skb to the OSM's Rx handler and then increments | 2987 | * stopped and sends the skb to the OSM's Rx handler and then increments |
2943 | * the offset. | 2988 | * the offset. |
2944 | * Return Value: | 2989 | * Return Value: |
2945 | * NONE. | 2990 | * No. of napi packets processed. |
2946 | */ | 2991 | */ |
2947 | static void rx_intr_handler(struct ring_info *ring_data) | 2992 | static int rx_intr_handler(struct ring_info *ring_data, int budget) |
2948 | { | 2993 | { |
2949 | int get_block, put_block; | 2994 | int get_block, put_block; |
2950 | struct rx_curr_get_info get_info, put_info; | 2995 | struct rx_curr_get_info get_info, put_info; |
2951 | struct RxD_t *rxdp; | 2996 | struct RxD_t *rxdp; |
2952 | struct sk_buff *skb; | 2997 | struct sk_buff *skb; |
2953 | int pkt_cnt = 0; | 2998 | int pkt_cnt = 0, napi_pkts = 0; |
2954 | int i; | 2999 | int i; |
2955 | struct RxD1* rxdp1; | 3000 | struct RxD1* rxdp1; |
2956 | struct RxD3* rxdp3; | 3001 | struct RxD3* rxdp3; |
@@ -2977,7 +3022,7 @@ static void rx_intr_handler(struct ring_info *ring_data) | |||
2977 | DBG_PRINT(ERR_DBG, "%s: The skb is ", | 3022 | DBG_PRINT(ERR_DBG, "%s: The skb is ", |
2978 | ring_data->dev->name); | 3023 | ring_data->dev->name); |
2979 | DBG_PRINT(ERR_DBG, "Null in Rx Intr\n"); | 3024 | DBG_PRINT(ERR_DBG, "Null in Rx Intr\n"); |
2980 | return; | 3025 | return 0; |
2981 | } | 3026 | } |
2982 | if (ring_data->rxd_mode == RXD_MODE_1) { | 3027 | if (ring_data->rxd_mode == RXD_MODE_1) { |
2983 | rxdp1 = (struct RxD1*)rxdp; | 3028 | rxdp1 = (struct RxD1*)rxdp; |
@@ -3014,9 +3059,10 @@ static void rx_intr_handler(struct ring_info *ring_data) | |||
3014 | rxdp = ring_data->rx_blocks[get_block].block_virt_addr; | 3059 | rxdp = ring_data->rx_blocks[get_block].block_virt_addr; |
3015 | } | 3060 | } |
3016 | 3061 | ||
3017 | if(ring_data->nic->config.napi){ | 3062 | if (ring_data->nic->config.napi) { |
3018 | ring_data->nic->pkts_to_process -= 1; | 3063 | budget--; |
3019 | if (!ring_data->nic->pkts_to_process) | 3064 | napi_pkts++; |
3065 | if (!budget) | ||
3020 | break; | 3066 | break; |
3021 | } | 3067 | } |
3022 | pkt_cnt++; | 3068 | pkt_cnt++; |
@@ -3034,6 +3080,7 @@ static void rx_intr_handler(struct ring_info *ring_data) | |||
3034 | } | 3080 | } |
3035 | } | 3081 | } |
3036 | } | 3082 | } |
3083 | return(napi_pkts); | ||
3037 | } | 3084 | } |
3038 | 3085 | ||
3039 | /** | 3086 | /** |
@@ -3730,14 +3777,19 @@ static void restore_xmsi_data(struct s2io_nic *nic) | |||
3730 | { | 3777 | { |
3731 | struct XENA_dev_config __iomem *bar0 = nic->bar0; | 3778 | struct XENA_dev_config __iomem *bar0 = nic->bar0; |
3732 | u64 val64; | 3779 | u64 val64; |
3733 | int i; | 3780 | int i, msix_index; |
3781 | |||
3782 | |||
3783 | if (nic->device_type == XFRAME_I_DEVICE) | ||
3784 | return; | ||
3734 | 3785 | ||
3735 | for (i=0; i < MAX_REQUESTED_MSI_X; i++) { | 3786 | for (i=0; i < MAX_REQUESTED_MSI_X; i++) { |
3787 | msix_index = (i) ? ((i-1) * 8 + 1): 0; | ||
3736 | writeq(nic->msix_info[i].addr, &bar0->xmsi_address); | 3788 | writeq(nic->msix_info[i].addr, &bar0->xmsi_address); |
3737 | writeq(nic->msix_info[i].data, &bar0->xmsi_data); | 3789 | writeq(nic->msix_info[i].data, &bar0->xmsi_data); |
3738 | val64 = (s2BIT(7) | s2BIT(15) | vBIT(i, 26, 6)); | 3790 | val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6)); |
3739 | writeq(val64, &bar0->xmsi_access); | 3791 | writeq(val64, &bar0->xmsi_access); |
3740 | if (wait_for_msix_trans(nic, i)) { | 3792 | if (wait_for_msix_trans(nic, msix_index)) { |
3741 | DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__); | 3793 | DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__); |
3742 | continue; | 3794 | continue; |
3743 | } | 3795 | } |
@@ -3748,13 +3800,17 @@ static void store_xmsi_data(struct s2io_nic *nic) | |||
3748 | { | 3800 | { |
3749 | struct XENA_dev_config __iomem *bar0 = nic->bar0; | 3801 | struct XENA_dev_config __iomem *bar0 = nic->bar0; |
3750 | u64 val64, addr, data; | 3802 | u64 val64, addr, data; |
3751 | int i; | 3803 | int i, msix_index; |
3804 | |||
3805 | if (nic->device_type == XFRAME_I_DEVICE) | ||
3806 | return; | ||
3752 | 3807 | ||
3753 | /* Store and display */ | 3808 | /* Store and display */ |
3754 | for (i=0; i < MAX_REQUESTED_MSI_X; i++) { | 3809 | for (i=0; i < MAX_REQUESTED_MSI_X; i++) { |
3755 | val64 = (s2BIT(15) | vBIT(i, 26, 6)); | 3810 | msix_index = (i) ? ((i-1) * 8 + 1): 0; |
3811 | val64 = (s2BIT(15) | vBIT(msix_index, 26, 6)); | ||
3756 | writeq(val64, &bar0->xmsi_access); | 3812 | writeq(val64, &bar0->xmsi_access); |
3757 | if (wait_for_msix_trans(nic, i)) { | 3813 | if (wait_for_msix_trans(nic, msix_index)) { |
3758 | DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__); | 3814 | DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__); |
3759 | continue; | 3815 | continue; |
3760 | } | 3816 | } |
@@ -3770,11 +3826,11 @@ static void store_xmsi_data(struct s2io_nic *nic) | |||
3770 | static int s2io_enable_msi_x(struct s2io_nic *nic) | 3826 | static int s2io_enable_msi_x(struct s2io_nic *nic) |
3771 | { | 3827 | { |
3772 | struct XENA_dev_config __iomem *bar0 = nic->bar0; | 3828 | struct XENA_dev_config __iomem *bar0 = nic->bar0; |
3773 | u64 tx_mat, rx_mat; | 3829 | u64 rx_mat; |
3774 | u16 msi_control; /* Temp variable */ | 3830 | u16 msi_control; /* Temp variable */ |
3775 | int ret, i, j, msix_indx = 1; | 3831 | int ret, i, j, msix_indx = 1; |
3776 | 3832 | ||
3777 | nic->entries = kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct msix_entry), | 3833 | nic->entries = kmalloc(nic->num_entries * sizeof(struct msix_entry), |
3778 | GFP_KERNEL); | 3834 | GFP_KERNEL); |
3779 | if (!nic->entries) { | 3835 | if (!nic->entries) { |
3780 | DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \ | 3836 | DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \ |
@@ -3783,10 +3839,12 @@ static int s2io_enable_msi_x(struct s2io_nic *nic) | |||
3783 | return -ENOMEM; | 3839 | return -ENOMEM; |
3784 | } | 3840 | } |
3785 | nic->mac_control.stats_info->sw_stat.mem_allocated | 3841 | nic->mac_control.stats_info->sw_stat.mem_allocated |
3786 | += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); | 3842 | += (nic->num_entries * sizeof(struct msix_entry)); |
3843 | |||
3844 | memset(nic->entries, 0, nic->num_entries * sizeof(struct msix_entry)); | ||
3787 | 3845 | ||
3788 | nic->s2io_entries = | 3846 | nic->s2io_entries = |
3789 | kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct s2io_msix_entry), | 3847 | kmalloc(nic->num_entries * sizeof(struct s2io_msix_entry), |
3790 | GFP_KERNEL); | 3848 | GFP_KERNEL); |
3791 | if (!nic->s2io_entries) { | 3849 | if (!nic->s2io_entries) { |
3792 | DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", | 3850 | DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", |
@@ -3794,60 +3852,52 @@ static int s2io_enable_msi_x(struct s2io_nic *nic) | |||
3794 | nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++; | 3852 | nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++; |
3795 | kfree(nic->entries); | 3853 | kfree(nic->entries); |
3796 | nic->mac_control.stats_info->sw_stat.mem_freed | 3854 | nic->mac_control.stats_info->sw_stat.mem_freed |
3797 | += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); | 3855 | += (nic->num_entries * sizeof(struct msix_entry)); |
3798 | return -ENOMEM; | 3856 | return -ENOMEM; |
3799 | } | 3857 | } |
3800 | nic->mac_control.stats_info->sw_stat.mem_allocated | 3858 | nic->mac_control.stats_info->sw_stat.mem_allocated |
3801 | += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry)); | 3859 | += (nic->num_entries * sizeof(struct s2io_msix_entry)); |
3802 | 3860 | memset(nic->s2io_entries, 0, | |
3803 | for (i=0; i< MAX_REQUESTED_MSI_X; i++) { | 3861 | nic->num_entries * sizeof(struct s2io_msix_entry)); |
3804 | nic->entries[i].entry = i; | 3862 | |
3805 | nic->s2io_entries[i].entry = i; | 3863 | nic->entries[0].entry = 0; |
3864 | nic->s2io_entries[0].entry = 0; | ||
3865 | nic->s2io_entries[0].in_use = MSIX_FLG; | ||
3866 | nic->s2io_entries[0].type = MSIX_ALARM_TYPE; | ||
3867 | nic->s2io_entries[0].arg = &nic->mac_control.fifos; | ||
3868 | |||
3869 | for (i = 1; i < nic->num_entries; i++) { | ||
3870 | nic->entries[i].entry = ((i - 1) * 8) + 1; | ||
3871 | nic->s2io_entries[i].entry = ((i - 1) * 8) + 1; | ||
3806 | nic->s2io_entries[i].arg = NULL; | 3872 | nic->s2io_entries[i].arg = NULL; |
3807 | nic->s2io_entries[i].in_use = 0; | 3873 | nic->s2io_entries[i].in_use = 0; |
3808 | } | 3874 | } |
3809 | 3875 | ||
3810 | tx_mat = readq(&bar0->tx_mat0_n[0]); | ||
3811 | for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) { | ||
3812 | tx_mat |= TX_MAT_SET(i, msix_indx); | ||
3813 | nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i]; | ||
3814 | nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE; | ||
3815 | nic->s2io_entries[msix_indx].in_use = MSIX_FLG; | ||
3816 | } | ||
3817 | writeq(tx_mat, &bar0->tx_mat0_n[0]); | ||
3818 | |||
3819 | rx_mat = readq(&bar0->rx_mat); | 3876 | rx_mat = readq(&bar0->rx_mat); |
3820 | for (j = 0; j < nic->config.rx_ring_num; j++, msix_indx++) { | 3877 | for (j = 0; j < nic->config.rx_ring_num; j++) { |
3821 | rx_mat |= RX_MAT_SET(j, msix_indx); | 3878 | rx_mat |= RX_MAT_SET(j, msix_indx); |
3822 | nic->s2io_entries[msix_indx].arg | 3879 | nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j]; |
3823 | = &nic->mac_control.rings[j]; | 3880 | nic->s2io_entries[j+1].type = MSIX_RING_TYPE; |
3824 | nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE; | 3881 | nic->s2io_entries[j+1].in_use = MSIX_FLG; |
3825 | nic->s2io_entries[msix_indx].in_use = MSIX_FLG; | 3882 | msix_indx += 8; |
3826 | } | 3883 | } |
3827 | writeq(rx_mat, &bar0->rx_mat); | 3884 | writeq(rx_mat, &bar0->rx_mat); |
3885 | readq(&bar0->rx_mat); | ||
3828 | 3886 | ||
3829 | nic->avail_msix_vectors = 0; | 3887 | ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries); |
3830 | ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X); | ||
3831 | /* We fail init if error or we get less vectors than min required */ | 3888 | /* We fail init if error or we get less vectors than min required */ |
3832 | if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) { | ||
3833 | nic->avail_msix_vectors = ret; | ||
3834 | ret = pci_enable_msix(nic->pdev, nic->entries, ret); | ||
3835 | } | ||
3836 | if (ret) { | 3889 | if (ret) { |
3837 | DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name); | 3890 | DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name); |
3838 | kfree(nic->entries); | 3891 | kfree(nic->entries); |
3839 | nic->mac_control.stats_info->sw_stat.mem_freed | 3892 | nic->mac_control.stats_info->sw_stat.mem_freed |
3840 | += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); | 3893 | += (nic->num_entries * sizeof(struct msix_entry)); |
3841 | kfree(nic->s2io_entries); | 3894 | kfree(nic->s2io_entries); |
3842 | nic->mac_control.stats_info->sw_stat.mem_freed | 3895 | nic->mac_control.stats_info->sw_stat.mem_freed |
3843 | += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry)); | 3896 | += (nic->num_entries * sizeof(struct s2io_msix_entry)); |
3844 | nic->entries = NULL; | 3897 | nic->entries = NULL; |
3845 | nic->s2io_entries = NULL; | 3898 | nic->s2io_entries = NULL; |
3846 | nic->avail_msix_vectors = 0; | ||
3847 | return -ENOMEM; | 3899 | return -ENOMEM; |
3848 | } | 3900 | } |
3849 | if (!nic->avail_msix_vectors) | ||
3850 | nic->avail_msix_vectors = MAX_REQUESTED_MSI_X; | ||
3851 | 3901 | ||
3852 | /* | 3902 | /* |
3853 | * To enable MSI-X, MSI also needs to be enabled, due to a bug | 3903 | * To enable MSI-X, MSI also needs to be enabled, due to a bug |
@@ -3919,7 +3969,7 @@ static void remove_msix_isr(struct s2io_nic *sp) | |||
3919 | int i; | 3969 | int i; |
3920 | u16 msi_control; | 3970 | u16 msi_control; |
3921 | 3971 | ||
3922 | for (i = 0; i < MAX_REQUESTED_MSI_X; i++) { | 3972 | for (i = 0; i < sp->num_entries; i++) { |
3923 | if (sp->s2io_entries[i].in_use == | 3973 | if (sp->s2io_entries[i].in_use == |
3924 | MSIX_REGISTERED_SUCCESS) { | 3974 | MSIX_REGISTERED_SUCCESS) { |
3925 | int vector = sp->entries[i].vector; | 3975 | int vector = sp->entries[i].vector; |
@@ -3975,29 +4025,6 @@ static int s2io_open(struct net_device *dev) | |||
3975 | netif_carrier_off(dev); | 4025 | netif_carrier_off(dev); |
3976 | sp->last_link_state = 0; | 4026 | sp->last_link_state = 0; |
3977 | 4027 | ||
3978 | if (sp->config.intr_type == MSI_X) { | ||
3979 | int ret = s2io_enable_msi_x(sp); | ||
3980 | |||
3981 | if (!ret) { | ||
3982 | ret = s2io_test_msi(sp); | ||
3983 | /* rollback MSI-X, will re-enable during add_isr() */ | ||
3984 | remove_msix_isr(sp); | ||
3985 | } | ||
3986 | if (ret) { | ||
3987 | |||
3988 | DBG_PRINT(ERR_DBG, | ||
3989 | "%s: MSI-X requested but failed to enable\n", | ||
3990 | dev->name); | ||
3991 | sp->config.intr_type = INTA; | ||
3992 | } | ||
3993 | } | ||
3994 | |||
3995 | /* NAPI doesn't work well with MSI(X) */ | ||
3996 | if (sp->config.intr_type != INTA) { | ||
3997 | if(sp->config.napi) | ||
3998 | sp->config.napi = 0; | ||
3999 | } | ||
4000 | |||
4001 | /* Initialize H/W and enable interrupts */ | 4028 | /* Initialize H/W and enable interrupts */ |
4002 | err = s2io_card_up(sp); | 4029 | err = s2io_card_up(sp); |
4003 | if (err) { | 4030 | if (err) { |
@@ -4020,12 +4047,12 @@ hw_init_failed: | |||
4020 | if (sp->entries) { | 4047 | if (sp->entries) { |
4021 | kfree(sp->entries); | 4048 | kfree(sp->entries); |
4022 | sp->mac_control.stats_info->sw_stat.mem_freed | 4049 | sp->mac_control.stats_info->sw_stat.mem_freed |
4023 | += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); | 4050 | += (sp->num_entries * sizeof(struct msix_entry)); |
4024 | } | 4051 | } |
4025 | if (sp->s2io_entries) { | 4052 | if (sp->s2io_entries) { |
4026 | kfree(sp->s2io_entries); | 4053 | kfree(sp->s2io_entries); |
4027 | sp->mac_control.stats_info->sw_stat.mem_freed | 4054 | sp->mac_control.stats_info->sw_stat.mem_freed |
4028 | += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry)); | 4055 | += (sp->num_entries * sizeof(struct s2io_msix_entry)); |
4029 | } | 4056 | } |
4030 | } | 4057 | } |
4031 | return err; | 4058 | return err; |
@@ -4327,40 +4354,65 @@ s2io_alarm_handle(unsigned long data) | |||
4327 | mod_timer(&sp->alarm_timer, jiffies + HZ / 2); | 4354 | mod_timer(&sp->alarm_timer, jiffies + HZ / 2); |
4328 | } | 4355 | } |
4329 | 4356 | ||
4330 | static int s2io_chk_rx_buffers(struct ring_info *ring) | ||
4331 | { | ||
4332 | if (fill_rx_buffers(ring) == -ENOMEM) { | ||
4333 | DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name); | ||
4334 | DBG_PRINT(INFO_DBG, " in Rx Intr!!\n"); | ||
4335 | } | ||
4336 | return 0; | ||
4337 | } | ||
4338 | |||
4339 | static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id) | 4357 | static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id) |
4340 | { | 4358 | { |
4341 | struct ring_info *ring = (struct ring_info *)dev_id; | 4359 | struct ring_info *ring = (struct ring_info *)dev_id; |
4342 | struct s2io_nic *sp = ring->nic; | 4360 | struct s2io_nic *sp = ring->nic; |
4361 | struct XENA_dev_config __iomem *bar0 = sp->bar0; | ||
4362 | struct net_device *dev = sp->dev; | ||
4343 | 4363 | ||
4344 | if (!is_s2io_card_up(sp)) | 4364 | if (unlikely(!is_s2io_card_up(sp))) |
4345 | return IRQ_HANDLED; | 4365 | return IRQ_HANDLED; |
4346 | 4366 | ||
4347 | rx_intr_handler(ring); | 4367 | if (sp->config.napi) { |
4348 | s2io_chk_rx_buffers(ring); | 4368 | u8 __iomem *addr = NULL; |
4369 | u8 val8 = 0; | ||
4370 | |||
4371 | addr = (u8 __iomem *)&bar0->xmsi_mask_reg; | ||
4372 | addr += (7 - ring->ring_no); | ||
4373 | val8 = (ring->ring_no == 0) ? 0x7f : 0xff; | ||
4374 | writeb(val8, addr); | ||
4375 | val8 = readb(addr); | ||
4376 | netif_rx_schedule(dev, &ring->napi); | ||
4377 | } else { | ||
4378 | rx_intr_handler(ring, 0); | ||
4379 | s2io_chk_rx_buffers(ring); | ||
4380 | } | ||
4349 | 4381 | ||
4350 | return IRQ_HANDLED; | 4382 | return IRQ_HANDLED; |
4351 | } | 4383 | } |
4352 | 4384 | ||
4353 | static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id) | 4385 | static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id) |
4354 | { | 4386 | { |
4355 | struct fifo_info *fifo = (struct fifo_info *)dev_id; | 4387 | int i; |
4356 | struct s2io_nic *sp = fifo->nic; | 4388 | struct fifo_info *fifos = (struct fifo_info *)dev_id; |
4389 | struct s2io_nic *sp = fifos->nic; | ||
4390 | struct XENA_dev_config __iomem *bar0 = sp->bar0; | ||
4391 | struct config_param *config = &sp->config; | ||
4392 | u64 reason; | ||
4357 | 4393 | ||
4358 | if (!is_s2io_card_up(sp)) | 4394 | if (unlikely(!is_s2io_card_up(sp))) |
4395 | return IRQ_NONE; | ||
4396 | |||
4397 | reason = readq(&bar0->general_int_status); | ||
4398 | if (unlikely(reason == S2IO_MINUS_ONE)) | ||
4399 | /* Nothing much can be done. Get out */ | ||
4359 | return IRQ_HANDLED; | 4400 | return IRQ_HANDLED; |
4360 | 4401 | ||
4361 | tx_intr_handler(fifo); | 4402 | writeq(S2IO_MINUS_ONE, &bar0->general_int_mask); |
4403 | |||
4404 | if (reason & GEN_INTR_TXTRAFFIC) | ||
4405 | writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int); | ||
4406 | |||
4407 | for (i = 0; i < config->tx_fifo_num; i++) | ||
4408 | tx_intr_handler(&fifos[i]); | ||
4409 | |||
4410 | writeq(sp->general_int_mask, &bar0->general_int_mask); | ||
4411 | readl(&bar0->general_int_status); | ||
4412 | |||
4362 | return IRQ_HANDLED; | 4413 | return IRQ_HANDLED; |
4363 | } | 4414 | } |
4415 | |||
4364 | static void s2io_txpic_intr_handle(struct s2io_nic *sp) | 4416 | static void s2io_txpic_intr_handle(struct s2io_nic *sp) |
4365 | { | 4417 | { |
4366 | struct XENA_dev_config __iomem *bar0 = sp->bar0; | 4418 | struct XENA_dev_config __iomem *bar0 = sp->bar0; |
@@ -4762,14 +4814,10 @@ static irqreturn_t s2io_isr(int irq, void *dev_id) | |||
4762 | 4814 | ||
4763 | if (config->napi) { | 4815 | if (config->napi) { |
4764 | if (reason & GEN_INTR_RXTRAFFIC) { | 4816 | if (reason & GEN_INTR_RXTRAFFIC) { |
4765 | if (likely(netif_rx_schedule_prep(dev, | 4817 | netif_rx_schedule(dev, &sp->napi); |
4766 | &sp->napi))) { | 4818 | writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask); |
4767 | __netif_rx_schedule(dev, &sp->napi); | 4819 | writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); |
4768 | writeq(S2IO_MINUS_ONE, | 4820 | readl(&bar0->rx_traffic_int); |
4769 | &bar0->rx_traffic_mask); | ||
4770 | } else | ||
4771 | writeq(S2IO_MINUS_ONE, | ||
4772 | &bar0->rx_traffic_int); | ||
4773 | } | 4821 | } |
4774 | } else { | 4822 | } else { |
4775 | /* | 4823 | /* |
@@ -4781,7 +4829,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id) | |||
4781 | writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); | 4829 | writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); |
4782 | 4830 | ||
4783 | for (i = 0; i < config->rx_ring_num; i++) | 4831 | for (i = 0; i < config->rx_ring_num; i++) |
4784 | rx_intr_handler(&mac_control->rings[i]); | 4832 | rx_intr_handler(&mac_control->rings[i], 0); |
4785 | } | 4833 | } |
4786 | 4834 | ||
4787 | /* | 4835 | /* |
@@ -6984,62 +7032,62 @@ static int s2io_add_isr(struct s2io_nic * sp) | |||
6984 | 7032 | ||
6985 | /* After proper initialization of H/W, register ISR */ | 7033 | /* After proper initialization of H/W, register ISR */ |
6986 | if (sp->config.intr_type == MSI_X) { | 7034 | if (sp->config.intr_type == MSI_X) { |
6987 | int i, msix_tx_cnt=0,msix_rx_cnt=0; | 7035 | int i, msix_rx_cnt = 0; |
6988 | 7036 | ||
6989 | for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) { | 7037 | for (i = 0; i < sp->num_entries; i++) { |
6990 | if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) { | 7038 | if (sp->s2io_entries[i].in_use == MSIX_FLG) { |
6991 | sprintf(sp->desc[i], "%s:MSI-X-%d-TX", | 7039 | if (sp->s2io_entries[i].type == |
7040 | MSIX_RING_TYPE) { | ||
7041 | sprintf(sp->desc[i], "%s:MSI-X-%d-RX", | ||
7042 | dev->name, i); | ||
7043 | err = request_irq(sp->entries[i].vector, | ||
7044 | s2io_msix_ring_handle, 0, | ||
7045 | sp->desc[i], | ||
7046 | sp->s2io_entries[i].arg); | ||
7047 | } else if (sp->s2io_entries[i].type == | ||
7048 | MSIX_ALARM_TYPE) { | ||
7049 | sprintf(sp->desc[i], "%s:MSI-X-%d-TX", | ||
6992 | dev->name, i); | 7050 | dev->name, i); |
6993 | err = request_irq(sp->entries[i].vector, | 7051 | err = request_irq(sp->entries[i].vector, |
6994 | s2io_msix_fifo_handle, 0, sp->desc[i], | 7052 | s2io_msix_fifo_handle, 0, |
6995 | sp->s2io_entries[i].arg); | 7053 | sp->desc[i], |
6996 | /* If either data or addr is zero print it */ | 7054 | sp->s2io_entries[i].arg); |
6997 | if(!(sp->msix_info[i].addr && | 7055 | |
6998 | sp->msix_info[i].data)) { | ||
6999 | DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx " | ||
7000 | "Data:0x%llx\n",sp->desc[i], | ||
7001 | (unsigned long long) | ||
7002 | sp->msix_info[i].addr, | ||
7003 | (unsigned long long) | ||
7004 | sp->msix_info[i].data); | ||
7005 | } else { | ||
7006 | msix_tx_cnt++; | ||
7007 | } | 7056 | } |
7008 | } else { | 7057 | /* if either data or addr is zero print it. */ |
7009 | sprintf(sp->desc[i], "%s:MSI-X-%d-RX", | 7058 | if (!(sp->msix_info[i].addr && |
7010 | dev->name, i); | ||
7011 | err = request_irq(sp->entries[i].vector, | ||
7012 | s2io_msix_ring_handle, 0, sp->desc[i], | ||
7013 | sp->s2io_entries[i].arg); | ||
7014 | /* If either data or addr is zero print it */ | ||
7015 | if(!(sp->msix_info[i].addr && | ||
7016 | sp->msix_info[i].data)) { | 7059 | sp->msix_info[i].data)) { |
7017 | DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx " | 7060 | DBG_PRINT(ERR_DBG, |
7018 | "Data:0x%llx\n",sp->desc[i], | 7061 | "%s @Addr:0x%llx Data:0x%llx\n", |
7062 | sp->desc[i], | ||
7019 | (unsigned long long) | 7063 | (unsigned long long) |
7020 | sp->msix_info[i].addr, | 7064 | sp->msix_info[i].addr, |
7021 | (unsigned long long) | 7065 | (unsigned long long) |
7022 | sp->msix_info[i].data); | 7066 | ntohl(sp->msix_info[i].data)); |
7023 | } else { | 7067 | } else |
7024 | msix_rx_cnt++; | 7068 | msix_rx_cnt++; |
7069 | if (err) { | ||
7070 | remove_msix_isr(sp); | ||
7071 | |||
7072 | DBG_PRINT(ERR_DBG, | ||
7073 | "%s:MSI-X-%d registration " | ||
7074 | "failed\n", dev->name, i); | ||
7075 | |||
7076 | DBG_PRINT(ERR_DBG, | ||
7077 | "%s: Defaulting to INTA\n", | ||
7078 | dev->name); | ||
7079 | sp->config.intr_type = INTA; | ||
7080 | break; | ||
7025 | } | 7081 | } |
7082 | sp->s2io_entries[i].in_use = | ||
7083 | MSIX_REGISTERED_SUCCESS; | ||
7026 | } | 7084 | } |
7027 | if (err) { | ||
7028 | remove_msix_isr(sp); | ||
7029 | DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration " | ||
7030 | "failed\n", dev->name, i); | ||
7031 | DBG_PRINT(ERR_DBG, "%s: defaulting to INTA\n", | ||
7032 | dev->name); | ||
7033 | sp->config.intr_type = INTA; | ||
7034 | break; | ||
7035 | } | ||
7036 | sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS; | ||
7037 | } | 7085 | } |
7038 | if (!err) { | 7086 | if (!err) { |
7039 | printk(KERN_INFO "MSI-X-TX %d entries enabled\n", | ||
7040 | msix_tx_cnt); | ||
7041 | printk(KERN_INFO "MSI-X-RX %d entries enabled\n", | 7087 | printk(KERN_INFO "MSI-X-RX %d entries enabled\n", |
7042 | msix_rx_cnt); | 7088 | --msix_rx_cnt); |
7089 | DBG_PRINT(INFO_DBG, "MSI-X-TX entries enabled" | ||
7090 | " through alarm vector\n"); | ||
7043 | } | 7091 | } |
7044 | } | 7092 | } |
7045 | if (sp->config.intr_type == INTA) { | 7093 | if (sp->config.intr_type == INTA) { |
@@ -7080,8 +7128,15 @@ static void do_s2io_card_down(struct s2io_nic * sp, int do_io) | |||
7080 | clear_bit(__S2IO_STATE_CARD_UP, &sp->state); | 7128 | clear_bit(__S2IO_STATE_CARD_UP, &sp->state); |
7081 | 7129 | ||
7082 | /* Disable napi */ | 7130 | /* Disable napi */ |
7083 | if (config->napi) | 7131 | if (sp->config.napi) { |
7084 | napi_disable(&sp->napi); | 7132 | int off = 0; |
7133 | if (config->intr_type == MSI_X) { | ||
7134 | for (; off < sp->config.rx_ring_num; off++) | ||
7135 | napi_disable(&sp->mac_control.rings[off].napi); | ||
7136 | } | ||
7137 | else | ||
7138 | napi_disable(&sp->napi); | ||
7139 | } | ||
7085 | 7140 | ||
7086 | /* disable Tx and Rx traffic on the NIC */ | 7141 | /* disable Tx and Rx traffic on the NIC */ |
7087 | if (do_io) | 7142 | if (do_io) |
@@ -7173,8 +7228,15 @@ static int s2io_card_up(struct s2io_nic * sp) | |||
7173 | } | 7228 | } |
7174 | 7229 | ||
7175 | /* Initialise napi */ | 7230 | /* Initialise napi */ |
7176 | if (config->napi) | 7231 | if (config->napi) { |
7177 | napi_enable(&sp->napi); | 7232 | int i; |
7233 | if (config->intr_type == MSI_X) { | ||
7234 | for (i = 0; i < sp->config.rx_ring_num; i++) | ||
7235 | napi_enable(&sp->mac_control.rings[i].napi); | ||
7236 | } else { | ||
7237 | napi_enable(&sp->napi); | ||
7238 | } | ||
7239 | } | ||
7178 | 7240 | ||
7179 | /* Maintain the state prior to the open */ | 7241 | /* Maintain the state prior to the open */ |
7180 | if (sp->promisc_flg) | 7242 | if (sp->promisc_flg) |
@@ -7217,7 +7279,7 @@ static int s2io_card_up(struct s2io_nic * sp) | |||
7217 | /* Enable select interrupts */ | 7279 | /* Enable select interrupts */ |
7218 | en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS); | 7280 | en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS); |
7219 | if (sp->config.intr_type != INTA) | 7281 | if (sp->config.intr_type != INTA) |
7220 | en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS); | 7282 | en_dis_able_nic_intrs(sp, TX_TRAFFIC_INTR, ENABLE_INTRS); |
7221 | else { | 7283 | else { |
7222 | interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR; | 7284 | interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR; |
7223 | interruptible |= TX_PIC_INTR; | 7285 | interruptible |= TX_PIC_INTR; |
@@ -7615,9 +7677,6 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type, | |||
7615 | rx_ring_num = MAX_RX_RINGS; | 7677 | rx_ring_num = MAX_RX_RINGS; |
7616 | } | 7678 | } |
7617 | 7679 | ||
7618 | if (*dev_intr_type != INTA) | ||
7619 | napi = 0; | ||
7620 | |||
7621 | if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) { | 7680 | if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) { |
7622 | DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. " | 7681 | DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. " |
7623 | "Defaulting to INTA\n"); | 7682 | "Defaulting to INTA\n"); |
@@ -7918,8 +7977,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
7918 | * will use eth_mac_addr() for dev->set_mac_address | 7977 | * will use eth_mac_addr() for dev->set_mac_address |
7919 | * mac address will be set every time dev->open() is called | 7978 | * mac address will be set every time dev->open() is called |
7920 | */ | 7979 | */ |
7921 | netif_napi_add(dev, &sp->napi, s2io_poll, 32); | ||
7922 | |||
7923 | #ifdef CONFIG_NET_POLL_CONTROLLER | 7980 | #ifdef CONFIG_NET_POLL_CONTROLLER |
7924 | dev->poll_controller = s2io_netpoll; | 7981 | dev->poll_controller = s2io_netpoll; |
7925 | #endif | 7982 | #endif |
@@ -7963,6 +8020,32 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
7963 | } | 8020 | } |
7964 | } | 8021 | } |
7965 | 8022 | ||
8023 | if (sp->config.intr_type == MSI_X) { | ||
8024 | sp->num_entries = config->rx_ring_num + 1; | ||
8025 | ret = s2io_enable_msi_x(sp); | ||
8026 | |||
8027 | if (!ret) { | ||
8028 | ret = s2io_test_msi(sp); | ||
8029 | /* rollback MSI-X, will re-enable during add_isr() */ | ||
8030 | remove_msix_isr(sp); | ||
8031 | } | ||
8032 | if (ret) { | ||
8033 | |||
8034 | DBG_PRINT(ERR_DBG, | ||
8035 | "%s: MSI-X requested but failed to enable\n", | ||
8036 | dev->name); | ||
8037 | sp->config.intr_type = INTA; | ||
8038 | } | ||
8039 | } | ||
8040 | |||
8041 | if (config->intr_type == MSI_X) { | ||
8042 | for (i = 0; i < config->rx_ring_num ; i++) | ||
8043 | netif_napi_add(dev, &mac_control->rings[i].napi, | ||
8044 | s2io_poll_msix, 64); | ||
8045 | } else { | ||
8046 | netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64); | ||
8047 | } | ||
8048 | |||
7966 | /* Not needed for Herc */ | 8049 | /* Not needed for Herc */ |
7967 | if (sp->device_type & XFRAME_I_DEVICE) { | 8050 | if (sp->device_type & XFRAME_I_DEVICE) { |
7968 | /* | 8051 | /* |
@@ -8013,6 +8096,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
8013 | /* store mac addresses from CAM to s2io_nic structure */ | 8096 | /* store mac addresses from CAM to s2io_nic structure */ |
8014 | do_s2io_store_unicast_mc(sp); | 8097 | do_s2io_store_unicast_mc(sp); |
8015 | 8098 | ||
8099 | /* Configure MSIX vector for number of rings configured plus one */ | ||
8100 | if ((sp->device_type == XFRAME_II_DEVICE) && | ||
8101 | (config->intr_type == MSI_X)) | ||
8102 | sp->num_entries = config->rx_ring_num + 1; | ||
8103 | |||
8016 | /* Store the values of the MSIX table in the s2io_nic structure */ | 8104 | /* Store the values of the MSIX table in the s2io_nic structure */ |
8017 | store_xmsi_data(sp); | 8105 | store_xmsi_data(sp); |
8018 | /* reset Nic and bring it to known state */ | 8106 | /* reset Nic and bring it to known state */ |
@@ -8078,8 +8166,14 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
8078 | break; | 8166 | break; |
8079 | } | 8167 | } |
8080 | 8168 | ||
8081 | if (napi) | 8169 | switch (sp->config.napi) { |
8170 | case 0: | ||
8171 | DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name); | ||
8172 | break; | ||
8173 | case 1: | ||
8082 | DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name); | 8174 | DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name); |
8175 | break; | ||
8176 | } | ||
8083 | 8177 | ||
8084 | DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name, | 8178 | DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name, |
8085 | sp->config.tx_fifo_num); | 8179 | sp->config.tx_fifo_num); |
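The s2io.c hunks above move interrupt setup into the probe path: MSI-X is enabled and test-fired, rolled back on failure with a fall-back to INTA, and NAPI contexts are then registered to match (one per RX ring under MSI-X, a single per-device context under INTA). A minimal sketch of that registration pattern follows, assuming the netif_napi_add() signature of this kernel generation; the my_* structures and poll callbacks are hypothetical stand-ins, not code from the driver.

#include <linux/netdevice.h>

struct my_ring {
        struct napi_struct napi;
        /* ... per-ring RX state ... */
};

struct my_nic {
        struct net_device *dev;
        struct napi_struct napi;        /* used only in INTA mode */
        struct my_ring rings[8];        /* up to 8 rings in this sketch */
        int rx_ring_num;
        bool use_msix;
};

/* Poll callbacks; bodies elided in this sketch */
static int my_poll_msix(struct napi_struct *napi, int budget);
static int my_poll_inta(struct napi_struct *napi, int budget);

static void my_setup_napi(struct my_nic *nic)
{
        int i;

        if (nic->use_msix) {
                /* MSI-X: one NAPI context per RX ring, as in the hunk above */
                for (i = 0; i < nic->rx_ring_num; i++)
                        netif_napi_add(nic->dev, &nic->rings[i].napi,
                                       my_poll_msix, 64);
        } else {
                /* INTA: a single NAPI context for the whole device */
                netif_napi_add(nic->dev, &nic->napi, my_poll_inta, 64);
        }
}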
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h index 0709ebae9139..4706f7f9acb6 100644 --- a/drivers/net/s2io.h +++ b/drivers/net/s2io.h | |||
@@ -706,7 +706,7 @@ struct ring_info { | |||
706 | /* per-ring buffer counter */ | 706 | /* per-ring buffer counter */ |
707 | u32 rx_bufs_left; | 707 | u32 rx_bufs_left; |
708 | 708 | ||
709 | #define MAX_LRO_SESSIONS 32 | 709 | #define MAX_LRO_SESSIONS 32 |
710 | struct lro lro0_n[MAX_LRO_SESSIONS]; | 710 | struct lro lro0_n[MAX_LRO_SESSIONS]; |
711 | u8 lro; | 711 | u8 lro; |
712 | 712 | ||
@@ -725,6 +725,11 @@ struct ring_info { | |||
725 | /* copy of sp->pdev pointer */ | 725 | /* copy of sp->pdev pointer */ |
726 | struct pci_dev *pdev; | 726 | struct pci_dev *pdev; |
727 | 727 | ||
728 | /* Per ring napi struct */ | ||
729 | struct napi_struct napi; | ||
730 | |||
731 | unsigned long interrupt_count; | ||
732 | |||
728 | /* | 733 | /* |
729 | * Place holders for the virtual and physical addresses of | 734 | * Place holders for the virtual and physical addresses of |
730 | * all the Rx Blocks | 735 | * all the Rx Blocks |
@@ -841,7 +846,7 @@ struct usr_addr { | |||
841 | * Structure to keep track of the MSI-X vectors and the corresponding | 846 | * Structure to keep track of the MSI-X vectors and the corresponding |
842 | * argument registered against each vector | 847 | * argument registered against each vector |
843 | */ | 848 | */ |
844 | #define MAX_REQUESTED_MSI_X 17 | 849 | #define MAX_REQUESTED_MSI_X 9 |
845 | struct s2io_msix_entry | 850 | struct s2io_msix_entry |
846 | { | 851 | { |
847 | u16 vector; | 852 | u16 vector; |
@@ -849,8 +854,8 @@ struct s2io_msix_entry | |||
849 | void *arg; | 854 | void *arg; |
850 | 855 | ||
851 | u8 type; | 856 | u8 type; |
852 | #define MSIX_FIFO_TYPE 1 | 857 | #define MSIX_ALARM_TYPE 1 |
853 | #define MSIX_RING_TYPE 2 | 858 | #define MSIX_RING_TYPE 2 |
854 | 859 | ||
855 | u8 in_use; | 860 | u8 in_use; |
856 | #define MSIX_REGISTERED_SUCCESS 0xAA | 861 | #define MSIX_REGISTERED_SUCCESS 0xAA |
@@ -877,7 +882,6 @@ struct s2io_nic { | |||
877 | */ | 882 | */ |
878 | int pkts_to_process; | 883 | int pkts_to_process; |
879 | struct net_device *dev; | 884 | struct net_device *dev; |
880 | struct napi_struct napi; | ||
881 | struct mac_info mac_control; | 885 | struct mac_info mac_control; |
882 | struct config_param config; | 886 | struct config_param config; |
883 | struct pci_dev *pdev; | 887 | struct pci_dev *pdev; |
@@ -948,6 +952,7 @@ struct s2io_nic { | |||
948 | */ | 952 | */ |
949 | u8 other_fifo_idx; | 953 | u8 other_fifo_idx; |
950 | 954 | ||
955 | struct napi_struct napi; | ||
951 | /* after blink, the adapter must be restored with original | 956 | /* after blink, the adapter must be restored with original |
952 | * values. | 957 | * values. |
953 | */ | 958 | */ |
@@ -962,6 +967,7 @@ struct s2io_nic { | |||
962 | unsigned long long start_time; | 967 | unsigned long long start_time; |
963 | struct vlan_group *vlgrp; | 968 | struct vlan_group *vlgrp; |
964 | #define MSIX_FLG 0xA5 | 969 | #define MSIX_FLG 0xA5 |
970 | int num_entries; | ||
965 | struct msix_entry *entries; | 971 | struct msix_entry *entries; |
966 | int msi_detected; | 972 | int msi_detected; |
967 | wait_queue_head_t msi_wait; | 973 | wait_queue_head_t msi_wait; |
@@ -982,6 +988,7 @@ struct s2io_nic { | |||
982 | u16 lro_max_aggr_per_sess; | 988 | u16 lro_max_aggr_per_sess; |
983 | volatile unsigned long state; | 989 | volatile unsigned long state; |
984 | u64 general_int_mask; | 990 | u64 general_int_mask; |
991 | |||
985 | #define VPD_STRING_LEN 80 | 992 | #define VPD_STRING_LEN 80 |
986 | u8 product_name[VPD_STRING_LEN]; | 993 | u8 product_name[VPD_STRING_LEN]; |
987 | u8 serial_num[VPD_STRING_LEN]; | 994 | u8 serial_num[VPD_STRING_LEN]; |
@@ -1103,7 +1110,7 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev); | |||
1103 | static int init_shared_mem(struct s2io_nic *sp); | 1110 | static int init_shared_mem(struct s2io_nic *sp); |
1104 | static void free_shared_mem(struct s2io_nic *sp); | 1111 | static void free_shared_mem(struct s2io_nic *sp); |
1105 | static int init_nic(struct s2io_nic *nic); | 1112 | static int init_nic(struct s2io_nic *nic); |
1106 | static void rx_intr_handler(struct ring_info *ring_data); | 1113 | static int rx_intr_handler(struct ring_info *ring_data, int budget); |
1107 | static void tx_intr_handler(struct fifo_info *fifo_data); | 1114 | static void tx_intr_handler(struct fifo_info *fifo_data); |
1108 | static void s2io_handle_errors(void * dev_id); | 1115 | static void s2io_handle_errors(void * dev_id); |
1109 | 1116 | ||
@@ -1114,7 +1121,8 @@ static void s2io_set_multicast(struct net_device *dev); | |||
1114 | static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp); | 1121 | static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp); |
1115 | static void s2io_link(struct s2io_nic * sp, int link); | 1122 | static void s2io_link(struct s2io_nic * sp, int link); |
1116 | static void s2io_reset(struct s2io_nic * sp); | 1123 | static void s2io_reset(struct s2io_nic * sp); |
1117 | static int s2io_poll(struct napi_struct *napi, int budget); | 1124 | static int s2io_poll_msix(struct napi_struct *napi, int budget); |
1125 | static int s2io_poll_inta(struct napi_struct *napi, int budget); | ||
1118 | static void s2io_init_pci(struct s2io_nic * sp); | 1126 | static void s2io_init_pci(struct s2io_nic * sp); |
1119 | static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr); | 1127 | static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr); |
1120 | static void s2io_alarm_handle(unsigned long data); | 1128 | static void s2io_alarm_handle(unsigned long data); |
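The s2io.h changes track the new polling model: each ring_info now embeds its own napi_struct, rx_intr_handler() takes a budget and reports how many packets it consumed, and the single s2io_poll() prototype is split into MSI-X and INTA variants. The sketch below shows the budget-honouring shape such a per-ring poll function takes; it assumes the driver's s2io.h definitions, the two-argument netif_rx_complete() of this kernel generation, a dev back-pointer in ring_info, and the hypothetical helpers my_rx_reap()/my_reenable_ring_irq() standing in for rx_intr_handler() and the MSI-X re-enable write.

/* Hypothetical stand-ins, not functions from the driver */
static int my_rx_reap(struct ring_info *ring, int budget);
static void my_reenable_ring_irq(struct ring_info *ring);

static int my_poll_one_ring(struct napi_struct *napi, int budget)
{
        struct ring_info *ring = container_of(napi, struct ring_info, napi);
        struct net_device *dev = ring->dev;     /* assumed back-pointer */
        int done;

        /* Never process more than @budget packets in one pass */
        done = my_rx_reap(ring, budget);

        if (done < budget) {
                /* All pending work finished: stop polling, re-arm the IRQ */
                netif_rx_complete(dev, napi);
                my_reenable_ring_irq(ring);
        }
        return done;
}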
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c index 888b7dec9866..33bb18f810fb 100644 --- a/drivers/net/sb1250-mac.c +++ b/drivers/net/sb1250-mac.c | |||
@@ -179,8 +179,7 @@ enum sbmac_state { | |||
179 | #define SBMAC_MAX_TXDESCR 256 | 179 | #define SBMAC_MAX_TXDESCR 256 |
180 | #define SBMAC_MAX_RXDESCR 256 | 180 | #define SBMAC_MAX_RXDESCR 256 |
181 | 181 | ||
182 | #define ETHER_ALIGN 2 | 182 | #define ETHER_ADDR_LEN 6 |
183 | #define ETHER_ADDR_LEN 6 | ||
184 | #define ENET_PACKET_SIZE 1518 | 183 | #define ENET_PACKET_SIZE 1518 |
185 | /*#define ENET_PACKET_SIZE 9216 */ | 184 | /*#define ENET_PACKET_SIZE 9216 */ |
186 | 185 | ||
@@ -262,8 +261,6 @@ struct sbmac_softc { | |||
262 | spinlock_t sbm_lock; /* spin lock */ | 261 | spinlock_t sbm_lock; /* spin lock */ |
263 | int sbm_devflags; /* current device flags */ | 262 | int sbm_devflags; /* current device flags */ |
264 | 263 | ||
265 | int sbm_buffersize; | ||
266 | |||
267 | /* | 264 | /* |
268 | * Controller-specific things | 265 | * Controller-specific things |
269 | */ | 266 | */ |
@@ -305,10 +302,11 @@ struct sbmac_softc { | |||
305 | static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan, | 302 | static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan, |
306 | int txrx, int maxdescr); | 303 | int txrx, int maxdescr); |
307 | static void sbdma_channel_start(struct sbmacdma *d, int rxtx); | 304 | static void sbdma_channel_start(struct sbmacdma *d, int rxtx); |
308 | static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *m); | 305 | static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d, |
306 | struct sk_buff *m); | ||
309 | static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *m); | 307 | static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *m); |
310 | static void sbdma_emptyring(struct sbmacdma *d); | 308 | static void sbdma_emptyring(struct sbmacdma *d); |
311 | static void sbdma_fillring(struct sbmacdma *d); | 309 | static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d); |
312 | static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d, | 310 | static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d, |
313 | int work_to_do, int poll); | 311 | int work_to_do, int poll); |
314 | static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d, | 312 | static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d, |
@@ -777,16 +775,13 @@ static void sbdma_channel_stop(struct sbmacdma *d) | |||
777 | d->sbdma_remptr = NULL; | 775 | d->sbdma_remptr = NULL; |
778 | } | 776 | } |
779 | 777 | ||
780 | static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset) | 778 | static inline void sbdma_align_skb(struct sk_buff *skb, |
779 | unsigned int power2, unsigned int offset) | ||
781 | { | 780 | { |
782 | unsigned long addr; | 781 | unsigned char *addr = skb->data; |
783 | unsigned long newaddr; | 782 | unsigned char *newaddr = PTR_ALIGN(addr, power2); |
784 | |||
785 | addr = (unsigned long) skb->data; | ||
786 | |||
787 | newaddr = (addr + power2 - 1) & ~(power2 - 1); | ||
788 | 783 | ||
789 | skb_reserve(skb,newaddr-addr+offset); | 784 | skb_reserve(skb, newaddr - addr + offset); |
790 | } | 785 | } |
791 | 786 | ||
792 | 787 | ||
@@ -797,7 +792,8 @@ static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset) | |||
797 | * this queues a buffer for inbound packets. | 792 | * this queues a buffer for inbound packets. |
798 | * | 793 | * |
799 | * Input parameters: | 794 | * Input parameters: |
800 | * d - DMA channel descriptor | 795 | * sc - softc structure |
796 | * d - DMA channel descriptor | ||
801 | * sb - sk_buff to add, or NULL if we should allocate one | 797 | * sb - sk_buff to add, or NULL if we should allocate one |
802 | * | 798 | * |
803 | * Return value: | 799 | * Return value: |
@@ -806,8 +802,10 @@ static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset) | |||
806 | ********************************************************************* */ | 802 | ********************************************************************* */ |
807 | 803 | ||
808 | 804 | ||
809 | static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *sb) | 805 | static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d, |
806 | struct sk_buff *sb) | ||
810 | { | 807 | { |
808 | struct net_device *dev = sc->sbm_dev; | ||
811 | struct sbdmadscr *dsc; | 809 | struct sbdmadscr *dsc; |
812 | struct sbdmadscr *nextdsc; | 810 | struct sbdmadscr *nextdsc; |
813 | struct sk_buff *sb_new = NULL; | 811 | struct sk_buff *sb_new = NULL; |
@@ -848,14 +846,16 @@ static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *sb) | |||
848 | */ | 846 | */ |
849 | 847 | ||
850 | if (sb == NULL) { | 848 | if (sb == NULL) { |
851 | sb_new = dev_alloc_skb(ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN); | 849 | sb_new = netdev_alloc_skb(dev, ENET_PACKET_SIZE + |
850 | SMP_CACHE_BYTES * 2 + | ||
851 | NET_IP_ALIGN); | ||
852 | if (sb_new == NULL) { | 852 | if (sb_new == NULL) { |
853 | pr_info("%s: sk_buff allocation failed\n", | 853 | pr_info("%s: sk_buff allocation failed\n", |
854 | d->sbdma_eth->sbm_dev->name); | 854 | d->sbdma_eth->sbm_dev->name); |
855 | return -ENOBUFS; | 855 | return -ENOBUFS; |
856 | } | 856 | } |
857 | 857 | ||
858 | sbdma_align_skb(sb_new, SMP_CACHE_BYTES, ETHER_ALIGN); | 858 | sbdma_align_skb(sb_new, SMP_CACHE_BYTES, NET_IP_ALIGN); |
859 | } | 859 | } |
860 | else { | 860 | else { |
861 | sb_new = sb; | 861 | sb_new = sb; |
@@ -874,10 +874,10 @@ static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *sb) | |||
874 | * Do not interrupt per DMA transfer. | 874 | * Do not interrupt per DMA transfer. |
875 | */ | 875 | */ |
876 | dsc->dscr_a = virt_to_phys(sb_new->data) | | 876 | dsc->dscr_a = virt_to_phys(sb_new->data) | |
877 | V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) | 0; | 877 | V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) | 0; |
878 | #else | 878 | #else |
879 | dsc->dscr_a = virt_to_phys(sb_new->data) | | 879 | dsc->dscr_a = virt_to_phys(sb_new->data) | |
880 | V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) | | 880 | V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) | |
881 | M_DMA_DSCRA_INTERRUPT; | 881 | M_DMA_DSCRA_INTERRUPT; |
882 | #endif | 882 | #endif |
883 | 883 | ||
@@ -1032,18 +1032,19 @@ static void sbdma_emptyring(struct sbmacdma *d) | |||
1032 | * with sk_buffs | 1032 | * with sk_buffs |
1033 | * | 1033 | * |
1034 | * Input parameters: | 1034 | * Input parameters: |
1035 | * d - DMA channel | 1035 | * sc - softc structure |
1036 | * d - DMA channel | ||
1036 | * | 1037 | * |
1037 | * Return value: | 1038 | * Return value: |
1038 | * nothing | 1039 | * nothing |
1039 | ********************************************************************* */ | 1040 | ********************************************************************* */ |
1040 | 1041 | ||
1041 | static void sbdma_fillring(struct sbmacdma *d) | 1042 | static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d) |
1042 | { | 1043 | { |
1043 | int idx; | 1044 | int idx; |
1044 | 1045 | ||
1045 | for (idx = 0; idx < SBMAC_MAX_RXDESCR-1; idx++) { | 1046 | for (idx = 0; idx < SBMAC_MAX_RXDESCR - 1; idx++) { |
1046 | if (sbdma_add_rcvbuffer(d,NULL) != 0) | 1047 | if (sbdma_add_rcvbuffer(sc, d, NULL) != 0) |
1047 | break; | 1048 | break; |
1048 | } | 1049 | } |
1049 | } | 1050 | } |
@@ -1159,10 +1160,11 @@ again: | |||
1159 | * packet and put it right back on the receive ring. | 1160 | * packet and put it right back on the receive ring. |
1160 | */ | 1161 | */ |
1161 | 1162 | ||
1162 | if (unlikely (sbdma_add_rcvbuffer(d,NULL) == | 1163 | if (unlikely(sbdma_add_rcvbuffer(sc, d, NULL) == |
1163 | -ENOBUFS)) { | 1164 | -ENOBUFS)) { |
1164 | dev->stats.rx_dropped++; | 1165 | dev->stats.rx_dropped++; |
1165 | sbdma_add_rcvbuffer(d,sb); /* re-add old buffer */ | 1166 | /* Re-add old buffer */ |
1167 | sbdma_add_rcvbuffer(sc, d, sb); | ||
1166 | /* No point in continuing at the moment */ | 1168 | /* No point in continuing at the moment */ |
1167 | printk(KERN_ERR "dropped packet (1)\n"); | 1169 | printk(KERN_ERR "dropped packet (1)\n"); |
1168 | d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); | 1170 | d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); |
@@ -1212,7 +1214,7 @@ again: | |||
1212 | * put it back on the receive ring. | 1214 | * put it back on the receive ring. |
1213 | */ | 1215 | */ |
1214 | dev->stats.rx_errors++; | 1216 | dev->stats.rx_errors++; |
1215 | sbdma_add_rcvbuffer(d,sb); | 1217 | sbdma_add_rcvbuffer(sc, d, sb); |
1216 | } | 1218 | } |
1217 | 1219 | ||
1218 | 1220 | ||
@@ -1570,7 +1572,7 @@ static void sbmac_channel_start(struct sbmac_softc *s) | |||
1570 | * Fill the receive ring | 1572 | * Fill the receive ring |
1571 | */ | 1573 | */ |
1572 | 1574 | ||
1573 | sbdma_fillring(&(s->sbm_rxdma)); | 1575 | sbdma_fillring(s, &(s->sbm_rxdma)); |
1574 | 1576 | ||
1575 | /* | 1577 | /* |
1576 | * Turn on the rest of the bits in the enable register | 1578 | * Turn on the rest of the bits in the enable register |
@@ -2312,13 +2314,6 @@ static int sbmac_init(struct platform_device *pldev, long long base) | |||
2312 | dev->dev_addr[i] = eaddr[i]; | 2314 | dev->dev_addr[i] = eaddr[i]; |
2313 | } | 2315 | } |
2314 | 2316 | ||
2315 | |||
2316 | /* | ||
2317 | * Init packet size | ||
2318 | */ | ||
2319 | |||
2320 | sc->sbm_buffersize = ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN; | ||
2321 | |||
2322 | /* | 2317 | /* |
2323 | * Initialize context (get pointers to registers and stuff), then | 2318 | * Initialize context (get pointers to registers and stuff), then |
2324 | * allocate the memory for the descriptor tables. | 2319 | * allocate the memory for the descriptor tables. |
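The sb1250-mac.c hunks drop the driver's private ETHER_ALIGN constant and open-coded pointer rounding in favour of NET_IP_ALIGN, netdev_alloc_skb() and PTR_ALIGN(). The sketch below restates the resulting RX allocation pattern in one place; my_rx_alloc() is a hypothetical wrapper, only the kernel helpers mirror the patch.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/kernel.h>

static struct sk_buff *my_rx_alloc(struct net_device *dev, unsigned int pktsize)
{
        struct sk_buff *skb;
        unsigned char *aligned;

        /* Over-allocate so there is room to slide the data pointer */
        skb = netdev_alloc_skb(dev, pktsize + SMP_CACHE_BYTES * 2 + NET_IP_ALIGN);
        if (!skb)
                return NULL;

        /* Round up to the next cache line, then offset by NET_IP_ALIGN so
         * the IP header lands on a 4-byte boundary */
        aligned = PTR_ALIGN(skb->data, SMP_CACHE_BYTES);
        skb_reserve(skb, aligned - skb->data + NET_IP_ALIGN);

        return skb;
}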
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c index f64a860029b7..61955f8d8011 100644 --- a/drivers/net/sc92031.c +++ b/drivers/net/sc92031.c | |||
@@ -953,9 +953,6 @@ static int sc92031_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
953 | unsigned entry; | 953 | unsigned entry; |
954 | u32 tx_status; | 954 | u32 tx_status; |
955 | 955 | ||
956 | if (skb_padto(skb, ETH_ZLEN)) | ||
957 | return NETDEV_TX_OK; | ||
958 | |||
959 | if (unlikely(skb->len > TX_BUF_SIZE)) { | 956 | if (unlikely(skb->len > TX_BUF_SIZE)) { |
960 | dev->stats.tx_dropped++; | 957 | dev->stats.tx_dropped++; |
961 | goto out; | 958 | goto out; |
@@ -975,6 +972,11 @@ static int sc92031_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
975 | skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE); | 972 | skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE); |
976 | 973 | ||
977 | len = skb->len; | 974 | len = skb->len; |
975 | if (len < ETH_ZLEN) { | ||
976 | memset(priv->tx_bufs + entry * TX_BUF_SIZE + len, | ||
977 | 0, ETH_ZLEN - len); | ||
978 | len = ETH_ZLEN; | ||
979 | } | ||
978 | 980 | ||
979 | wmb(); | 981 | wmb(); |
980 | 982 | ||
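Because sc92031 copies every frame into a per-descriptor bounce buffer anyway, the patch replaces skb_padto(), which may reallocate the skb, with zero-padding of the buffer tail up to the 60-byte Ethernet minimum. A simplified sketch of that idea, assuming a linear skb and ignoring the checksum fold that skb_copy_and_csum_dev() performs in the real driver:

#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static unsigned int my_copy_and_pad(u8 *buf, const struct sk_buff *skb)
{
        unsigned int len = skb->len;

        skb_copy_from_linear_data(skb, buf, len);
        if (len < ETH_ZLEN) {
                /* Pad short frames with zeroes instead of calling skb_padto() */
                memset(buf + len, 0, ETH_ZLEN - len);
                len = ETH_ZLEN;
        }
        return len;
}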
diff --git a/drivers/net/sfc/bitfield.h b/drivers/net/sfc/bitfield.h index 2806201644cc..2c79d27404e0 100644 --- a/drivers/net/sfc/bitfield.h +++ b/drivers/net/sfc/bitfield.h | |||
@@ -483,7 +483,7 @@ typedef union efx_oword { | |||
483 | #endif | 483 | #endif |
484 | 484 | ||
485 | #define EFX_SET_OWORD_FIELD_VER(efx, oword, field, value) do { \ | 485 | #define EFX_SET_OWORD_FIELD_VER(efx, oword, field, value) do { \ |
486 | if (FALCON_REV(efx) >= FALCON_REV_B0) { \ | 486 | if (falcon_rev(efx) >= FALCON_REV_B0) { \ |
487 | EFX_SET_OWORD_FIELD((oword), field##_B0, (value)); \ | 487 | EFX_SET_OWORD_FIELD((oword), field##_B0, (value)); \ |
488 | } else { \ | 488 | } else { \ |
489 | EFX_SET_OWORD_FIELD((oword), field##_A1, (value)); \ | 489 | EFX_SET_OWORD_FIELD((oword), field##_A1, (value)); \ |
@@ -491,7 +491,7 @@ typedef union efx_oword { | |||
491 | } while (0) | 491 | } while (0) |
492 | 492 | ||
493 | #define EFX_QWORD_FIELD_VER(efx, qword, field) \ | 493 | #define EFX_QWORD_FIELD_VER(efx, qword, field) \ |
494 | (FALCON_REV(efx) >= FALCON_REV_B0 ? \ | 494 | (falcon_rev(efx) >= FALCON_REV_B0 ? \ |
495 | EFX_QWORD_FIELD((qword), field##_B0) : \ | 495 | EFX_QWORD_FIELD((qword), field##_B0) : \ |
496 | EFX_QWORD_FIELD((qword), field##_A1)) | 496 | EFX_QWORD_FIELD((qword), field##_A1)) |
497 | 497 | ||
@@ -501,8 +501,5 @@ typedef union efx_oword { | |||
501 | #define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t)) | 501 | #define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t)) |
502 | #define EFX_DMA_TYPE_WIDTH(width) \ | 502 | #define EFX_DMA_TYPE_WIDTH(width) \ |
503 | (((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH) | 503 | (((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH) |
504 | #define EFX_DMA_MAX_MASK ((DMA_ADDR_T_WIDTH == 64) ? \ | ||
505 | ~((u64) 0) : ~((u32) 0)) | ||
506 | #define EFX_DMA_MASK(mask) ((mask) & EFX_DMA_MAX_MASK) | ||
507 | 504 | ||
508 | #endif /* EFX_BITFIELD_H */ | 505 | #endif /* EFX_BITFIELD_H */ |
diff --git a/drivers/net/sfc/boards.c b/drivers/net/sfc/boards.c index eecaa6d58584..7fc0328dc055 100644 --- a/drivers/net/sfc/boards.c +++ b/drivers/net/sfc/boards.c | |||
@@ -27,10 +27,8 @@ static void blink_led_timer(unsigned long context) | |||
27 | struct efx_blinker *bl = &efx->board_info.blinker; | 27 | struct efx_blinker *bl = &efx->board_info.blinker; |
28 | efx->board_info.set_fault_led(efx, bl->state); | 28 | efx->board_info.set_fault_led(efx, bl->state); |
29 | bl->state = !bl->state; | 29 | bl->state = !bl->state; |
30 | if (bl->resubmit) { | 30 | if (bl->resubmit) |
31 | bl->timer.expires = jiffies + BLINK_INTERVAL; | 31 | mod_timer(&bl->timer, jiffies + BLINK_INTERVAL); |
32 | add_timer(&bl->timer); | ||
33 | } | ||
34 | } | 32 | } |
35 | 33 | ||
36 | static void board_blink(struct efx_nic *efx, int blink) | 34 | static void board_blink(struct efx_nic *efx, int blink) |
@@ -44,8 +42,7 @@ static void board_blink(struct efx_nic *efx, int blink) | |||
44 | blinker->state = 0; | 42 | blinker->state = 0; |
45 | setup_timer(&blinker->timer, blink_led_timer, | 43 | setup_timer(&blinker->timer, blink_led_timer, |
46 | (unsigned long)efx); | 44 | (unsigned long)efx); |
47 | blinker->timer.expires = jiffies + BLINK_INTERVAL; | 45 | mod_timer(&blinker->timer, jiffies + BLINK_INTERVAL); |
48 | add_timer(&blinker->timer); | ||
49 | } else { | 46 | } else { |
50 | blinker->resubmit = 0; | 47 | blinker->resubmit = 0; |
51 | if (blinker->timer.function) | 48 | if (blinker->timer.function) |
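The boards.c change swaps the expires-then-add_timer() pair for mod_timer(), which sets the new expiry and (re)activates the timer in one call and is safe whether or not the timer is already pending. A standalone sketch of the same blink pattern, using the pre-timer_setup() API of this era; the LED write is replaced by pr_info() since set_fault_led() is board-specific, and the my_* names are illustrative.

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>

#define MY_BLINK_INTERVAL (HZ / 3)      /* illustrative period */

static struct timer_list my_blink_timer;
static int my_blink_state;
static int my_blink_resubmit;

static void my_blink_fn(unsigned long context)
{
        pr_info("LED %s\n", my_blink_state ? "on" : "off");
        my_blink_state = !my_blink_state;
        if (my_blink_resubmit)
                mod_timer(&my_blink_timer, jiffies + MY_BLINK_INTERVAL);
}

static void my_blink_start(unsigned long context)
{
        my_blink_resubmit = 1;
        my_blink_state = 0;
        setup_timer(&my_blink_timer, my_blink_fn, context);
        mod_timer(&my_blink_timer, jiffies + MY_BLINK_INTERVAL);
}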
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index 418f2e53a95b..449760642e31 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c | |||
@@ -199,11 +199,12 @@ static inline int efx_process_channel(struct efx_channel *channel, int rx_quota) | |||
199 | */ | 199 | */ |
200 | static inline void efx_channel_processed(struct efx_channel *channel) | 200 | static inline void efx_channel_processed(struct efx_channel *channel) |
201 | { | 201 | { |
202 | /* Write to EVQ_RPTR_REG. If a new event arrived in a race | 202 | /* The interrupt handler for this channel may set work_pending |
203 | * with finishing processing, a new interrupt will be raised. | 203 | * as soon as we acknowledge the events we've seen. Make sure |
204 | */ | 204 | * it's cleared before then. */ |
205 | channel->work_pending = 0; | 205 | channel->work_pending = 0; |
206 | smp_wmb(); /* Ensure channel updated before any new interrupt. */ | 206 | smp_wmb(); |
207 | |||
207 | falcon_eventq_read_ack(channel); | 208 | falcon_eventq_read_ack(channel); |
208 | } | 209 | } |
209 | 210 | ||
@@ -265,7 +266,7 @@ void efx_process_channel_now(struct efx_channel *channel) | |||
265 | napi_disable(&channel->napi_str); | 266 | napi_disable(&channel->napi_str); |
266 | 267 | ||
267 | /* Poll the channel */ | 268 | /* Poll the channel */ |
268 | (void) efx_process_channel(channel, efx->type->evq_size); | 269 | efx_process_channel(channel, efx->type->evq_size); |
269 | 270 | ||
270 | /* Ack the eventq. This may cause an interrupt to be generated | 271 | /* Ack the eventq. This may cause an interrupt to be generated |
271 | * when they are reenabled */ | 272 | * when they are reenabled */ |
@@ -317,26 +318,6 @@ static void efx_remove_eventq(struct efx_channel *channel) | |||
317 | * | 318 | * |
318 | *************************************************************************/ | 319 | *************************************************************************/ |
319 | 320 | ||
320 | /* Setup per-NIC RX buffer parameters. | ||
321 | * Calculate the rx buffer allocation parameters required to support | ||
322 | * the current MTU, including padding for header alignment and overruns. | ||
323 | */ | ||
324 | static void efx_calc_rx_buffer_params(struct efx_nic *efx) | ||
325 | { | ||
326 | unsigned int order, len; | ||
327 | |||
328 | len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) + | ||
329 | EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + | ||
330 | efx->type->rx_buffer_padding); | ||
331 | |||
332 | /* Calculate page-order */ | ||
333 | for (order = 0; ((1u << order) * PAGE_SIZE) < len; ++order) | ||
334 | ; | ||
335 | |||
336 | efx->rx_buffer_len = len; | ||
337 | efx->rx_buffer_order = order; | ||
338 | } | ||
339 | |||
340 | static int efx_probe_channel(struct efx_channel *channel) | 321 | static int efx_probe_channel(struct efx_channel *channel) |
341 | { | 322 | { |
342 | struct efx_tx_queue *tx_queue; | 323 | struct efx_tx_queue *tx_queue; |
@@ -387,7 +368,14 @@ static int efx_init_channels(struct efx_nic *efx) | |||
387 | struct efx_channel *channel; | 368 | struct efx_channel *channel; |
388 | int rc = 0; | 369 | int rc = 0; |
389 | 370 | ||
390 | efx_calc_rx_buffer_params(efx); | 371 | /* Calculate the rx buffer allocation parameters required to |
372 | * support the current MTU, including padding for header | ||
373 | * alignment and overruns. | ||
374 | */ | ||
375 | efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) + | ||
376 | EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + | ||
377 | efx->type->rx_buffer_padding); | ||
378 | efx->rx_buffer_order = get_order(efx->rx_buffer_len); | ||
391 | 379 | ||
392 | /* Initialise the channels */ | 380 | /* Initialise the channels */ |
393 | efx_for_each_channel(channel, efx) { | 381 | efx_for_each_channel(channel, efx) { |
@@ -440,9 +428,12 @@ static void efx_start_channel(struct efx_channel *channel) | |||
440 | netif_napi_add(channel->napi_dev, &channel->napi_str, | 428 | netif_napi_add(channel->napi_dev, &channel->napi_str, |
441 | efx_poll, napi_weight); | 429 | efx_poll, napi_weight); |
442 | 430 | ||
431 | /* The interrupt handler for this channel may set work_pending | ||
432 | * as soon as we enable it. Make sure it's cleared before | ||
433 | * then. Similarly, make sure it sees the enabled flag set. */ | ||
443 | channel->work_pending = 0; | 434 | channel->work_pending = 0; |
444 | channel->enabled = 1; | 435 | channel->enabled = 1; |
445 | smp_wmb(); /* ensure channel updated before first interrupt */ | 436 | smp_wmb(); |
446 | 437 | ||
447 | napi_enable(&channel->napi_str); | 438 | napi_enable(&channel->napi_str); |
448 | 439 | ||
@@ -704,7 +695,7 @@ static void efx_stop_port(struct efx_nic *efx) | |||
704 | mutex_unlock(&efx->mac_lock); | 695 | mutex_unlock(&efx->mac_lock); |
705 | 696 | ||
706 | /* Serialise against efx_set_multicast_list() */ | 697 | /* Serialise against efx_set_multicast_list() */ |
707 | if (NET_DEV_REGISTERED(efx)) { | 698 | if (efx_dev_registered(efx)) { |
708 | netif_tx_lock_bh(efx->net_dev); | 699 | netif_tx_lock_bh(efx->net_dev); |
709 | netif_tx_unlock_bh(efx->net_dev); | 700 | netif_tx_unlock_bh(efx->net_dev); |
710 | } | 701 | } |
@@ -791,22 +782,23 @@ static int efx_init_io(struct efx_nic *efx) | |||
791 | efx->membase = ioremap_nocache(efx->membase_phys, | 782 | efx->membase = ioremap_nocache(efx->membase_phys, |
792 | efx->type->mem_map_size); | 783 | efx->type->mem_map_size); |
793 | if (!efx->membase) { | 784 | if (!efx->membase) { |
794 | EFX_ERR(efx, "could not map memory BAR %d at %lx+%x\n", | 785 | EFX_ERR(efx, "could not map memory BAR %d at %llx+%x\n", |
795 | efx->type->mem_bar, efx->membase_phys, | 786 | efx->type->mem_bar, |
787 | (unsigned long long)efx->membase_phys, | ||
796 | efx->type->mem_map_size); | 788 | efx->type->mem_map_size); |
797 | rc = -ENOMEM; | 789 | rc = -ENOMEM; |
798 | goto fail4; | 790 | goto fail4; |
799 | } | 791 | } |
800 | EFX_LOG(efx, "memory BAR %u at %lx+%x (virtual %p)\n", | 792 | EFX_LOG(efx, "memory BAR %u at %llx+%x (virtual %p)\n", |
801 | efx->type->mem_bar, efx->membase_phys, efx->type->mem_map_size, | 793 | efx->type->mem_bar, (unsigned long long)efx->membase_phys, |
802 | efx->membase); | 794 | efx->type->mem_map_size, efx->membase); |
803 | 795 | ||
804 | return 0; | 796 | return 0; |
805 | 797 | ||
806 | fail4: | 798 | fail4: |
807 | release_mem_region(efx->membase_phys, efx->type->mem_map_size); | 799 | release_mem_region(efx->membase_phys, efx->type->mem_map_size); |
808 | fail3: | 800 | fail3: |
809 | efx->membase_phys = 0UL; | 801 | efx->membase_phys = 0; |
810 | fail2: | 802 | fail2: |
811 | pci_disable_device(efx->pci_dev); | 803 | pci_disable_device(efx->pci_dev); |
812 | fail1: | 804 | fail1: |
@@ -824,7 +816,7 @@ static void efx_fini_io(struct efx_nic *efx) | |||
824 | 816 | ||
825 | if (efx->membase_phys) { | 817 | if (efx->membase_phys) { |
826 | pci_release_region(efx->pci_dev, efx->type->mem_bar); | 818 | pci_release_region(efx->pci_dev, efx->type->mem_bar); |
827 | efx->membase_phys = 0UL; | 819 | efx->membase_phys = 0; |
828 | } | 820 | } |
829 | 821 | ||
830 | pci_disable_device(efx->pci_dev); | 822 | pci_disable_device(efx->pci_dev); |
@@ -1043,7 +1035,7 @@ static void efx_start_all(struct efx_nic *efx) | |||
1043 | return; | 1035 | return; |
1044 | if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT)) | 1036 | if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT)) |
1045 | return; | 1037 | return; |
1046 | if (NET_DEV_REGISTERED(efx) && !netif_running(efx->net_dev)) | 1038 | if (efx_dev_registered(efx) && !netif_running(efx->net_dev)) |
1047 | return; | 1039 | return; |
1048 | 1040 | ||
1049 | /* Mark the port as enabled so port reconfigurations can start, then | 1041 | /* Mark the port as enabled so port reconfigurations can start, then |
@@ -1073,9 +1065,8 @@ static void efx_flush_all(struct efx_nic *efx) | |||
1073 | cancel_delayed_work_sync(&efx->monitor_work); | 1065 | cancel_delayed_work_sync(&efx->monitor_work); |
1074 | 1066 | ||
1075 | /* Ensure that all RX slow refills are complete. */ | 1067 | /* Ensure that all RX slow refills are complete. */ |
1076 | efx_for_each_rx_queue(rx_queue, efx) { | 1068 | efx_for_each_rx_queue(rx_queue, efx) |
1077 | cancel_delayed_work_sync(&rx_queue->work); | 1069 | cancel_delayed_work_sync(&rx_queue->work); |
1078 | } | ||
1079 | 1070 | ||
1080 | /* Stop scheduled port reconfigurations */ | 1071 | /* Stop scheduled port reconfigurations */ |
1081 | cancel_work_sync(&efx->reconfigure_work); | 1072 | cancel_work_sync(&efx->reconfigure_work); |
@@ -1101,9 +1092,10 @@ static void efx_stop_all(struct efx_nic *efx) | |||
1101 | falcon_disable_interrupts(efx); | 1092 | falcon_disable_interrupts(efx); |
1102 | if (efx->legacy_irq) | 1093 | if (efx->legacy_irq) |
1103 | synchronize_irq(efx->legacy_irq); | 1094 | synchronize_irq(efx->legacy_irq); |
1104 | efx_for_each_channel_with_interrupt(channel, efx) | 1095 | efx_for_each_channel_with_interrupt(channel, efx) { |
1105 | if (channel->irq) | 1096 | if (channel->irq) |
1106 | synchronize_irq(channel->irq); | 1097 | synchronize_irq(channel->irq); |
1098 | } | ||
1107 | 1099 | ||
1108 | /* Stop all NAPI processing and synchronous rx refills */ | 1100 | /* Stop all NAPI processing and synchronous rx refills */ |
1109 | efx_for_each_channel(channel, efx) | 1101 | efx_for_each_channel(channel, efx) |
@@ -1125,7 +1117,7 @@ static void efx_stop_all(struct efx_nic *efx) | |||
1125 | /* Stop the kernel transmit interface late, so the watchdog | 1117 | /* Stop the kernel transmit interface late, so the watchdog |
1126 | * timer isn't ticking over the flush */ | 1118 | * timer isn't ticking over the flush */ |
1127 | efx_stop_queue(efx); | 1119 | efx_stop_queue(efx); |
1128 | if (NET_DEV_REGISTERED(efx)) { | 1120 | if (efx_dev_registered(efx)) { |
1129 | netif_tx_lock_bh(efx->net_dev); | 1121 | netif_tx_lock_bh(efx->net_dev); |
1130 | netif_tx_unlock_bh(efx->net_dev); | 1122 | netif_tx_unlock_bh(efx->net_dev); |
1131 | } | 1123 | } |
@@ -1344,13 +1336,17 @@ static int efx_net_stop(struct net_device *net_dev) | |||
1344 | return 0; | 1336 | return 0; |
1345 | } | 1337 | } |
1346 | 1338 | ||
1347 | /* Context: process, dev_base_lock held, non-blocking. */ | 1339 | /* Context: process, dev_base_lock or RTNL held, non-blocking. */ |
1348 | static struct net_device_stats *efx_net_stats(struct net_device *net_dev) | 1340 | static struct net_device_stats *efx_net_stats(struct net_device *net_dev) |
1349 | { | 1341 | { |
1350 | struct efx_nic *efx = net_dev->priv; | 1342 | struct efx_nic *efx = net_dev->priv; |
1351 | struct efx_mac_stats *mac_stats = &efx->mac_stats; | 1343 | struct efx_mac_stats *mac_stats = &efx->mac_stats; |
1352 | struct net_device_stats *stats = &net_dev->stats; | 1344 | struct net_device_stats *stats = &net_dev->stats; |
1353 | 1345 | ||
1346 | /* Update stats if possible, but do not wait if another thread | ||
1347 | * is updating them (or resetting the NIC); slightly stale | ||
1348 | * stats are acceptable. | ||
1349 | */ | ||
1354 | if (!spin_trylock(&efx->stats_lock)) | 1350 | if (!spin_trylock(&efx->stats_lock)) |
1355 | return stats; | 1351 | return stats; |
1356 | if (efx->state == STATE_RUNNING) { | 1352 | if (efx->state == STATE_RUNNING) { |
@@ -1494,7 +1490,7 @@ static void efx_set_multicast_list(struct net_device *net_dev) | |||
1494 | static int efx_netdev_event(struct notifier_block *this, | 1490 | static int efx_netdev_event(struct notifier_block *this, |
1495 | unsigned long event, void *ptr) | 1491 | unsigned long event, void *ptr) |
1496 | { | 1492 | { |
1497 | struct net_device *net_dev = (struct net_device *)ptr; | 1493 | struct net_device *net_dev = ptr; |
1498 | 1494 | ||
1499 | if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) { | 1495 | if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) { |
1500 | struct efx_nic *efx = net_dev->priv; | 1496 | struct efx_nic *efx = net_dev->priv; |
@@ -1563,7 +1559,7 @@ static void efx_unregister_netdev(struct efx_nic *efx) | |||
1563 | efx_for_each_tx_queue(tx_queue, efx) | 1559 | efx_for_each_tx_queue(tx_queue, efx) |
1564 | efx_release_tx_buffers(tx_queue); | 1560 | efx_release_tx_buffers(tx_queue); |
1565 | 1561 | ||
1566 | if (NET_DEV_REGISTERED(efx)) { | 1562 | if (efx_dev_registered(efx)) { |
1567 | strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); | 1563 | strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); |
1568 | unregister_netdev(efx->net_dev); | 1564 | unregister_netdev(efx->net_dev); |
1569 | } | 1565 | } |
@@ -1688,7 +1684,7 @@ static int efx_reset(struct efx_nic *efx) | |||
1688 | if (method == RESET_TYPE_DISABLE) { | 1684 | if (method == RESET_TYPE_DISABLE) { |
1689 | /* Reinitialise the device anyway so the driver unload sequence | 1685 | /* Reinitialise the device anyway so the driver unload sequence |
1690 | * can talk to the external SRAM */ | 1686 | * can talk to the external SRAM */ |
1691 | (void) falcon_init_nic(efx); | 1687 | falcon_init_nic(efx); |
1692 | rc = -EIO; | 1688 | rc = -EIO; |
1693 | goto fail4; | 1689 | goto fail4; |
1694 | } | 1690 | } |
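Among the efx.c cleanups, the per-NIC RX buffer parameters are now computed inline in efx_init_channels(), with get_order() replacing the hand-rolled power-of-two loop. A small sketch of that calculation, simplified to take the frame length and padding as plain parameters; in the driver they come from EFX_MAX_FRAME_LEN() and efx->type->rx_buffer_padding, and the alignment term also honours EFX_PAGE_IP_ALIGN.

#include <linux/skbuff.h>
#include <asm/page.h>

static void my_calc_rx_buffer(unsigned int frame_len, unsigned int padding,
                              unsigned int *buf_len, unsigned int *buf_order)
{
        unsigned int len = NET_IP_ALIGN + frame_len + padding;

        *buf_len = len;
        /* Smallest order such that (PAGE_SIZE << order) >= len */
        *buf_order = get_order(len);
}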
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c index b57cc68058c0..790db89db345 100644 --- a/drivers/net/sfc/falcon.c +++ b/drivers/net/sfc/falcon.c | |||
@@ -116,17 +116,8 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold"); | |||
116 | ************************************************************************** | 116 | ************************************************************************** |
117 | */ | 117 | */ |
118 | 118 | ||
119 | /* DMA address mask (up to 46-bit, avoiding compiler warnings) | 119 | /* DMA address mask */ |
120 | * | 120 | #define FALCON_DMA_MASK DMA_BIT_MASK(46) |
121 | * Note that it is possible to have a platform with 64-bit longs and | ||
122 | * 32-bit DMA addresses, or vice versa. EFX_DMA_MASK takes care of the | ||
123 | * platform DMA mask. | ||
124 | */ | ||
125 | #if BITS_PER_LONG == 64 | ||
126 | #define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffUL) | ||
127 | #else | ||
128 | #define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffULL) | ||
129 | #endif | ||
130 | 121 | ||
131 | /* TX DMA length mask (13-bit) */ | 122 | /* TX DMA length mask (13-bit) */ |
132 | #define FALCON_TX_DMA_MASK (4096 - 1) | 123 | #define FALCON_TX_DMA_MASK (4096 - 1) |
@@ -145,7 +136,7 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold"); | |||
145 | #define PCI_EXP_LNKSTA_LNK_WID_LBN 4 | 136 | #define PCI_EXP_LNKSTA_LNK_WID_LBN 4 |
146 | 137 | ||
147 | #define FALCON_IS_DUAL_FUNC(efx) \ | 138 | #define FALCON_IS_DUAL_FUNC(efx) \ |
148 | (FALCON_REV(efx) < FALCON_REV_B0) | 139 | (falcon_rev(efx) < FALCON_REV_B0) |
149 | 140 | ||
150 | /************************************************************************** | 141 | /************************************************************************** |
151 | * | 142 | * |
@@ -465,7 +456,7 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue) | |||
465 | TX_DESCQ_TYPE, 0, | 456 | TX_DESCQ_TYPE, 0, |
466 | TX_NON_IP_DROP_DIS_B0, 1); | 457 | TX_NON_IP_DROP_DIS_B0, 1); |
467 | 458 | ||
468 | if (FALCON_REV(efx) >= FALCON_REV_B0) { | 459 | if (falcon_rev(efx) >= FALCON_REV_B0) { |
469 | int csum = !(efx->net_dev->features & NETIF_F_IP_CSUM); | 460 | int csum = !(efx->net_dev->features & NETIF_F_IP_CSUM); |
470 | EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, csum); | 461 | EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, csum); |
471 | EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, csum); | 462 | EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, csum); |
@@ -474,7 +465,7 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue) | |||
474 | falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, | 465 | falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, |
475 | tx_queue->queue); | 466 | tx_queue->queue); |
476 | 467 | ||
477 | if (FALCON_REV(efx) < FALCON_REV_B0) { | 468 | if (falcon_rev(efx) < FALCON_REV_B0) { |
478 | efx_oword_t reg; | 469 | efx_oword_t reg; |
479 | 470 | ||
480 | BUG_ON(tx_queue->queue >= 128); /* HW limit */ | 471 | BUG_ON(tx_queue->queue >= 128); /* HW limit */ |
@@ -635,7 +626,7 @@ int falcon_init_rx(struct efx_rx_queue *rx_queue) | |||
635 | efx_oword_t rx_desc_ptr; | 626 | efx_oword_t rx_desc_ptr; |
636 | struct efx_nic *efx = rx_queue->efx; | 627 | struct efx_nic *efx = rx_queue->efx; |
637 | int rc; | 628 | int rc; |
638 | int is_b0 = FALCON_REV(efx) >= FALCON_REV_B0; | 629 | int is_b0 = falcon_rev(efx) >= FALCON_REV_B0; |
639 | int iscsi_digest_en = is_b0; | 630 | int iscsi_digest_en = is_b0; |
640 | 631 | ||
641 | EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n", | 632 | EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n", |
@@ -742,8 +733,10 @@ void falcon_fini_rx(struct efx_rx_queue *rx_queue) | |||
742 | continue; | 733 | continue; |
743 | break; | 734 | break; |
744 | } | 735 | } |
745 | if (rc) | 736 | if (rc) { |
746 | EFX_ERR(efx, "failed to flush rx queue %d\n", rx_queue->queue); | 737 | EFX_ERR(efx, "failed to flush rx queue %d\n", rx_queue->queue); |
738 | efx_schedule_reset(efx, RESET_TYPE_INVISIBLE); | ||
739 | } | ||
747 | 740 | ||
748 | /* Remove RX descriptor ring from card */ | 741 | /* Remove RX descriptor ring from card */ |
749 | EFX_ZERO_OWORD(rx_desc_ptr); | 742 | EFX_ZERO_OWORD(rx_desc_ptr); |
@@ -822,10 +815,10 @@ static inline void falcon_handle_tx_event(struct efx_channel *channel, | |||
822 | tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL); | 815 | tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL); |
823 | tx_queue = &efx->tx_queue[tx_ev_q_label]; | 816 | tx_queue = &efx->tx_queue[tx_ev_q_label]; |
824 | 817 | ||
825 | if (NET_DEV_REGISTERED(efx)) | 818 | if (efx_dev_registered(efx)) |
826 | netif_tx_lock(efx->net_dev); | 819 | netif_tx_lock(efx->net_dev); |
827 | falcon_notify_tx_desc(tx_queue); | 820 | falcon_notify_tx_desc(tx_queue); |
828 | if (NET_DEV_REGISTERED(efx)) | 821 | if (efx_dev_registered(efx)) |
829 | netif_tx_unlock(efx->net_dev); | 822 | netif_tx_unlock(efx->net_dev); |
830 | } else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) && | 823 | } else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) && |
831 | EFX_WORKAROUND_10727(efx)) { | 824 | EFX_WORKAROUND_10727(efx)) { |
@@ -884,7 +877,7 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue, | |||
884 | RX_EV_TCP_UDP_CHKSUM_ERR); | 877 | RX_EV_TCP_UDP_CHKSUM_ERR); |
885 | rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR); | 878 | rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR); |
886 | rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC); | 879 | rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC); |
887 | rx_ev_drib_nib = ((FALCON_REV(efx) >= FALCON_REV_B0) ? | 880 | rx_ev_drib_nib = ((falcon_rev(efx) >= FALCON_REV_B0) ? |
888 | 0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB)); | 881 | 0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB)); |
889 | rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR); | 882 | rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR); |
890 | 883 | ||
@@ -1065,7 +1058,7 @@ static void falcon_handle_global_event(struct efx_channel *channel, | |||
1065 | EFX_QWORD_FIELD(*event, XG_PHY_INTR)) | 1058 | EFX_QWORD_FIELD(*event, XG_PHY_INTR)) |
1066 | is_phy_event = 1; | 1059 | is_phy_event = 1; |
1067 | 1060 | ||
1068 | if ((FALCON_REV(efx) >= FALCON_REV_B0) && | 1061 | if ((falcon_rev(efx) >= FALCON_REV_B0) && |
1069 | EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0)) | 1062 | EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0)) |
1070 | is_phy_event = 1; | 1063 | is_phy_event = 1; |
1071 | 1064 | ||
@@ -1405,7 +1398,7 @@ static inline void falcon_irq_ack_a1(struct efx_nic *efx) | |||
1405 | static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx) | 1398 | static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx) |
1406 | { | 1399 | { |
1407 | struct falcon_nic_data *nic_data = efx->nic_data; | 1400 | struct falcon_nic_data *nic_data = efx->nic_data; |
1408 | efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; | 1401 | efx_oword_t *int_ker = efx->irq_status.addr; |
1409 | efx_oword_t fatal_intr; | 1402 | efx_oword_t fatal_intr; |
1410 | int error, mem_perr; | 1403 | int error, mem_perr; |
1411 | static int n_int_errors; | 1404 | static int n_int_errors; |
@@ -1451,8 +1444,8 @@ out: | |||
1451 | */ | 1444 | */ |
1452 | static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id) | 1445 | static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id) |
1453 | { | 1446 | { |
1454 | struct efx_nic *efx = (struct efx_nic *)dev_id; | 1447 | struct efx_nic *efx = dev_id; |
1455 | efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; | 1448 | efx_oword_t *int_ker = efx->irq_status.addr; |
1456 | struct efx_channel *channel; | 1449 | struct efx_channel *channel; |
1457 | efx_dword_t reg; | 1450 | efx_dword_t reg; |
1458 | u32 queues; | 1451 | u32 queues; |
@@ -1489,8 +1482,8 @@ static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id) | |||
1489 | 1482 | ||
1490 | static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) | 1483 | static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) |
1491 | { | 1484 | { |
1492 | struct efx_nic *efx = (struct efx_nic *)dev_id; | 1485 | struct efx_nic *efx = dev_id; |
1493 | efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; | 1486 | efx_oword_t *int_ker = efx->irq_status.addr; |
1494 | struct efx_channel *channel; | 1487 | struct efx_channel *channel; |
1495 | int syserr; | 1488 | int syserr; |
1496 | int queues; | 1489 | int queues; |
@@ -1542,9 +1535,9 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) | |||
1542 | */ | 1535 | */ |
1543 | static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id) | 1536 | static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id) |
1544 | { | 1537 | { |
1545 | struct efx_channel *channel = (struct efx_channel *)dev_id; | 1538 | struct efx_channel *channel = dev_id; |
1546 | struct efx_nic *efx = channel->efx; | 1539 | struct efx_nic *efx = channel->efx; |
1547 | efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; | 1540 | efx_oword_t *int_ker = efx->irq_status.addr; |
1548 | int syserr; | 1541 | int syserr; |
1549 | 1542 | ||
1550 | efx->last_irq_cpu = raw_smp_processor_id(); | 1543 | efx->last_irq_cpu = raw_smp_processor_id(); |
@@ -1572,7 +1565,7 @@ static void falcon_setup_rss_indir_table(struct efx_nic *efx) | |||
1572 | unsigned long offset; | 1565 | unsigned long offset; |
1573 | efx_dword_t dword; | 1566 | efx_dword_t dword; |
1574 | 1567 | ||
1575 | if (FALCON_REV(efx) < FALCON_REV_B0) | 1568 | if (falcon_rev(efx) < FALCON_REV_B0) |
1576 | return; | 1569 | return; |
1577 | 1570 | ||
1578 | for (offset = RX_RSS_INDIR_TBL_B0; | 1571 | for (offset = RX_RSS_INDIR_TBL_B0; |
@@ -1595,7 +1588,7 @@ int falcon_init_interrupt(struct efx_nic *efx) | |||
1595 | 1588 | ||
1596 | if (!EFX_INT_MODE_USE_MSI(efx)) { | 1589 | if (!EFX_INT_MODE_USE_MSI(efx)) { |
1597 | irq_handler_t handler; | 1590 | irq_handler_t handler; |
1598 | if (FALCON_REV(efx) >= FALCON_REV_B0) | 1591 | if (falcon_rev(efx) >= FALCON_REV_B0) |
1599 | handler = falcon_legacy_interrupt_b0; | 1592 | handler = falcon_legacy_interrupt_b0; |
1600 | else | 1593 | else |
1601 | handler = falcon_legacy_interrupt_a1; | 1594 | handler = falcon_legacy_interrupt_a1; |
@@ -1636,12 +1629,13 @@ void falcon_fini_interrupt(struct efx_nic *efx) | |||
1636 | efx_oword_t reg; | 1629 | efx_oword_t reg; |
1637 | 1630 | ||
1638 | /* Disable MSI/MSI-X interrupts */ | 1631 | /* Disable MSI/MSI-X interrupts */ |
1639 | efx_for_each_channel_with_interrupt(channel, efx) | 1632 | efx_for_each_channel_with_interrupt(channel, efx) { |
1640 | if (channel->irq) | 1633 | if (channel->irq) |
1641 | free_irq(channel->irq, channel); | 1634 | free_irq(channel->irq, channel); |
1635 | } | ||
1642 | 1636 | ||
1643 | /* ACK legacy interrupt */ | 1637 | /* ACK legacy interrupt */ |
1644 | if (FALCON_REV(efx) >= FALCON_REV_B0) | 1638 | if (falcon_rev(efx) >= FALCON_REV_B0) |
1645 | falcon_read(efx, ®, INT_ISR0_B0); | 1639 | falcon_read(efx, ®, INT_ISR0_B0); |
1646 | else | 1640 | else |
1647 | falcon_irq_ack_a1(efx); | 1641 | falcon_irq_ack_a1(efx); |
@@ -1732,7 +1726,7 @@ void falcon_drain_tx_fifo(struct efx_nic *efx) | |||
1732 | efx_oword_t temp; | 1726 | efx_oword_t temp; |
1733 | int count; | 1727 | int count; |
1734 | 1728 | ||
1735 | if ((FALCON_REV(efx) < FALCON_REV_B0) || | 1729 | if ((falcon_rev(efx) < FALCON_REV_B0) || |
1736 | (efx->loopback_mode != LOOPBACK_NONE)) | 1730 | (efx->loopback_mode != LOOPBACK_NONE)) |
1737 | return; | 1731 | return; |
1738 | 1732 | ||
@@ -1785,7 +1779,7 @@ void falcon_deconfigure_mac_wrapper(struct efx_nic *efx) | |||
1785 | { | 1779 | { |
1786 | efx_oword_t temp; | 1780 | efx_oword_t temp; |
1787 | 1781 | ||
1788 | if (FALCON_REV(efx) < FALCON_REV_B0) | 1782 | if (falcon_rev(efx) < FALCON_REV_B0) |
1789 | return; | 1783 | return; |
1790 | 1784 | ||
1791 | /* Isolate the MAC -> RX */ | 1785 | /* Isolate the MAC -> RX */ |
@@ -1823,7 +1817,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx) | |||
1823 | MAC_SPEED, link_speed); | 1817 | MAC_SPEED, link_speed); |
1824 | /* On B0, MAC backpressure can be disabled and packets get | 1818 | /* On B0, MAC backpressure can be disabled and packets get |
1825 | * discarded. */ | 1819 | * discarded. */ |
1826 | if (FALCON_REV(efx) >= FALCON_REV_B0) { | 1820 | if (falcon_rev(efx) >= FALCON_REV_B0) { |
1827 | EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, | 1821 | EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, |
1828 | !efx->link_up); | 1822 | !efx->link_up); |
1829 | } | 1823 | } |
@@ -1841,7 +1835,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx) | |||
1841 | EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc); | 1835 | EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc); |
1842 | 1836 | ||
1843 | /* Unisolate the MAC -> RX */ | 1837 | /* Unisolate the MAC -> RX */ |
1844 | if (FALCON_REV(efx) >= FALCON_REV_B0) | 1838 | if (falcon_rev(efx) >= FALCON_REV_B0) |
1845 | EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1); | 1839 | EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1); |
1846 | falcon_write(efx, ®, RX_CFG_REG_KER); | 1840 | falcon_write(efx, ®, RX_CFG_REG_KER); |
1847 | } | 1841 | } |
@@ -1856,7 +1850,7 @@ int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset) | |||
1856 | return 0; | 1850 | return 0; |
1857 | 1851 | ||
1858 | /* Statistics fetch will fail if the MAC is in TX drain */ | 1852 | /* Statistics fetch will fail if the MAC is in TX drain */ |
1859 | if (FALCON_REV(efx) >= FALCON_REV_B0) { | 1853 | if (falcon_rev(efx) >= FALCON_REV_B0) { |
1860 | efx_oword_t temp; | 1854 | efx_oword_t temp; |
1861 | falcon_read(efx, &temp, MAC0_CTRL_REG_KER); | 1855 | falcon_read(efx, &temp, MAC0_CTRL_REG_KER); |
1862 | if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0)) | 1856 | if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0)) |
@@ -1940,7 +1934,7 @@ static int falcon_gmii_wait(struct efx_nic *efx) | |||
1940 | static void falcon_mdio_write(struct net_device *net_dev, int phy_id, | 1934 | static void falcon_mdio_write(struct net_device *net_dev, int phy_id, |
1941 | int addr, int value) | 1935 | int addr, int value) |
1942 | { | 1936 | { |
1943 | struct efx_nic *efx = (struct efx_nic *)net_dev->priv; | 1937 | struct efx_nic *efx = net_dev->priv; |
1944 | unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK; | 1938 | unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK; |
1945 | efx_oword_t reg; | 1939 | efx_oword_t reg; |
1946 | 1940 | ||
@@ -2008,7 +2002,7 @@ static void falcon_mdio_write(struct net_device *net_dev, int phy_id, | |||
2008 | * could be read, -1 will be returned. */ | 2002 | * could be read, -1 will be returned. */ |
2009 | static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr) | 2003 | static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr) |
2010 | { | 2004 | { |
2011 | struct efx_nic *efx = (struct efx_nic *)net_dev->priv; | 2005 | struct efx_nic *efx = net_dev->priv; |
2012 | unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK; | 2006 | unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK; |
2013 | efx_oword_t reg; | 2007 | efx_oword_t reg; |
2014 | int value = -1; | 2008 | int value = -1; |
@@ -2113,7 +2107,7 @@ int falcon_probe_port(struct efx_nic *efx) | |||
2113 | falcon_init_mdio(&efx->mii); | 2107 | falcon_init_mdio(&efx->mii); |
2114 | 2108 | ||
2115 | /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */ | 2109 | /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */ |
2116 | if (FALCON_REV(efx) >= FALCON_REV_B0) | 2110 | if (falcon_rev(efx) >= FALCON_REV_B0) |
2117 | efx->flow_control = EFX_FC_RX | EFX_FC_TX; | 2111 | efx->flow_control = EFX_FC_RX | EFX_FC_TX; |
2118 | else | 2112 | else |
2119 | efx->flow_control = EFX_FC_RX; | 2113 | efx->flow_control = EFX_FC_RX; |
@@ -2373,7 +2367,7 @@ static int falcon_probe_nic_variant(struct efx_nic *efx) | |||
2373 | return -ENODEV; | 2367 | return -ENODEV; |
2374 | } | 2368 | } |
2375 | 2369 | ||
2376 | switch (FALCON_REV(efx)) { | 2370 | switch (falcon_rev(efx)) { |
2377 | case FALCON_REV_A0: | 2371 | case FALCON_REV_A0: |
2378 | case 0xff: | 2372 | case 0xff: |
2379 | EFX_ERR(efx, "Falcon rev A0 not supported\n"); | 2373 | EFX_ERR(efx, "Falcon rev A0 not supported\n"); |
@@ -2399,7 +2393,7 @@ static int falcon_probe_nic_variant(struct efx_nic *efx) | |||
2399 | break; | 2393 | break; |
2400 | 2394 | ||
2401 | default: | 2395 | default: |
2402 | EFX_ERR(efx, "Unknown Falcon rev %d\n", FALCON_REV(efx)); | 2396 | EFX_ERR(efx, "Unknown Falcon rev %d\n", falcon_rev(efx)); |
2403 | return -ENODEV; | 2397 | return -ENODEV; |
2404 | } | 2398 | } |
2405 | 2399 | ||
@@ -2419,7 +2413,7 @@ int falcon_probe_nic(struct efx_nic *efx) | |||
2419 | 2413 | ||
2420 | /* Allocate storage for hardware specific data */ | 2414 | /* Allocate storage for hardware specific data */ |
2421 | nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); | 2415 | nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); |
2422 | efx->nic_data = (void *) nic_data; | 2416 | efx->nic_data = nic_data; |
2423 | 2417 | ||
2424 | /* Determine number of ports etc. */ | 2418 | /* Determine number of ports etc. */ |
2425 | rc = falcon_probe_nic_variant(efx); | 2419 | rc = falcon_probe_nic_variant(efx); |
@@ -2489,13 +2483,10 @@ int falcon_probe_nic(struct efx_nic *efx) | |||
2489 | */ | 2483 | */ |
2490 | int falcon_init_nic(struct efx_nic *efx) | 2484 | int falcon_init_nic(struct efx_nic *efx) |
2491 | { | 2485 | { |
2492 | struct falcon_nic_data *data; | ||
2493 | efx_oword_t temp; | 2486 | efx_oword_t temp; |
2494 | unsigned thresh; | 2487 | unsigned thresh; |
2495 | int rc; | 2488 | int rc; |
2496 | 2489 | ||
2497 | data = (struct falcon_nic_data *)efx->nic_data; | ||
2498 | |||
2499 | /* Set up the address region register. This is only needed | 2490 | /* Set up the address region register. This is only needed |
2500 | * for the B0 FPGA, but since we are just pushing in the | 2491 | * for the B0 FPGA, but since we are just pushing in the |
2501 | * reset defaults this may as well be unconditional. */ | 2492 | * reset defaults this may as well be unconditional. */ |
@@ -2562,7 +2553,7 @@ int falcon_init_nic(struct efx_nic *efx) | |||
2562 | 2553 | ||
2563 | /* Set number of RSS queues for receive path. */ | 2554 | /* Set number of RSS queues for receive path. */ |
2564 | falcon_read(efx, &temp, RX_FILTER_CTL_REG); | 2555 | falcon_read(efx, &temp, RX_FILTER_CTL_REG); |
2565 | if (FALCON_REV(efx) >= FALCON_REV_B0) | 2556 | if (falcon_rev(efx) >= FALCON_REV_B0) |
2566 | EFX_SET_OWORD_FIELD(temp, NUM_KER, 0); | 2557 | EFX_SET_OWORD_FIELD(temp, NUM_KER, 0); |
2567 | else | 2558 | else |
2568 | EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->rss_queues - 1); | 2559 | EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->rss_queues - 1); |
@@ -2600,7 +2591,7 @@ int falcon_init_nic(struct efx_nic *efx) | |||
2600 | /* Prefetch threshold 2 => fetch when descriptor cache half empty */ | 2591 | /* Prefetch threshold 2 => fetch when descriptor cache half empty */ |
2601 | EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2); | 2592 | EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2); |
2602 | /* Squash TX of packets of 16 bytes or less */ | 2593 | /* Squash TX of packets of 16 bytes or less */ |
2603 | if (FALCON_REV(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx)) | 2594 | if (falcon_rev(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx)) |
2604 | EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1); | 2595 | EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1); |
2605 | falcon_write(efx, &temp, TX_CFG2_REG_KER); | 2596 | falcon_write(efx, &temp, TX_CFG2_REG_KER); |
2606 | 2597 | ||
@@ -2617,7 +2608,7 @@ int falcon_init_nic(struct efx_nic *efx) | |||
2617 | if (EFX_WORKAROUND_7575(efx)) | 2608 | if (EFX_WORKAROUND_7575(efx)) |
2618 | EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE, | 2609 | EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE, |
2619 | (3 * 4096) / 32); | 2610 | (3 * 4096) / 32); |
2620 | if (FALCON_REV(efx) >= FALCON_REV_B0) | 2611 | if (falcon_rev(efx) >= FALCON_REV_B0) |
2621 | EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1); | 2612 | EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1); |
2622 | 2613 | ||
2623 | /* RX FIFO flow control thresholds */ | 2614 | /* RX FIFO flow control thresholds */ |
@@ -2633,7 +2624,7 @@ int falcon_init_nic(struct efx_nic *efx) | |||
2633 | falcon_write(efx, &temp, RX_CFG_REG_KER); | 2624 | falcon_write(efx, &temp, RX_CFG_REG_KER); |
2634 | 2625 | ||
2635 | /* Set destination of both TX and RX Flush events */ | 2626 | /* Set destination of both TX and RX Flush events */ |
2636 | if (FALCON_REV(efx) >= FALCON_REV_B0) { | 2627 | if (falcon_rev(efx) >= FALCON_REV_B0) { |
2637 | EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0); | 2628 | EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0); |
2638 | falcon_write(efx, &temp, DP_CTRL_REG); | 2629 | falcon_write(efx, &temp, DP_CTRL_REG); |
2639 | } | 2630 | } |
@@ -2647,7 +2638,7 @@ void falcon_remove_nic(struct efx_nic *efx) | |||
2647 | 2638 | ||
2648 | falcon_free_buffer(efx, &efx->irq_status); | 2639 | falcon_free_buffer(efx, &efx->irq_status); |
2649 | 2640 | ||
2650 | (void) falcon_reset_hw(efx, RESET_TYPE_ALL); | 2641 | falcon_reset_hw(efx, RESET_TYPE_ALL); |
2651 | 2642 | ||
2652 | /* Release the second function after the reset */ | 2643 | /* Release the second function after the reset */ |
2653 | if (nic_data->pci_dev2) { | 2644 | if (nic_data->pci_dev2) { |
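The falcon.c hunks replace the BITS_PER_LONG-conditional FALCON_DMA_MASK definition with DMA_BIT_MASK(46), which always yields a u64 mask, making the EFX_DMA_MASK() helper removed from bitfield.h unnecessary. A sketch of how such a mask is typically applied at probe time; the function name is hypothetical and error handling is trimmed.

#include <linux/pci.h>
#include <linux/dma-mapping.h>

#define MY_DMA_MASK DMA_BIT_MASK(46)    /* Falcon generates 46-bit DMA addresses */

static int my_set_dma_mask(struct pci_dev *pci_dev)
{
        int rc;

        rc = pci_set_dma_mask(pci_dev, MY_DMA_MASK);
        if (rc)
                return rc;
        /* Coherent (descriptor ring) allocations obey the same limit */
        return pci_set_consistent_dma_mask(pci_dev, MY_DMA_MASK);
}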
diff --git a/drivers/net/sfc/falcon.h b/drivers/net/sfc/falcon.h index 6117403b0c03..492f9bc28840 100644 --- a/drivers/net/sfc/falcon.h +++ b/drivers/net/sfc/falcon.h | |||
@@ -23,7 +23,10 @@ enum falcon_revision { | |||
23 | FALCON_REV_B0 = 2, | 23 | FALCON_REV_B0 = 2, |
24 | }; | 24 | }; |
25 | 25 | ||
26 | #define FALCON_REV(efx) ((efx)->pci_dev->revision) | 26 | static inline int falcon_rev(struct efx_nic *efx) |
27 | { | ||
28 | return efx->pci_dev->revision; | ||
29 | } | ||
27 | 30 | ||
28 | extern struct efx_nic_type falcon_a_nic_type; | 31 | extern struct efx_nic_type falcon_a_nic_type; |
29 | extern struct efx_nic_type falcon_b_nic_type; | 32 | extern struct efx_nic_type falcon_b_nic_type; |
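The falcon.h hunk above swaps the FALCON_REV() macro for a static inline falcon_rev(), trading textual substitution for a typed, debugger-visible function with identical generated code. A minimal userspace sketch of the same trade-off; struct chip and its revision field are hypothetical stand-ins, not sfc types:

#include <stdio.h>

struct chip {
        int revision;                   /* stands in for pci_dev->revision */
};

/* Macro form: the argument is substituted textually, so any expression
 * with a ->revision member slips through unchecked. */
#define CHIP_REV(c) ((c)->revision)

/* Inline form: the compiler enforces that the argument really is a
 * struct chip *, and the symbol is visible to debuggers; the generated
 * code is the same. */
static inline int chip_rev(const struct chip *c)
{
        return c->revision;
}

int main(void)
{
        struct chip c = { .revision = 2 };

        printf("%d %d\n", CHIP_REV(&c), chip_rev(&c));
        return 0;
}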
diff --git a/drivers/net/sfc/falcon_hwdefs.h b/drivers/net/sfc/falcon_hwdefs.h index 06e2d68fc3d1..6d003114eeab 100644 --- a/drivers/net/sfc/falcon_hwdefs.h +++ b/drivers/net/sfc/falcon_hwdefs.h | |||
@@ -1125,7 +1125,7 @@ struct falcon_nvconfig_board_v2 { | |||
1125 | u8 port1_phy_type; | 1125 | u8 port1_phy_type; |
1126 | __le16 asic_sub_revision; | 1126 | __le16 asic_sub_revision; |
1127 | __le16 board_revision; | 1127 | __le16 board_revision; |
1128 | } __attribute__ ((packed)); | 1128 | } __packed; |
1129 | 1129 | ||
1130 | #define NVCONFIG_BASE 0x300 | 1130 | #define NVCONFIG_BASE 0x300 |
1131 | #define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C | 1131 | #define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C |
@@ -1144,6 +1144,6 @@ struct falcon_nvconfig { | |||
1144 | __le16 board_struct_ver; | 1144 | __le16 board_struct_ver; |
1145 | __le16 board_checksum; | 1145 | __le16 board_checksum; |
1146 | struct falcon_nvconfig_board_v2 board_v2; | 1146 | struct falcon_nvconfig_board_v2 board_v2; |
1147 | } __attribute__ ((packed)); | 1147 | } __packed; |
1148 | 1148 | ||
1149 | #endif /* EFX_FALCON_HWDEFS_H */ | 1149 | #endif /* EFX_FALCON_HWDEFS_H */ |
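__packed, used in the falcon_hwdefs.h hunks above, is the kernel's shorthand for __attribute__((packed)) from its compiler headers; it removes padding so a structure matches an on-wire or on-flash layout byte for byte. A small userspace sketch, with made-up structs for illustration:

#include <stdio.h>

#define __packed __attribute__((packed))        /* what the kernel macro expands to */

struct natural { unsigned char tag; unsigned short len; };              /* typically padded to 4 */
struct on_wire { unsigned char tag; unsigned short len; } __packed;     /* exactly 3 bytes */

int main(void)
{
        printf("natural=%zu packed=%zu\n",
               sizeof(struct natural), sizeof(struct on_wire));
        return 0;
}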
diff --git a/drivers/net/sfc/falcon_io.h b/drivers/net/sfc/falcon_io.h index ea08184ddfa9..6670cdfc41ab 100644 --- a/drivers/net/sfc/falcon_io.h +++ b/drivers/net/sfc/falcon_io.h | |||
@@ -56,14 +56,27 @@ | |||
56 | #define FALCON_USE_QWORD_IO 1 | 56 | #define FALCON_USE_QWORD_IO 1 |
57 | #endif | 57 | #endif |
58 | 58 | ||
59 | #define _falcon_writeq(efx, value, reg) \ | 59 | #ifdef FALCON_USE_QWORD_IO |
60 | __raw_writeq((__force u64) (value), (efx)->membase + (reg)) | 60 | static inline void _falcon_writeq(struct efx_nic *efx, __le64 value, |
61 | #define _falcon_writel(efx, value, reg) \ | 61 | unsigned int reg) |
62 | __raw_writel((__force u32) (value), (efx)->membase + (reg)) | 62 | { |
63 | #define _falcon_readq(efx, reg) \ | 63 | __raw_writeq((__force u64)value, efx->membase + reg); |
64 | ((__force __le64) __raw_readq((efx)->membase + (reg))) | 64 | } |
65 | #define _falcon_readl(efx, reg) \ | 65 | static inline __le64 _falcon_readq(struct efx_nic *efx, unsigned int reg) |
66 | ((__force __le32) __raw_readl((efx)->membase + (reg))) | 66 | { |
67 | return (__force __le64)__raw_readq(efx->membase + reg); | ||
68 | } | ||
69 | #endif | ||
70 | |||
71 | static inline void _falcon_writel(struct efx_nic *efx, __le32 value, | ||
72 | unsigned int reg) | ||
73 | { | ||
74 | __raw_writel((__force u32)value, efx->membase + reg); | ||
75 | } | ||
76 | static inline __le32 _falcon_readl(struct efx_nic *efx, unsigned int reg) | ||
77 | { | ||
78 | return (__force __le32)__raw_readl(efx->membase + reg); | ||
79 | } | ||
67 | 80 | ||
68 | /* Writes to a normal 16-byte Falcon register, locking as appropriate. */ | 81 | /* Writes to a normal 16-byte Falcon register, locking as appropriate. */ |
69 | static inline void falcon_write(struct efx_nic *efx, efx_oword_t *value, | 82 | static inline void falcon_write(struct efx_nic *efx, efx_oword_t *value, |
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c index a74b7931a3c4..55c0d9760be8 100644 --- a/drivers/net/sfc/falcon_xmac.c +++ b/drivers/net/sfc/falcon_xmac.c | |||
@@ -221,7 +221,7 @@ static int falcon_xgmii_status(struct efx_nic *efx) | |||
221 | { | 221 | { |
222 | efx_dword_t reg; | 222 | efx_dword_t reg; |
223 | 223 | ||
224 | if (FALCON_REV(efx) < FALCON_REV_B0) | 224 | if (falcon_rev(efx) < FALCON_REV_B0) |
225 | return 1; | 225 | return 1; |
226 | 226 | ||
227 | /* The ISR latches, so clear it and re-read */ | 227 | /* The ISR latches, so clear it and re-read */ |
@@ -241,7 +241,7 @@ static void falcon_mask_status_intr(struct efx_nic *efx, int enable) | |||
241 | { | 241 | { |
242 | efx_dword_t reg; | 242 | efx_dword_t reg; |
243 | 243 | ||
244 | if ((FALCON_REV(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx)) | 244 | if ((falcon_rev(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx)) |
245 | return; | 245 | return; |
246 | 246 | ||
247 | /* Flush the ISR */ | 247 | /* Flush the ISR */ |
@@ -454,12 +454,12 @@ static int falcon_check_xaui_link_up(struct efx_nic *efx) | |||
454 | 454 | ||
455 | EFX_LOG(efx, "%s Clobbering XAUI (%d tries left).\n", | 455 | EFX_LOG(efx, "%s Clobbering XAUI (%d tries left).\n", |
456 | __func__, tries); | 456 | __func__, tries); |
457 | (void) falcon_reset_xaui(efx); | 457 | falcon_reset_xaui(efx); |
458 | udelay(200); | 458 | udelay(200); |
459 | tries--; | 459 | tries--; |
460 | } | 460 | } |
461 | 461 | ||
462 | EFX_ERR(efx, "Failed to bring XAUI link back up in %d tries!\n", | 462 | EFX_LOG(efx, "Failed to bring XAUI link back up in %d tries!\n", |
463 | max_tries); | 463 | max_tries); |
464 | return 0; | 464 | return 0; |
465 | } | 465 | } |
@@ -572,7 +572,7 @@ int falcon_check_xmac(struct efx_nic *efx) | |||
572 | xaui_link_ok = falcon_xaui_link_ok(efx); | 572 | xaui_link_ok = falcon_xaui_link_ok(efx); |
573 | 573 | ||
574 | if (EFX_WORKAROUND_5147(efx) && !xaui_link_ok) | 574 | if (EFX_WORKAROUND_5147(efx) && !xaui_link_ok) |
575 | (void) falcon_reset_xaui(efx); | 575 | falcon_reset_xaui(efx); |
576 | 576 | ||
577 | /* Call the PHY check_hw routine */ | 577 | /* Call the PHY check_hw routine */ |
578 | rc = efx->phy_op->check_hw(efx); | 578 | rc = efx->phy_op->check_hw(efx); |
@@ -639,7 +639,7 @@ int falcon_xmac_set_pause(struct efx_nic *efx, enum efx_fc_type flow_control) | |||
639 | reset = ((flow_control & EFX_FC_TX) && | 639 | reset = ((flow_control & EFX_FC_TX) && |
640 | !(efx->flow_control & EFX_FC_TX)); | 640 | !(efx->flow_control & EFX_FC_TX)); |
641 | if (EFX_WORKAROUND_11482(efx) && reset) { | 641 | if (EFX_WORKAROUND_11482(efx) && reset) { |
642 | if (FALCON_REV(efx) >= FALCON_REV_B0) { | 642 | if (falcon_rev(efx) >= FALCON_REV_B0) { |
643 | /* Recover by resetting the EM block */ | 643 | /* Recover by resetting the EM block */ |
644 | if (efx->link_up) | 644 | if (efx->link_up) |
645 | falcon_drain_tx_fifo(efx); | 645 | falcon_drain_tx_fifo(efx); |
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h index 59f261b4171f..5e20e7551dae 100644 --- a/drivers/net/sfc/net_driver.h +++ b/drivers/net/sfc/net_driver.h | |||
@@ -42,7 +42,7 @@ | |||
42 | #ifndef EFX_DRIVER_NAME | 42 | #ifndef EFX_DRIVER_NAME |
43 | #define EFX_DRIVER_NAME "sfc" | 43 | #define EFX_DRIVER_NAME "sfc" |
44 | #endif | 44 | #endif |
45 | #define EFX_DRIVER_VERSION "2.2.0136" | 45 | #define EFX_DRIVER_VERSION "2.2" |
46 | 46 | ||
47 | #ifdef EFX_ENABLE_DEBUG | 47 | #ifdef EFX_ENABLE_DEBUG |
48 | #define EFX_BUG_ON_PARANOID(x) BUG_ON(x) | 48 | #define EFX_BUG_ON_PARANOID(x) BUG_ON(x) |
@@ -52,28 +52,19 @@ | |||
52 | #define EFX_WARN_ON_PARANOID(x) do {} while (0) | 52 | #define EFX_WARN_ON_PARANOID(x) do {} while (0) |
53 | #endif | 53 | #endif |
54 | 54 | ||
55 | #define NET_DEV_REGISTERED(efx) \ | ||
56 | ((efx)->net_dev->reg_state == NETREG_REGISTERED) | ||
57 | |||
58 | /* Include net device name in log messages if it has been registered. | ||
59 | * Use efx->name not efx->net_dev->name so that races with (un)registration | ||
60 | * are harmless. | ||
61 | */ | ||
62 | #define NET_DEV_NAME(efx) (NET_DEV_REGISTERED(efx) ? (efx)->name : "") | ||
63 | |||
64 | /* Un-rate-limited logging */ | 55 | /* Un-rate-limited logging */ |
65 | #define EFX_ERR(efx, fmt, args...) \ | 56 | #define EFX_ERR(efx, fmt, args...) \ |
66 | dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, NET_DEV_NAME(efx), ##args) | 57 | dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, efx_dev_name(efx), ##args) |
67 | 58 | ||
68 | #define EFX_INFO(efx, fmt, args...) \ | 59 | #define EFX_INFO(efx, fmt, args...) \ |
69 | dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, NET_DEV_NAME(efx), ##args) | 60 | dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, efx_dev_name(efx), ##args) |
70 | 61 | ||
71 | #ifdef EFX_ENABLE_DEBUG | 62 | #ifdef EFX_ENABLE_DEBUG |
72 | #define EFX_LOG(efx, fmt, args...) \ | 63 | #define EFX_LOG(efx, fmt, args...) \ |
73 | dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args) | 64 | dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args) |
74 | #else | 65 | #else |
75 | #define EFX_LOG(efx, fmt, args...) \ | 66 | #define EFX_LOG(efx, fmt, args...) \ |
76 | dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args) | 67 | dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args) |
77 | #endif | 68 | #endif |
78 | 69 | ||
79 | #define EFX_TRACE(efx, fmt, args...) do {} while (0) | 70 | #define EFX_TRACE(efx, fmt, args...) do {} while (0) |
@@ -90,11 +81,6 @@ do {if (net_ratelimit()) EFX_INFO(efx, fmt, ##args); } while (0) | |||
90 | #define EFX_LOG_RL(efx, fmt, args...) \ | 81 | #define EFX_LOG_RL(efx, fmt, args...) \ |
91 | do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0) | 82 | do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0) |
92 | 83 | ||
93 | /* Kernel headers may redefine inline anyway */ | ||
94 | #ifndef inline | ||
95 | #define inline inline __attribute__ ((always_inline)) | ||
96 | #endif | ||
97 | |||
98 | /************************************************************************** | 84 | /************************************************************************** |
99 | * | 85 | * |
100 | * Efx data structures | 86 | * Efx data structures |
@@ -695,7 +681,7 @@ struct efx_nic { | |||
695 | struct workqueue_struct *workqueue; | 681 | struct workqueue_struct *workqueue; |
696 | struct work_struct reset_work; | 682 | struct work_struct reset_work; |
697 | struct delayed_work monitor_work; | 683 | struct delayed_work monitor_work; |
698 | unsigned long membase_phys; | 684 | resource_size_t membase_phys; |
699 | void __iomem *membase; | 685 | void __iomem *membase; |
700 | spinlock_t biu_lock; | 686 | spinlock_t biu_lock; |
701 | enum efx_int_mode interrupt_mode; | 687 | enum efx_int_mode interrupt_mode; |
@@ -719,7 +705,7 @@ struct efx_nic { | |||
719 | 705 | ||
720 | unsigned n_rx_nodesc_drop_cnt; | 706 | unsigned n_rx_nodesc_drop_cnt; |
721 | 707 | ||
722 | void *nic_data; | 708 | struct falcon_nic_data *nic_data; |
723 | 709 | ||
724 | struct mutex mac_lock; | 710 | struct mutex mac_lock; |
725 | int port_enabled; | 711 | int port_enabled; |
@@ -760,6 +746,20 @@ struct efx_nic { | |||
760 | void *loopback_selftest; | 746 | void *loopback_selftest; |
761 | }; | 747 | }; |
762 | 748 | ||
749 | static inline int efx_dev_registered(struct efx_nic *efx) | ||
750 | { | ||
751 | return efx->net_dev->reg_state == NETREG_REGISTERED; | ||
752 | } | ||
753 | |||
754 | /* Net device name, for inclusion in log messages if it has been registered. | ||
755 | * Use efx->name not efx->net_dev->name so that races with (un)registration | ||
756 | * are harmless. | ||
757 | */ | ||
758 | static inline const char *efx_dev_name(struct efx_nic *efx) | ||
759 | { | ||
760 | return efx_dev_registered(efx) ? efx->name : ""; | ||
761 | } | ||
762 | |||
763 | /** | 763 | /** |
764 | * struct efx_nic_type - Efx device type definition | 764 | * struct efx_nic_type - Efx device type definition |
765 | * @mem_bar: Memory BAR number | 765 | * @mem_bar: Memory BAR number |
@@ -795,7 +795,7 @@ struct efx_nic_type { | |||
795 | unsigned int txd_ring_mask; | 795 | unsigned int txd_ring_mask; |
796 | unsigned int rxd_ring_mask; | 796 | unsigned int rxd_ring_mask; |
797 | unsigned int evq_size; | 797 | unsigned int evq_size; |
798 | dma_addr_t max_dma_mask; | 798 | u64 max_dma_mask; |
799 | unsigned int tx_dma_mask; | 799 | unsigned int tx_dma_mask; |
800 | unsigned bug5391_mask; | 800 | unsigned bug5391_mask; |
801 | 801 | ||
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c index 670622373ddf..601b001437c0 100644 --- a/drivers/net/sfc/rx.c +++ b/drivers/net/sfc/rx.c | |||
@@ -86,14 +86,17 @@ static unsigned int rx_refill_limit = 95; | |||
86 | */ | 86 | */ |
87 | #define EFX_RXD_HEAD_ROOM 2 | 87 | #define EFX_RXD_HEAD_ROOM 2 |
88 | 88 | ||
89 | /* Macros for zero-order pages (potentially) containing multiple RX buffers */ | 89 | static inline unsigned int efx_rx_buf_offset(struct efx_rx_buffer *buf) |
90 | #define RX_DATA_OFFSET(_data) \ | 90 | { |
91 | (((unsigned long) (_data)) & (PAGE_SIZE-1)) | 91 | /* Offset is always within one page, so we don't need to consider |
92 | #define RX_BUF_OFFSET(_rx_buf) \ | 92 | * the page order. |
93 | RX_DATA_OFFSET((_rx_buf)->data) | 93 | */ |
94 | 94 | return (__force unsigned long) buf->data & (PAGE_SIZE - 1); | |
95 | #define RX_PAGE_SIZE(_efx) \ | 95 | } |
96 | (PAGE_SIZE * (1u << (_efx)->rx_buffer_order)) | 96 | static inline unsigned int efx_rx_buf_size(struct efx_nic *efx) |
97 | { | ||
98 | return PAGE_SIZE << efx->rx_buffer_order; | ||
99 | } | ||
97 | 100 | ||
98 | 101 | ||
99 | /************************************************************************** | 102 | /************************************************************************** |
@@ -106,7 +109,7 @@ static unsigned int rx_refill_limit = 95; | |||
106 | static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr, | 109 | static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr, |
107 | void **tcpudp_hdr, u64 *hdr_flags, void *priv) | 110 | void **tcpudp_hdr, u64 *hdr_flags, void *priv) |
108 | { | 111 | { |
109 | struct efx_channel *channel = (struct efx_channel *)priv; | 112 | struct efx_channel *channel = priv; |
110 | struct iphdr *iph; | 113 | struct iphdr *iph; |
111 | struct tcphdr *th; | 114 | struct tcphdr *th; |
112 | 115 | ||
@@ -131,12 +134,12 @@ static int efx_get_frag_hdr(struct skb_frag_struct *frag, void **mac_hdr, | |||
131 | void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags, | 134 | void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags, |
132 | void *priv) | 135 | void *priv) |
133 | { | 136 | { |
134 | struct efx_channel *channel = (struct efx_channel *)priv; | 137 | struct efx_channel *channel = priv; |
135 | struct ethhdr *eh; | 138 | struct ethhdr *eh; |
136 | struct iphdr *iph; | 139 | struct iphdr *iph; |
137 | 140 | ||
138 | /* We support EtherII and VLAN encapsulated IPv4 */ | 141 | /* We support EtherII and VLAN encapsulated IPv4 */ |
139 | eh = (struct ethhdr *)(page_address(frag->page) + frag->page_offset); | 142 | eh = page_address(frag->page) + frag->page_offset; |
140 | *mac_hdr = eh; | 143 | *mac_hdr = eh; |
141 | 144 | ||
142 | if (eh->h_proto == htons(ETH_P_IP)) { | 145 | if (eh->h_proto == htons(ETH_P_IP)) { |
@@ -269,7 +272,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue, | |||
269 | return -ENOMEM; | 272 | return -ENOMEM; |
270 | 273 | ||
271 | dma_addr = pci_map_page(efx->pci_dev, rx_buf->page, | 274 | dma_addr = pci_map_page(efx->pci_dev, rx_buf->page, |
272 | 0, RX_PAGE_SIZE(efx), | 275 | 0, efx_rx_buf_size(efx), |
273 | PCI_DMA_FROMDEVICE); | 276 | PCI_DMA_FROMDEVICE); |
274 | 277 | ||
275 | if (unlikely(pci_dma_mapping_error(dma_addr))) { | 278 | if (unlikely(pci_dma_mapping_error(dma_addr))) { |
@@ -280,14 +283,14 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue, | |||
280 | 283 | ||
281 | rx_queue->buf_page = rx_buf->page; | 284 | rx_queue->buf_page = rx_buf->page; |
282 | rx_queue->buf_dma_addr = dma_addr; | 285 | rx_queue->buf_dma_addr = dma_addr; |
283 | rx_queue->buf_data = ((char *) page_address(rx_buf->page) + | 286 | rx_queue->buf_data = (page_address(rx_buf->page) + |
284 | EFX_PAGE_IP_ALIGN); | 287 | EFX_PAGE_IP_ALIGN); |
285 | } | 288 | } |
286 | 289 | ||
287 | offset = RX_DATA_OFFSET(rx_queue->buf_data); | ||
288 | rx_buf->len = bytes; | 290 | rx_buf->len = bytes; |
289 | rx_buf->dma_addr = rx_queue->buf_dma_addr + offset; | ||
290 | rx_buf->data = rx_queue->buf_data; | 291 | rx_buf->data = rx_queue->buf_data; |
292 | offset = efx_rx_buf_offset(rx_buf); | ||
293 | rx_buf->dma_addr = rx_queue->buf_dma_addr + offset; | ||
291 | 294 | ||
292 | /* Try to pack multiple buffers per page */ | 295 | /* Try to pack multiple buffers per page */ |
293 | if (efx->rx_buffer_order == 0) { | 296 | if (efx->rx_buffer_order == 0) { |
@@ -295,7 +298,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue, | |||
295 | rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff); | 298 | rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff); |
296 | offset += ((bytes + 0x1ff) & ~0x1ff); | 299 | offset += ((bytes + 0x1ff) & ~0x1ff); |
297 | 300 | ||
298 | space = RX_PAGE_SIZE(efx) - offset; | 301 | space = efx_rx_buf_size(efx) - offset; |
299 | if (space >= bytes) { | 302 | if (space >= bytes) { |
300 | /* Refs dropped on kernel releasing each skb */ | 303 | /* Refs dropped on kernel releasing each skb */ |
301 | get_page(rx_queue->buf_page); | 304 | get_page(rx_queue->buf_page); |
@@ -344,7 +347,8 @@ static inline void efx_unmap_rx_buffer(struct efx_nic *efx, | |||
344 | EFX_BUG_ON_PARANOID(rx_buf->skb); | 347 | EFX_BUG_ON_PARANOID(rx_buf->skb); |
345 | if (rx_buf->unmap_addr) { | 348 | if (rx_buf->unmap_addr) { |
346 | pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr, | 349 | pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr, |
347 | RX_PAGE_SIZE(efx), PCI_DMA_FROMDEVICE); | 350 | efx_rx_buf_size(efx), |
351 | PCI_DMA_FROMDEVICE); | ||
348 | rx_buf->unmap_addr = 0; | 352 | rx_buf->unmap_addr = 0; |
349 | } | 353 | } |
350 | } else if (likely(rx_buf->skb)) { | 354 | } else if (likely(rx_buf->skb)) { |
@@ -400,9 +404,10 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, | |||
400 | return 0; | 404 | return 0; |
401 | 405 | ||
402 | /* Record minimum fill level */ | 406 | /* Record minimum fill level */ |
403 | if (unlikely(fill_level < rx_queue->min_fill)) | 407 | if (unlikely(fill_level < rx_queue->min_fill)) { |
404 | if (fill_level) | 408 | if (fill_level) |
405 | rx_queue->min_fill = fill_level; | 409 | rx_queue->min_fill = fill_level; |
410 | } | ||
406 | 411 | ||
407 | /* Acquire RX add lock. If this lock is contended, then a fast | 412 | /* Acquire RX add lock. If this lock is contended, then a fast |
408 | * fill must already be in progress (e.g. in the refill | 413 | * fill must already be in progress (e.g. in the refill |
@@ -552,7 +557,7 @@ static inline void efx_rx_packet_lro(struct efx_channel *channel, | |||
552 | struct skb_frag_struct frags; | 557 | struct skb_frag_struct frags; |
553 | 558 | ||
554 | frags.page = rx_buf->page; | 559 | frags.page = rx_buf->page; |
555 | frags.page_offset = RX_BUF_OFFSET(rx_buf); | 560 | frags.page_offset = efx_rx_buf_offset(rx_buf); |
556 | frags.size = rx_buf->len; | 561 | frags.size = rx_buf->len; |
557 | 562 | ||
558 | lro_receive_frags(lro_mgr, &frags, rx_buf->len, | 563 | lro_receive_frags(lro_mgr, &frags, rx_buf->len, |
@@ -597,7 +602,7 @@ static inline struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf, | |||
597 | if (unlikely(rx_buf->len > hdr_len)) { | 602 | if (unlikely(rx_buf->len > hdr_len)) { |
598 | struct skb_frag_struct *frag = skb_shinfo(skb)->frags; | 603 | struct skb_frag_struct *frag = skb_shinfo(skb)->frags; |
599 | frag->page = rx_buf->page; | 604 | frag->page = rx_buf->page; |
600 | frag->page_offset = RX_BUF_OFFSET(rx_buf) + hdr_len; | 605 | frag->page_offset = efx_rx_buf_offset(rx_buf) + hdr_len; |
601 | frag->size = skb->len - hdr_len; | 606 | frag->size = skb->len - hdr_len; |
602 | skb_shinfo(skb)->nr_frags = 1; | 607 | skb_shinfo(skb)->nr_frags = 1; |
603 | skb->data_len = frag->size; | 608 | skb->data_len = frag->size; |
@@ -851,7 +856,8 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) | |||
851 | /* For a page that is part-way through splitting into RX buffers */ | 856 | /* For a page that is part-way through splitting into RX buffers */ |
852 | if (rx_queue->buf_page != NULL) { | 857 | if (rx_queue->buf_page != NULL) { |
853 | pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr, | 858 | pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr, |
854 | RX_PAGE_SIZE(rx_queue->efx), PCI_DMA_FROMDEVICE); | 859 | efx_rx_buf_size(rx_queue->efx), |
860 | PCI_DMA_FROMDEVICE); | ||
855 | __free_pages(rx_queue->buf_page, | 861 | __free_pages(rx_queue->buf_page, |
856 | rx_queue->efx->rx_buffer_order); | 862 | rx_queue->efx->rx_buffer_order); |
857 | rx_queue->buf_page = NULL; | 863 | rx_queue->buf_page = NULL; |
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c index cbda15946e8f..3b2de9fe7f27 100644 --- a/drivers/net/sfc/selftest.c +++ b/drivers/net/sfc/selftest.c | |||
@@ -290,7 +290,7 @@ void efx_loopback_rx_packet(struct efx_nic *efx, | |||
290 | 290 | ||
291 | payload = &state->payload; | 291 | payload = &state->payload; |
292 | 292 | ||
293 | received = (struct efx_loopback_payload *)(char *) buf_ptr; | 293 | received = (struct efx_loopback_payload *) buf_ptr; |
294 | received->ip.saddr = payload->ip.saddr; | 294 | received->ip.saddr = payload->ip.saddr; |
295 | received->ip.check = payload->ip.check; | 295 | received->ip.check = payload->ip.check; |
296 | 296 | ||
@@ -424,10 +424,10 @@ static int efx_tx_loopback(struct efx_tx_queue *tx_queue) | |||
424 | * interrupt handler. */ | 424 | * interrupt handler. */ |
425 | smp_wmb(); | 425 | smp_wmb(); |
426 | 426 | ||
427 | if (NET_DEV_REGISTERED(efx)) | 427 | if (efx_dev_registered(efx)) |
428 | netif_tx_lock_bh(efx->net_dev); | 428 | netif_tx_lock_bh(efx->net_dev); |
429 | rc = efx_xmit(efx, tx_queue, skb); | 429 | rc = efx_xmit(efx, tx_queue, skb); |
430 | if (NET_DEV_REGISTERED(efx)) | 430 | if (efx_dev_registered(efx)) |
431 | netif_tx_unlock_bh(efx->net_dev); | 431 | netif_tx_unlock_bh(efx->net_dev); |
432 | 432 | ||
433 | if (rc != NETDEV_TX_OK) { | 433 | if (rc != NETDEV_TX_OK) { |
@@ -453,7 +453,7 @@ static int efx_rx_loopback(struct efx_tx_queue *tx_queue, | |||
453 | int tx_done = 0, rx_good, rx_bad; | 453 | int tx_done = 0, rx_good, rx_bad; |
454 | int i, rc = 0; | 454 | int i, rc = 0; |
455 | 455 | ||
456 | if (NET_DEV_REGISTERED(efx)) | 456 | if (efx_dev_registered(efx)) |
457 | netif_tx_lock_bh(efx->net_dev); | 457 | netif_tx_lock_bh(efx->net_dev); |
458 | 458 | ||
459 | /* Count the number of tx completions, and decrement the refcnt. Any | 459 | /* Count the number of tx completions, and decrement the refcnt. Any |
@@ -465,7 +465,7 @@ static int efx_rx_loopback(struct efx_tx_queue *tx_queue, | |||
465 | dev_kfree_skb_any(skb); | 465 | dev_kfree_skb_any(skb); |
466 | } | 466 | } |
467 | 467 | ||
468 | if (NET_DEV_REGISTERED(efx)) | 468 | if (efx_dev_registered(efx)) |
469 | netif_tx_unlock_bh(efx->net_dev); | 469 | netif_tx_unlock_bh(efx->net_dev); |
470 | 470 | ||
471 | /* Check TX completion and received packet counts */ | 471 | /* Check TX completion and received packet counts */ |
@@ -517,6 +517,8 @@ efx_test_loopback(struct efx_tx_queue *tx_queue, | |||
517 | state->packet_count = min(1 << (i << 2), state->packet_count); | 517 | state->packet_count = min(1 << (i << 2), state->packet_count); |
518 | state->skbs = kzalloc(sizeof(state->skbs[0]) * | 518 | state->skbs = kzalloc(sizeof(state->skbs[0]) * |
519 | state->packet_count, GFP_KERNEL); | 519 | state->packet_count, GFP_KERNEL); |
520 | if (!state->skbs) | ||
521 | return -ENOMEM; | ||
520 | state->flush = 0; | 522 | state->flush = 0; |
521 | 523 | ||
522 | EFX_LOG(efx, "TX queue %d testing %s loopback with %d " | 524 | EFX_LOG(efx, "TX queue %d testing %s loopback with %d " |
@@ -700,7 +702,7 @@ int efx_offline_test(struct efx_nic *efx, | |||
700 | * "flushing" so all inflight packets are dropped */ | 702 | * "flushing" so all inflight packets are dropped */ |
701 | BUG_ON(efx->loopback_selftest); | 703 | BUG_ON(efx->loopback_selftest); |
702 | state->flush = 1; | 704 | state->flush = 1; |
703 | efx->loopback_selftest = (void *)state; | 705 | efx->loopback_selftest = state; |
704 | 706 | ||
705 | rc = efx_test_loopbacks(efx, tests, loopback_modes); | 707 | rc = efx_test_loopbacks(efx, tests, loopback_modes); |
706 | 708 | ||
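The selftest.c hunk above (like the tenxpress.c and xfp_phy.c hunks further down) adds the missing allocation-failure check after kzalloc(), bailing out with -ENOMEM instead of dereferencing a NULL pointer later. A kernel-style sketch of the pattern, with hypothetical names:

#include <linux/slab.h>
#include <linux/errno.h>

struct phy_data {
        int state;
};

static int phy_init(struct phy_data **out)
{
        struct phy_data *pd = kzalloc(sizeof(*pd), GFP_KERNEL);

        /* Check before the pointer is stored or used anywhere. */
        if (!pd)
                return -ENOMEM;

        *out = pd;
        return 0;
}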
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c index 725d1a539c49..66a0d1442aba 100644 --- a/drivers/net/sfc/sfe4001.c +++ b/drivers/net/sfc/sfe4001.c | |||
@@ -116,18 +116,18 @@ void sfe4001_poweroff(struct efx_nic *efx) | |||
116 | 116 | ||
117 | /* Turn off all power rails */ | 117 | /* Turn off all power rails */ |
118 | out = 0xff; | 118 | out = 0xff; |
119 | (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); | 119 | efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); |
120 | 120 | ||
121 | /* Disable port 1 outputs on IO expander */ | 121 | /* Disable port 1 outputs on IO expander */ |
122 | cfg = 0xff; | 122 | cfg = 0xff; |
123 | (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1); | 123 | efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1); |
124 | 124 | ||
125 | /* Disable port 0 outputs on IO expander */ | 125 | /* Disable port 0 outputs on IO expander */ |
126 | cfg = 0xff; | 126 | cfg = 0xff; |
127 | (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1); | 127 | efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1); |
128 | 128 | ||
129 | /* Clear any over-temperature alert */ | 129 | /* Clear any over-temperature alert */ |
130 | (void) efx_i2c_read(i2c, MAX6647, RSL, &in, 1); | 130 | efx_i2c_read(i2c, MAX6647, RSL, &in, 1); |
131 | } | 131 | } |
132 | 132 | ||
133 | /* The P0_EN_3V3X line on SFE4001 boards (from A2 onward) is connected | 133 | /* The P0_EN_3V3X line on SFE4001 boards (from A2 onward) is connected |
@@ -253,14 +253,14 @@ done: | |||
253 | fail3: | 253 | fail3: |
254 | /* Turn off all power rails */ | 254 | /* Turn off all power rails */ |
255 | out = 0xff; | 255 | out = 0xff; |
256 | (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); | 256 | efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); |
257 | /* Disable port 1 outputs on IO expander */ | 257 | /* Disable port 1 outputs on IO expander */ |
258 | out = 0xff; | 258 | out = 0xff; |
259 | (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, 1); | 259 | efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, 1); |
260 | fail2: | 260 | fail2: |
261 | /* Disable port 0 outputs on IO expander */ | 261 | /* Disable port 0 outputs on IO expander */ |
262 | out = 0xff; | 262 | out = 0xff; |
263 | (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &out, 1); | 263 | efx_i2c_write(i2c, PCA9539, P0_CONFIG, &out, 1); |
264 | fail1: | 264 | fail1: |
265 | return rc; | 265 | return rc; |
266 | } | 266 | } |
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c index b1cd6deec01f..c0146061c326 100644 --- a/drivers/net/sfc/tenxpress.c +++ b/drivers/net/sfc/tenxpress.c | |||
@@ -211,6 +211,8 @@ static int tenxpress_phy_init(struct efx_nic *efx) | |||
211 | int rc = 0; | 211 | int rc = 0; |
212 | 212 | ||
213 | phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); | 213 | phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); |
214 | if (!phy_data) | ||
215 | return -ENOMEM; | ||
214 | efx->phy_data = phy_data; | 216 | efx->phy_data = phy_data; |
215 | 217 | ||
216 | tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL); | 218 | tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL); |
@@ -376,7 +378,7 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx) | |||
376 | * perform a special software reset */ | 378 | * perform a special software reset */ |
377 | if ((phy_data->tx_disabled && !efx->tx_disabled) || | 379 | if ((phy_data->tx_disabled && !efx->tx_disabled) || |
378 | loop_change) { | 380 | loop_change) { |
379 | (void) tenxpress_special_reset(efx); | 381 | tenxpress_special_reset(efx); |
380 | falcon_reset_xaui(efx); | 382 | falcon_reset_xaui(efx); |
381 | } | 383 | } |
382 | 384 | ||
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c index 9b436f5b4888..5cdd082ab8f6 100644 --- a/drivers/net/sfc/tx.c +++ b/drivers/net/sfc/tx.c | |||
@@ -387,7 +387,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) | |||
387 | if (unlikely(tx_queue->stopped)) { | 387 | if (unlikely(tx_queue->stopped)) { |
388 | fill_level = tx_queue->insert_count - tx_queue->read_count; | 388 | fill_level = tx_queue->insert_count - tx_queue->read_count; |
389 | if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) { | 389 | if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) { |
390 | EFX_BUG_ON_PARANOID(!NET_DEV_REGISTERED(efx)); | 390 | EFX_BUG_ON_PARANOID(!efx_dev_registered(efx)); |
391 | 391 | ||
392 | /* Do this under netif_tx_lock(), to avoid racing | 392 | /* Do this under netif_tx_lock(), to avoid racing |
393 | * with efx_xmit(). */ | 393 | * with efx_xmit(). */ |
@@ -639,11 +639,12 @@ static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue, | |||
639 | base_dma = tsoh->dma_addr & PAGE_MASK; | 639 | base_dma = tsoh->dma_addr & PAGE_MASK; |
640 | 640 | ||
641 | p = &tx_queue->tso_headers_free; | 641 | p = &tx_queue->tso_headers_free; |
642 | while (*p != NULL) | 642 | while (*p != NULL) { |
643 | if (((unsigned long)*p & PAGE_MASK) == base_kva) | 643 | if (((unsigned long)*p & PAGE_MASK) == base_kva) |
644 | *p = (*p)->next; | 644 | *p = (*p)->next; |
645 | else | 645 | else |
646 | p = &(*p)->next; | 646 | p = &(*p)->next; |
647 | } | ||
647 | 648 | ||
648 | pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma); | 649 | pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma); |
649 | } | 650 | } |
@@ -939,9 +940,10 @@ static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue, | |||
939 | 940 | ||
940 | /* Allocate a DMA-mapped header buffer. */ | 941 | /* Allocate a DMA-mapped header buffer. */ |
941 | if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) { | 942 | if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) { |
942 | if (tx_queue->tso_headers_free == NULL) | 943 | if (tx_queue->tso_headers_free == NULL) { |
943 | if (efx_tsoh_block_alloc(tx_queue)) | 944 | if (efx_tsoh_block_alloc(tx_queue)) |
944 | return -1; | 945 | return -1; |
946 | } | ||
945 | EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free); | 947 | EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free); |
946 | tsoh = tx_queue->tso_headers_free; | 948 | tsoh = tx_queue->tso_headers_free; |
947 | tx_queue->tso_headers_free = tsoh->next; | 949 | tx_queue->tso_headers_free = tsoh->next; |
@@ -1106,9 +1108,10 @@ static void efx_fini_tso(struct efx_tx_queue *tx_queue) | |||
1106 | { | 1108 | { |
1107 | unsigned i; | 1109 | unsigned i; |
1108 | 1110 | ||
1109 | if (tx_queue->buffer) | 1111 | if (tx_queue->buffer) { |
1110 | for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i) | 1112 | for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i) |
1111 | efx_tsoh_free(tx_queue, &tx_queue->buffer[i]); | 1113 | efx_tsoh_free(tx_queue, &tx_queue->buffer[i]); |
1114 | } | ||
1112 | 1115 | ||
1113 | while (tx_queue->tso_headers_free != NULL) | 1116 | while (tx_queue->tso_headers_free != NULL) |
1114 | efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free, | 1117 | efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free, |
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h index dca62f190198..35ab19c27f8d 100644 --- a/drivers/net/sfc/workarounds.h +++ b/drivers/net/sfc/workarounds.h | |||
@@ -16,7 +16,7 @@ | |||
16 | */ | 16 | */ |
17 | 17 | ||
18 | #define EFX_WORKAROUND_ALWAYS(efx) 1 | 18 | #define EFX_WORKAROUND_ALWAYS(efx) 1 |
19 | #define EFX_WORKAROUND_FALCON_A(efx) (FALCON_REV(efx) <= FALCON_REV_A1) | 19 | #define EFX_WORKAROUND_FALCON_A(efx) (falcon_rev(efx) <= FALCON_REV_A1) |
20 | 20 | ||
21 | /* XAUI resets if link not detected */ | 21 | /* XAUI resets if link not detected */ |
22 | #define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS | 22 | #define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS |
diff --git a/drivers/net/sfc/xfp_phy.c b/drivers/net/sfc/xfp_phy.c index 3b9f9ddbc372..f3684ad28887 100644 --- a/drivers/net/sfc/xfp_phy.c +++ b/drivers/net/sfc/xfp_phy.c | |||
@@ -85,7 +85,9 @@ static int xfp_phy_init(struct efx_nic *efx) | |||
85 | int rc; | 85 | int rc; |
86 | 86 | ||
87 | phy_data = kzalloc(sizeof(struct xfp_phy_data), GFP_KERNEL); | 87 | phy_data = kzalloc(sizeof(struct xfp_phy_data), GFP_KERNEL); |
88 | efx->phy_data = (void *) phy_data; | 88 | if (!phy_data) |
89 | return -ENOMEM; | ||
90 | efx->phy_data = phy_data; | ||
89 | 91 | ||
90 | EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision" | 92 | EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision" |
91 | " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid), | 93 | " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid), |
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index f226bcac7d17..c8a5ef2d75f4 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c | |||
@@ -118,6 +118,7 @@ static const struct pci_device_id sky2_id_table[] = { | |||
118 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4352) }, /* 88E8038 */ | 118 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4352) }, /* 88E8038 */ |
119 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4353) }, /* 88E8039 */ | 119 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4353) }, /* 88E8039 */ |
120 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4354) }, /* 88E8040 */ | 120 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4354) }, /* 88E8040 */ |
121 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4355) }, /* 88E8040T */ | ||
121 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4356) }, /* 88EC033 */ | 122 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4356) }, /* 88EC033 */ |
122 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4357) }, /* 88E8042 */ | 123 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4357) }, /* 88E8042 */ |
123 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x435A) }, /* 88E8048 */ | 124 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x435A) }, /* 88E8048 */ |
@@ -1159,17 +1160,9 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
1159 | } | 1160 | } |
1160 | 1161 | ||
1161 | #ifdef SKY2_VLAN_TAG_USED | 1162 | #ifdef SKY2_VLAN_TAG_USED |
1162 | static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) | 1163 | static void sky2_set_vlan_mode(struct sky2_hw *hw, u16 port, bool onoff) |
1163 | { | 1164 | { |
1164 | struct sky2_port *sky2 = netdev_priv(dev); | 1165 | if (onoff) { |
1165 | struct sky2_hw *hw = sky2->hw; | ||
1166 | u16 port = sky2->port; | ||
1167 | |||
1168 | netif_tx_lock_bh(dev); | ||
1169 | napi_disable(&hw->napi); | ||
1170 | |||
1171 | sky2->vlgrp = grp; | ||
1172 | if (grp) { | ||
1173 | sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), | 1166 | sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), |
1174 | RX_VLAN_STRIP_ON); | 1167 | RX_VLAN_STRIP_ON); |
1175 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), | 1168 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), |
@@ -1180,6 +1173,19 @@ static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp | |||
1180 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), | 1173 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), |
1181 | TX_VLAN_TAG_OFF); | 1174 | TX_VLAN_TAG_OFF); |
1182 | } | 1175 | } |
1176 | } | ||
1177 | |||
1178 | static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) | ||
1179 | { | ||
1180 | struct sky2_port *sky2 = netdev_priv(dev); | ||
1181 | struct sky2_hw *hw = sky2->hw; | ||
1182 | u16 port = sky2->port; | ||
1183 | |||
1184 | netif_tx_lock_bh(dev); | ||
1185 | napi_disable(&hw->napi); | ||
1186 | |||
1187 | sky2->vlgrp = grp; | ||
1188 | sky2_set_vlan_mode(hw, port, grp != NULL); | ||
1183 | 1189 | ||
1184 | sky2_read32(hw, B0_Y2_SP_LISR); | 1190 | sky2_read32(hw, B0_Y2_SP_LISR); |
1185 | napi_enable(&hw->napi); | 1191 | napi_enable(&hw->napi); |
@@ -1418,6 +1424,10 @@ static int sky2_up(struct net_device *dev) | |||
1418 | sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, | 1424 | sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, |
1419 | TX_RING_SIZE - 1); | 1425 | TX_RING_SIZE - 1); |
1420 | 1426 | ||
1427 | #ifdef SKY2_VLAN_TAG_USED | ||
1428 | sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL); | ||
1429 | #endif | ||
1430 | |||
1421 | err = sky2_rx_start(sky2); | 1431 | err = sky2_rx_start(sky2); |
1422 | if (err) | 1432 | if (err) |
1423 | goto err_out; | 1433 | goto err_out; |
@@ -4395,7 +4405,9 @@ static int sky2_resume(struct pci_dev *pdev) | |||
4395 | if (err) { | 4405 | if (err) { |
4396 | printk(KERN_ERR PFX "%s: could not up: %d\n", | 4406 | printk(KERN_ERR PFX "%s: could not up: %d\n", |
4397 | dev->name, err); | 4407 | dev->name, err); |
4408 | rtnl_lock(); | ||
4398 | dev_close(dev); | 4409 | dev_close(dev); |
4410 | rtnl_unlock(); | ||
4399 | goto out; | 4411 | goto out; |
4400 | } | 4412 | } |
4401 | } | 4413 | } |
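Two things happen in the sky2.c hunks above: the VLAN-offload register writes are factored into sky2_set_vlan_mode() so sky2_up() can reapply them, and the dev_close() call in the resume error path gains the RTNL lock, which dev_close() requires but which PM callbacks are not called with. A kernel-style sketch of the locking fix; the function name is hypothetical:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static void shut_down_on_resume_failure(struct net_device *dev)
{
        /* dev_close() asserts that the RTNL is held; resume handlers do
         * not hold it, so take it around the call. */
        rtnl_lock();
        dev_close(dev);
        rtnl_unlock();
}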
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c index 4e2800205189..e2ee91a6ae7e 100644 --- a/drivers/net/smc911x.c +++ b/drivers/net/smc911x.c | |||
@@ -136,7 +136,6 @@ struct smc911x_local { | |||
136 | 136 | ||
137 | /* work queue */ | 137 | /* work queue */ |
138 | struct work_struct phy_configure; | 138 | struct work_struct phy_configure; |
139 | int work_pending; | ||
140 | 139 | ||
141 | int tx_throttle; | 140 | int tx_throttle; |
142 | spinlock_t lock; | 141 | spinlock_t lock; |
@@ -960,11 +959,11 @@ static void smc911x_phy_configure(struct work_struct *work) | |||
960 | * We should not be called if phy_type is zero. | 959 | * We should not be called if phy_type is zero. |
961 | */ | 960 | */ |
962 | if (lp->phy_type == 0) | 961 | if (lp->phy_type == 0) |
963 | goto smc911x_phy_configure_exit_nolock; | 962 | return; |
964 | 963 | ||
965 | if (smc911x_phy_reset(dev, phyaddr)) { | 964 | if (smc911x_phy_reset(dev, phyaddr)) { |
966 | printk("%s: PHY reset timed out\n", dev->name); | 965 | printk("%s: PHY reset timed out\n", dev->name); |
967 | goto smc911x_phy_configure_exit_nolock; | 966 | return; |
968 | } | 967 | } |
969 | spin_lock_irqsave(&lp->lock, flags); | 968 | spin_lock_irqsave(&lp->lock, flags); |
970 | 969 | ||
@@ -1033,8 +1032,6 @@ static void smc911x_phy_configure(struct work_struct *work) | |||
1033 | 1032 | ||
1034 | smc911x_phy_configure_exit: | 1033 | smc911x_phy_configure_exit: |
1035 | spin_unlock_irqrestore(&lp->lock, flags); | 1034 | spin_unlock_irqrestore(&lp->lock, flags); |
1036 | smc911x_phy_configure_exit_nolock: | ||
1037 | lp->work_pending = 0; | ||
1038 | } | 1035 | } |
1039 | 1036 | ||
1040 | /* | 1037 | /* |
@@ -1356,11 +1353,8 @@ static void smc911x_timeout(struct net_device *dev) | |||
1356 | * smc911x_phy_configure() calls msleep() which calls schedule_timeout() | 1353 | * smc911x_phy_configure() calls msleep() which calls schedule_timeout() |
1357 | * which calls schedule(). Hence we use a work queue. | 1354 | * which calls schedule(). Hence we use a work queue. |
1358 | */ | 1355 | */ |
1359 | if (lp->phy_type != 0) { | 1356 | if (lp->phy_type != 0) |
1360 | if (schedule_work(&lp->phy_configure)) { | 1357 | schedule_work(&lp->phy_configure); |
1361 | lp->work_pending = 1; | ||
1362 | } | ||
1363 | } | ||
1364 | 1358 | ||
1365 | /* We can accept TX packets again */ | 1359 | /* We can accept TX packets again */ |
1366 | dev->trans_start = jiffies; | 1360 | dev->trans_start = jiffies; |
@@ -1531,16 +1525,8 @@ static int smc911x_close(struct net_device *dev) | |||
1531 | if (lp->phy_type != 0) { | 1525 | if (lp->phy_type != 0) { |
1532 | /* We need to ensure that no calls to | 1526 | /* We need to ensure that no calls to |
1533 | * smc911x_phy_configure are pending. | 1527 | * smc911x_phy_configure are pending. |
1534 | |||
1535 | * flush_scheduled_work() cannot be called because we | ||
1536 | * are running with the netlink semaphore held (from | ||
1537 | * devinet_ioctl()) and the pending work queue | ||
1538 | * contains linkwatch_event() (scheduled by | ||
1539 | * netif_carrier_off() above). linkwatch_event() also | ||
1540 | * wants the netlink semaphore. | ||
1541 | */ | 1528 | */ |
1542 | while (lp->work_pending) | 1529 | cancel_work_sync(&lp->phy_configure); |
1543 | schedule(); | ||
1544 | smc911x_phy_powerdown(dev, lp->mii.phy_id); | 1530 | smc911x_phy_powerdown(dev, lp->mii.phy_id); |
1545 | } | 1531 | } |
1546 | 1532 | ||
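The smc911x.c hunks above (and the smc91x.c hunks below) drop the hand-rolled work_pending flag and the schedule()/yield() busy-wait in favour of cancel_work_sync(), which dequeues the work item and, if its handler is already running, blocks until it finishes. A kernel-style sketch with hypothetical names:

#include <linux/workqueue.h>

struct mydev {
        struct work_struct phy_configure;
};

static void mydev_stop_phy_work(struct mydev *md)
{
        /*
         * Removes phy_configure from the queue if still pending and waits
         * for a running instance to return.  Must not be called from the
         * handler itself or while holding a lock the handler takes.
         */
        cancel_work_sync(&md->phy_configure);
}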
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c index a188e33484e6..f2051b209da2 100644 --- a/drivers/net/smc91x.c +++ b/drivers/net/smc91x.c | |||
@@ -1016,15 +1016,8 @@ static void smc_phy_powerdown(struct net_device *dev) | |||
1016 | 1016 | ||
1017 | /* We need to ensure that no calls to smc_phy_configure are | 1017 | /* We need to ensure that no calls to smc_phy_configure are |
1018 | pending. | 1018 | pending. |
1019 | |||
1020 | flush_scheduled_work() cannot be called because we are | ||
1021 | running with the netlink semaphore held (from | ||
1022 | devinet_ioctl()) and the pending work queue contains | ||
1023 | linkwatch_event() (scheduled by netif_carrier_off() | ||
1024 | above). linkwatch_event() also wants the netlink semaphore. | ||
1025 | */ | 1019 | */ |
1026 | while(lp->work_pending) | 1020 | cancel_work_sync(&lp->phy_configure); |
1027 | yield(); | ||
1028 | 1021 | ||
1029 | bmcr = smc_phy_read(dev, phy, MII_BMCR); | 1022 | bmcr = smc_phy_read(dev, phy, MII_BMCR); |
1030 | smc_phy_write(dev, phy, MII_BMCR, bmcr | BMCR_PDOWN); | 1023 | smc_phy_write(dev, phy, MII_BMCR, bmcr | BMCR_PDOWN); |
@@ -1161,7 +1154,6 @@ static void smc_phy_configure(struct work_struct *work) | |||
1161 | smc_phy_configure_exit: | 1154 | smc_phy_configure_exit: |
1162 | SMC_SELECT_BANK(lp, 2); | 1155 | SMC_SELECT_BANK(lp, 2); |
1163 | spin_unlock_irq(&lp->lock); | 1156 | spin_unlock_irq(&lp->lock); |
1164 | lp->work_pending = 0; | ||
1165 | } | 1157 | } |
1166 | 1158 | ||
1167 | /* | 1159 | /* |
@@ -1389,11 +1381,8 @@ static void smc_timeout(struct net_device *dev) | |||
1389 | * smc_phy_configure() calls msleep() which calls schedule_timeout() | 1381 | * smc_phy_configure() calls msleep() which calls schedule_timeout() |
1390 | * which calls schedule(). Hence we use a work queue. | 1382 | * which calls schedule(). Hence we use a work queue. |
1391 | */ | 1383 | */ |
1392 | if (lp->phy_type != 0) { | 1384 | if (lp->phy_type != 0) |
1393 | if (schedule_work(&lp->phy_configure)) { | 1385 | schedule_work(&lp->phy_configure); |
1394 | lp->work_pending = 1; | ||
1395 | } | ||
1396 | } | ||
1397 | 1386 | ||
1398 | /* We can accept TX packets again */ | 1387 | /* We can accept TX packets again */ |
1399 | dev->trans_start = jiffies; | 1388 | dev->trans_start = jiffies; |
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h index 69e97a1cb1c4..8606818653f8 100644 --- a/drivers/net/smc91x.h +++ b/drivers/net/smc91x.h | |||
@@ -93,14 +93,14 @@ | |||
93 | #define SMC_insw(a, r, p, l) insw ((unsigned long *)((a) + (r)), p, l) | 93 | #define SMC_insw(a, r, p, l) insw ((unsigned long *)((a) + (r)), p, l) |
94 | # endif | 94 | # endif |
95 | /* check if the mac in reg is valid */ | 95 | /* check if the mac in reg is valid */ |
96 | #define SMC_GET_MAC_ADDR(addr) \ | 96 | #define SMC_GET_MAC_ADDR(lp, addr) \ |
97 | do { \ | 97 | do { \ |
98 | unsigned int __v; \ | 98 | unsigned int __v; \ |
99 | __v = SMC_inw(ioaddr, ADDR0_REG); \ | 99 | __v = SMC_inw(ioaddr, ADDR0_REG(lp)); \ |
100 | addr[0] = __v; addr[1] = __v >> 8; \ | 100 | addr[0] = __v; addr[1] = __v >> 8; \ |
101 | __v = SMC_inw(ioaddr, ADDR1_REG); \ | 101 | __v = SMC_inw(ioaddr, ADDR1_REG(lp)); \ |
102 | addr[2] = __v; addr[3] = __v >> 8; \ | 102 | addr[2] = __v; addr[3] = __v >> 8; \ |
103 | __v = SMC_inw(ioaddr, ADDR2_REG); \ | 103 | __v = SMC_inw(ioaddr, ADDR2_REG(lp)); \ |
104 | addr[4] = __v; addr[5] = __v >> 8; \ | 104 | addr[4] = __v; addr[5] = __v >> 8; \ |
105 | if (*(u32 *)(&addr[0]) == 0xFFFFFFFF) { \ | 105 | if (*(u32 *)(&addr[0]) == 0xFFFFFFFF) { \ |
106 | random_ether_addr(addr); \ | 106 | random_ether_addr(addr); \ |
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c index b4e7f30ea4e8..1aa425be3067 100644 --- a/drivers/net/sunhme.c +++ b/drivers/net/sunhme.c | |||
@@ -111,7 +111,7 @@ static __inline__ void tx_add_log(struct happy_meal *hp, unsigned int a, unsigne | |||
111 | struct hme_tx_logent *tlp; | 111 | struct hme_tx_logent *tlp; |
112 | unsigned long flags; | 112 | unsigned long flags; |
113 | 113 | ||
114 | save_and_cli(flags); | 114 | local_irq_save(flags); |
115 | tlp = &tx_log[txlog_cur_entry]; | 115 | tlp = &tx_log[txlog_cur_entry]; |
116 | tlp->tstamp = (unsigned int)jiffies; | 116 | tlp->tstamp = (unsigned int)jiffies; |
117 | tlp->tx_new = hp->tx_new; | 117 | tlp->tx_new = hp->tx_new; |
@@ -119,7 +119,7 @@ static __inline__ void tx_add_log(struct happy_meal *hp, unsigned int a, unsigne | |||
119 | tlp->action = a; | 119 | tlp->action = a; |
120 | tlp->status = s; | 120 | tlp->status = s; |
121 | txlog_cur_entry = (txlog_cur_entry + 1) & (TX_LOG_LEN - 1); | 121 | txlog_cur_entry = (txlog_cur_entry + 1) & (TX_LOG_LEN - 1); |
122 | restore_flags(flags); | 122 | local_irq_restore(flags); |
123 | } | 123 | } |
124 | static __inline__ void tx_dump_log(void) | 124 | static __inline__ void tx_dump_log(void) |
125 | { | 125 | { |
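The sunhme.c debug-log hunks replace the long-removed save_and_cli()/restore_flags() pair with local_irq_save()/local_irq_restore(), which mask interrupts on the local CPU around the critical section. A kernel-style sketch (variable and function names are illustrative):

#include <linux/irqflags.h>

static unsigned int log_entries;

static void bump_log(void)
{
        unsigned long flags;

        local_irq_save(flags);          /* mask interrupts on this CPU only */
        log_entries++;                  /* short critical section; note this is
                                         * not SMP-safe on its own */
        local_irq_restore(flags);       /* restore the previous IRQ state */
}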
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 07b3f77e7626..cc4bde852542 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -64,8 +64,8 @@ | |||
64 | 64 | ||
65 | #define DRV_MODULE_NAME "tg3" | 65 | #define DRV_MODULE_NAME "tg3" |
66 | #define PFX DRV_MODULE_NAME ": " | 66 | #define PFX DRV_MODULE_NAME ": " |
67 | #define DRV_MODULE_VERSION "3.92" | 67 | #define DRV_MODULE_VERSION "3.92.1" |
68 | #define DRV_MODULE_RELDATE "May 2, 2008" | 68 | #define DRV_MODULE_RELDATE "June 9, 2008" |
69 | 69 | ||
70 | #define TG3_DEF_MAC_MODE 0 | 70 | #define TG3_DEF_MAC_MODE 0 |
71 | #define TG3_DEF_RX_MODE 0 | 71 | #define TG3_DEF_RX_MODE 0 |
@@ -1295,6 +1295,21 @@ static void tg3_frob_aux_power(struct tg3 *tp) | |||
1295 | GRC_LCLCTRL_GPIO_OUTPUT0 | | 1295 | GRC_LCLCTRL_GPIO_OUTPUT0 | |
1296 | GRC_LCLCTRL_GPIO_OUTPUT1), | 1296 | GRC_LCLCTRL_GPIO_OUTPUT1), |
1297 | 100); | 1297 | 100); |
1298 | } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) { | ||
1299 | /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */ | ||
1300 | u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 | | ||
1301 | GRC_LCLCTRL_GPIO_OE1 | | ||
1302 | GRC_LCLCTRL_GPIO_OE2 | | ||
1303 | GRC_LCLCTRL_GPIO_OUTPUT0 | | ||
1304 | GRC_LCLCTRL_GPIO_OUTPUT1 | | ||
1305 | tp->grc_local_ctrl; | ||
1306 | tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100); | ||
1307 | |||
1308 | grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2; | ||
1309 | tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100); | ||
1310 | |||
1311 | grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0; | ||
1312 | tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100); | ||
1298 | } else { | 1313 | } else { |
1299 | u32 no_gpio2; | 1314 | u32 no_gpio2; |
1300 | u32 grc_local_ctrl = 0; | 1315 | u32 grc_local_ctrl = 0; |
@@ -3168,8 +3183,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) | |||
3168 | err |= tg3_readphy(tp, MII_BMCR, &bmcr); | 3183 | err |= tg3_readphy(tp, MII_BMCR, &bmcr); |
3169 | 3184 | ||
3170 | if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset && | 3185 | if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset && |
3171 | (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) && | 3186 | (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) { |
3172 | tp->link_config.flowctrl == tp->link_config.active_flowctrl) { | ||
3173 | /* do nothing, just check for link up at the end */ | 3187 | /* do nothing, just check for link up at the end */ |
3174 | } else if (tp->link_config.autoneg == AUTONEG_ENABLE) { | 3188 | } else if (tp->link_config.autoneg == AUTONEG_ENABLE) { |
3175 | u32 adv, new_adv; | 3189 | u32 adv, new_adv; |
@@ -8599,7 +8613,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
8599 | (cmd->speed == SPEED_1000)) | 8613 | (cmd->speed == SPEED_1000)) |
8600 | return -EINVAL; | 8614 | return -EINVAL; |
8601 | else if ((cmd->speed == SPEED_1000) && | 8615 | else if ((cmd->speed == SPEED_1000) && |
8602 | (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY)) | 8616 | (tp->tg3_flags & TG3_FLAG_10_100_ONLY)) |
8603 | return -EINVAL; | 8617 | return -EINVAL; |
8604 | 8618 | ||
8605 | tg3_full_lock(tp, 0); | 8619 | tg3_full_lock(tp, 0); |
@@ -11768,6 +11782,15 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
11768 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) | 11782 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) |
11769 | tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; | 11783 | tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; |
11770 | 11784 | ||
11785 | if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) { | ||
11786 | /* Turn off the debug UART. */ | ||
11787 | tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; | ||
11788 | if (tp->tg3_flags2 & TG3_FLG2_IS_NIC) | ||
11789 | /* Keep VMain power. */ | ||
11790 | tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | | ||
11791 | GRC_LCLCTRL_GPIO_OUTPUT0; | ||
11792 | } | ||
11793 | |||
11771 | /* Force the chip into D0. */ | 11794 | /* Force the chip into D0. */ |
11772 | err = tg3_set_power_state(tp, PCI_D0); | 11795 | err = tg3_set_power_state(tp, PCI_D0); |
11773 | if (err) { | 11796 | if (err) { |
diff --git a/drivers/net/tokenring/3c359.h b/drivers/net/tokenring/3c359.h index b880cba0f6fd..74cf8e1a181b 100644 --- a/drivers/net/tokenring/3c359.h +++ b/drivers/net/tokenring/3c359.h | |||
@@ -264,7 +264,7 @@ struct xl_private { | |||
264 | u16 asb; | 264 | u16 asb; |
265 | 265 | ||
266 | u8 __iomem *xl_mmio; | 266 | u8 __iomem *xl_mmio; |
267 | char *xl_card_name; | 267 | const char *xl_card_name; |
268 | struct pci_dev *pdev ; | 268 | struct pci_dev *pdev ; |
269 | 269 | ||
270 | spinlock_t xl_lock ; | 270 | spinlock_t xl_lock ; |
diff --git a/drivers/net/tokenring/olympic.h b/drivers/net/tokenring/olympic.h index c91956310fb2..10fbba08978f 100644 --- a/drivers/net/tokenring/olympic.h +++ b/drivers/net/tokenring/olympic.h | |||
@@ -254,7 +254,7 @@ struct olympic_private { | |||
254 | u8 __iomem *olympic_mmio; | 254 | u8 __iomem *olympic_mmio; |
255 | u8 __iomem *olympic_lap; | 255 | u8 __iomem *olympic_lap; |
256 | struct pci_dev *pdev ; | 256 | struct pci_dev *pdev ; |
257 | char *olympic_card_name ; | 257 | const char *olympic_card_name; |
258 | 258 | ||
259 | spinlock_t olympic_lock ; | 259 | spinlock_t olympic_lock ; |
260 | 260 | ||
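The 3c359.h and olympic.h hunks turn the card-name field into a const char *; it only ever points at string literals, and the qualifier lets the compiler reject accidental writes through it. A tiny sketch, with a hypothetical struct and strings:

struct board_info {
        const char *card_name;  /* points at a string literal, never modified */
};

static void label_board(struct board_info *bi)
{
        bi->card_name = "Token-ring adapter";   /* assigning the pointer is fine */
        /* bi->card_name[0] = 't'; would now be rejected at compile time */
}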
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c index f9d13fa05d64..af8d2c436efd 100644 --- a/drivers/net/tulip/tulip_core.c +++ b/drivers/net/tulip/tulip_core.c | |||
@@ -731,7 +731,7 @@ static void tulip_down (struct net_device *dev) | |||
731 | void __iomem *ioaddr = tp->base_addr; | 731 | void __iomem *ioaddr = tp->base_addr; |
732 | unsigned long flags; | 732 | unsigned long flags; |
733 | 733 | ||
734 | flush_scheduled_work(); | 734 | cancel_work_sync(&tp->media_work); |
735 | 735 | ||
736 | #ifdef CONFIG_TULIP_NAPI | 736 | #ifdef CONFIG_TULIP_NAPI |
737 | napi_disable(&tp->napi); | 737 | napi_disable(&tp->napi); |
@@ -1729,12 +1729,15 @@ static int tulip_suspend (struct pci_dev *pdev, pm_message_t state) | |||
1729 | if (!dev) | 1729 | if (!dev) |
1730 | return -EINVAL; | 1730 | return -EINVAL; |
1731 | 1731 | ||
1732 | if (netif_running(dev)) | 1732 | if (!netif_running(dev)) |
1733 | tulip_down(dev); | 1733 | goto save_state; |
1734 | |||
1735 | tulip_down(dev); | ||
1734 | 1736 | ||
1735 | netif_device_detach(dev); | 1737 | netif_device_detach(dev); |
1736 | free_irq(dev->irq, dev); | 1738 | free_irq(dev->irq, dev); |
1737 | 1739 | ||
1740 | save_state: | ||
1738 | pci_save_state(pdev); | 1741 | pci_save_state(pdev); |
1739 | pci_disable_device(pdev); | 1742 | pci_disable_device(pdev); |
1740 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); | 1743 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); |
@@ -1754,6 +1757,9 @@ static int tulip_resume(struct pci_dev *pdev) | |||
1754 | pci_set_power_state(pdev, PCI_D0); | 1757 | pci_set_power_state(pdev, PCI_D0); |
1755 | pci_restore_state(pdev); | 1758 | pci_restore_state(pdev); |
1756 | 1759 | ||
1760 | if (!netif_running(dev)) | ||
1761 | return 0; | ||
1762 | |||
1757 | if ((retval = pci_enable_device(pdev))) { | 1763 | if ((retval = pci_enable_device(pdev))) { |
1758 | printk (KERN_ERR "tulip: pci_enable_device failed in resume\n"); | 1764 | printk (KERN_ERR "tulip: pci_enable_device failed in resume\n"); |
1759 | return retval; | 1765 | return retval; |
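Besides switching tulip_down() from flush_scheduled_work() to the targeted cancel_work_sync(&tp->media_work), the tulip hunks make suspend and resume skip the datapath work entirely when the interface was never brought up. A kernel-style sketch of the suspend side; names are hypothetical and error handling is elided:

#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/pci.h>

static int example_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);

        if (netif_running(dev)) {
                /* stop the hardware, detach the queue, release the IRQ */
                netif_device_detach(dev);
                free_irq(dev->irq, dev);
        }

        /* PCI housekeeping happens whether or not the NIC was up. */
        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}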
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c index 2511ca7a12aa..e9e628621639 100644 --- a/drivers/net/tulip/uli526x.c +++ b/drivers/net/tulip/uli526x.c | |||
@@ -225,6 +225,9 @@ static void uli526x_set_filter_mode(struct net_device *); | |||
225 | static const struct ethtool_ops netdev_ethtool_ops; | 225 | static const struct ethtool_ops netdev_ethtool_ops; |
226 | static u16 read_srom_word(long, int); | 226 | static u16 read_srom_word(long, int); |
227 | static irqreturn_t uli526x_interrupt(int, void *); | 227 | static irqreturn_t uli526x_interrupt(int, void *); |
228 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
229 | static void uli526x_poll(struct net_device *dev); | ||
230 | #endif | ||
228 | static void uli526x_descriptor_init(struct uli526x_board_info *, unsigned long); | 231 | static void uli526x_descriptor_init(struct uli526x_board_info *, unsigned long); |
229 | static void allocate_rx_buffer(struct uli526x_board_info *); | 232 | static void allocate_rx_buffer(struct uli526x_board_info *); |
230 | static void update_cr6(u32, unsigned long); | 233 | static void update_cr6(u32, unsigned long); |
@@ -339,6 +342,9 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev, | |||
339 | dev->get_stats = &uli526x_get_stats; | 342 | dev->get_stats = &uli526x_get_stats; |
340 | dev->set_multicast_list = &uli526x_set_filter_mode; | 343 | dev->set_multicast_list = &uli526x_set_filter_mode; |
341 | dev->ethtool_ops = &netdev_ethtool_ops; | 344 | dev->ethtool_ops = &netdev_ethtool_ops; |
345 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
346 | dev->poll_controller = &uli526x_poll; | ||
347 | #endif | ||
342 | spin_lock_init(&db->lock); | 348 | spin_lock_init(&db->lock); |
343 | 349 | ||
344 | 350 | ||
@@ -681,8 +687,9 @@ static irqreturn_t uli526x_interrupt(int irq, void *dev_id) | |||
681 | db->cr5_data = inl(ioaddr + DCR5); | 687 | db->cr5_data = inl(ioaddr + DCR5); |
682 | outl(db->cr5_data, ioaddr + DCR5); | 688 | outl(db->cr5_data, ioaddr + DCR5); |
683 | if ( !(db->cr5_data & 0x180c1) ) { | 689 | if ( !(db->cr5_data & 0x180c1) ) { |
684 | spin_unlock_irqrestore(&db->lock, flags); | 690 | /* Restore CR7 to enable interrupt mask */ |
685 | outl(db->cr7_data, ioaddr + DCR7); | 691 | outl(db->cr7_data, ioaddr + DCR7); |
692 | spin_unlock_irqrestore(&db->lock, flags); | ||
686 | return IRQ_HANDLED; | 693 | return IRQ_HANDLED; |
687 | } | 694 | } |
688 | 695 | ||
@@ -715,6 +722,13 @@ static irqreturn_t uli526x_interrupt(int irq, void *dev_id) | |||
715 | return IRQ_HANDLED; | 722 | return IRQ_HANDLED; |
716 | } | 723 | } |
717 | 724 | ||
725 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
726 | static void uli526x_poll(struct net_device *dev) | ||
727 | { | ||
728 | /* ISR grabs the irqsave lock, so this should be safe */ | ||
729 | uli526x_interrupt(dev->irq, dev); | ||
730 | } | ||
731 | #endif | ||
718 | 732 | ||
719 | /* | 733 | /* |
720 | * Free TX resource after TX complete | 734 | * Free TX resource after TX complete |
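The uli526x.c hunk adds a poll_controller hook so netconsole/netpoll can drive the NIC with interrupts disabled by invoking the driver's own interrupt handler directly; that is safe here because the ISR only takes spin_lock_irqsave() locks. A sketch against the old pre-net_device_ops interface this file uses; example_interrupt stands in for the driver's existing ISR:

#include <linux/netdevice.h>
#include <linux/interrupt.h>

static irqreturn_t example_interrupt(int irq, void *dev_id);    /* the driver's ISR */

#ifdef CONFIG_NET_POLL_CONTROLLER
static void example_poll(struct net_device *dev)
{
        /* Called by netpoll with interrupts off; reuse the ISR, which only
         * takes irqsave spinlocks, to reap RX/TX events. */
        example_interrupt(dev->irq, dev);
}
#endif

static void example_setup(struct net_device *dev)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
        dev->poll_controller = example_poll;    /* old-style net_device field */
#endif
}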
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 0ce07a339c7e..7ab94c825b57 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -313,6 +313,21 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, | |||
313 | 313 | ||
314 | switch (tun->flags & TUN_TYPE_MASK) { | 314 | switch (tun->flags & TUN_TYPE_MASK) { |
315 | case TUN_TUN_DEV: | 315 | case TUN_TUN_DEV: |
316 | if (tun->flags & TUN_NO_PI) { | ||
317 | switch (skb->data[0] & 0xf0) { | ||
318 | case 0x40: | ||
319 | pi.proto = htons(ETH_P_IP); | ||
320 | break; | ||
321 | case 0x60: | ||
322 | pi.proto = htons(ETH_P_IPV6); | ||
323 | break; | ||
324 | default: | ||
325 | tun->dev->stats.rx_dropped++; | ||
326 | kfree_skb(skb); | ||
327 | return -EINVAL; | ||
328 | } | ||
329 | } | ||
330 | |||
316 | skb_reset_mac_header(skb); | 331 | skb_reset_mac_header(skb); |
317 | skb->protocol = pi.proto; | 332 | skb->protocol = pi.proto; |
318 | skb->dev = tun->dev; | 333 | skb->dev = tun->dev; |
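The tun.c hunk lets a TUN (layer-3) device work with IFF_NO_PI by classifying each frame from the IP version nibble, the top four bits of the first payload byte, instead of the absent packet-information header; anything that is neither IPv4 nor IPv6 is dropped. A kernel-style sketch of just the classification step:

#include <linux/if_ether.h>
#include <linux/skbuff.h>

/* Returns the ethertype for a raw L3 frame, or 0 for anything whose
 * version nibble is neither IPv4 (0x4?) nor IPv6 (0x6?). */
static __be16 l3_proto_from_first_byte(const struct sk_buff *skb)
{
        switch (skb->data[0] & 0xf0) {
        case 0x40:
                return htons(ETH_P_IP);
        case 0x60:
                return htons(ETH_P_IPV6);
        default:
                return 0;
        }
}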
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index ca0bdac07a78..fb0b918e5ccb 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c | |||
@@ -237,7 +237,7 @@ static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth, | |||
237 | skb->dev = ugeth->dev; | 237 | skb->dev = ugeth->dev; |
238 | 238 | ||
239 | out_be32(&((struct qe_bd __iomem *)bd)->buf, | 239 | out_be32(&((struct qe_bd __iomem *)bd)->buf, |
240 | dma_map_single(NULL, | 240 | dma_map_single(&ugeth->dev->dev, |
241 | skb->data, | 241 | skb->data, |
242 | ugeth->ug_info->uf_info.max_rx_buf_length + | 242 | ugeth->ug_info->uf_info.max_rx_buf_length + |
243 | UCC_GETH_RX_DATA_BUF_ALIGNMENT, | 243 | UCC_GETH_RX_DATA_BUF_ALIGNMENT, |
@@ -2158,7 +2158,7 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth) | |||
2158 | continue; | 2158 | continue; |
2159 | for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) { | 2159 | for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) { |
2160 | if (ugeth->tx_skbuff[i][j]) { | 2160 | if (ugeth->tx_skbuff[i][j]) { |
2161 | dma_unmap_single(NULL, | 2161 | dma_unmap_single(&ugeth->dev->dev, |
2162 | in_be32(&((struct qe_bd __iomem *)bd)->buf), | 2162 | in_be32(&((struct qe_bd __iomem *)bd)->buf), |
2163 | (in_be32((u32 __iomem *)bd) & | 2163 | (in_be32((u32 __iomem *)bd) & |
2164 | BD_LENGTH_MASK), | 2164 | BD_LENGTH_MASK), |
@@ -2186,7 +2186,7 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth) | |||
2186 | bd = ugeth->p_rx_bd_ring[i]; | 2186 | bd = ugeth->p_rx_bd_ring[i]; |
2187 | for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) { | 2187 | for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) { |
2188 | if (ugeth->rx_skbuff[i][j]) { | 2188 | if (ugeth->rx_skbuff[i][j]) { |
2189 | dma_unmap_single(NULL, | 2189 | dma_unmap_single(&ugeth->dev->dev, |
2190 | in_be32(&((struct qe_bd __iomem *)bd)->buf), | 2190 | in_be32(&((struct qe_bd __iomem *)bd)->buf), |
2191 | ugeth->ug_info-> | 2191 | ugeth->ug_info-> |
2192 | uf_info.max_rx_buf_length + | 2192 | uf_info.max_rx_buf_length + |
@@ -3406,7 +3406,8 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3406 | 3406 | ||
3407 | /* set up the buffer descriptor */ | 3407 | /* set up the buffer descriptor */ |
3408 | out_be32(&((struct qe_bd __iomem *)bd)->buf, | 3408 | out_be32(&((struct qe_bd __iomem *)bd)->buf, |
3409 | dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE)); | 3409 | dma_map_single(&ugeth->dev->dev, skb->data, |
3410 | skb->len, DMA_TO_DEVICE)); | ||
3410 | 3411 | ||
3411 | /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */ | 3412 | /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */ |
3412 | 3413 | ||
diff --git a/drivers/net/ucc_geth_ethtool.c b/drivers/net/ucc_geth_ethtool.c index 299b7f176950..f5839c4a5cbd 100644 --- a/drivers/net/ucc_geth_ethtool.c +++ b/drivers/net/ucc_geth_ethtool.c | |||
@@ -73,6 +73,7 @@ static char tx_fw_stat_gstrings[][ETH_GSTRING_LEN] = { | |||
73 | "tx-frames-ok", | 73 | "tx-frames-ok", |
74 | "tx-excessive-differ-frames", | 74 | "tx-excessive-differ-frames", |
75 | "tx-256-511-frames", | 75 | "tx-256-511-frames", |
76 | "tx-512-1023-frames", | ||
76 | "tx-1024-1518-frames", | 77 | "tx-1024-1518-frames", |
77 | "tx-jumbo-frames", | 78 | "tx-jumbo-frames", |
78 | }; | 79 | }; |
@@ -308,7 +309,7 @@ static void uec_get_strings(struct net_device *netdev, u32 stringset, u8 *buf) | |||
308 | buf += UEC_TX_FW_STATS_LEN * ETH_GSTRING_LEN; | 309 | buf += UEC_TX_FW_STATS_LEN * ETH_GSTRING_LEN; |
309 | } | 310 | } |
310 | if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) | 311 | if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) |
311 | memcpy(buf, tx_fw_stat_gstrings, UEC_RX_FW_STATS_LEN * | 312 | memcpy(buf, rx_fw_stat_gstrings, UEC_RX_FW_STATS_LEN * |
312 | ETH_GSTRING_LEN); | 313 | ETH_GSTRING_LEN); |
313 | } | 314 | } |
314 | 315 | ||
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c index dc6f097062df..37ecf845edfe 100644 --- a/drivers/net/usb/asix.c +++ b/drivers/net/usb/asix.c | |||
@@ -1440,6 +1440,10 @@ static const struct usb_device_id products [] = { | |||
1440 | // Belkin F5D5055 | 1440 | // Belkin F5D5055 |
1441 | USB_DEVICE(0x050d, 0x5055), | 1441 | USB_DEVICE(0x050d, 0x5055), |
1442 | .driver_info = (unsigned long) &ax88178_info, | 1442 | .driver_info = (unsigned long) &ax88178_info, |
1443 | }, { | ||
1444 | // Apple USB Ethernet Adapter | ||
1445 | USB_DEVICE(0x05ac, 0x1402), | ||
1446 | .driver_info = (unsigned long) &ax88772_info, | ||
1443 | }, | 1447 | }, |
1444 | { }, // END | 1448 | { }, // END |
1445 | }; | 1449 | }; |
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c index 76752d84a30f..22c17bbacb69 100644 --- a/drivers/net/usb/catc.c +++ b/drivers/net/usb/catc.c | |||
@@ -423,7 +423,10 @@ static int catc_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
423 | 423 | ||
424 | catc->tx_ptr = (((catc->tx_ptr - 1) >> 6) + 1) << 6; | 424 | catc->tx_ptr = (((catc->tx_ptr - 1) >> 6) + 1) << 6; |
425 | tx_buf = catc->tx_buf[catc->tx_idx] + catc->tx_ptr; | 425 | tx_buf = catc->tx_buf[catc->tx_idx] + catc->tx_ptr; |
426 | *((u16*)tx_buf) = (catc->is_f5u011) ? cpu_to_be16((u16)skb->len) : cpu_to_le16((u16)skb->len); | 426 | if (catc->is_f5u011) |
427 | *(__be16 *)tx_buf = cpu_to_be16(skb->len); | ||
428 | else | ||
429 | *(__le16 *)tx_buf = cpu_to_le16(skb->len); | ||
427 | skb_copy_from_linear_data(skb, tx_buf + 2, skb->len); | 430 | skb_copy_from_linear_data(skb, tx_buf + 2, skb->len); |
428 | catc->tx_ptr += skb->len + 2; | 431 | catc->tx_ptr += skb->len + 2; |
429 | 432 | ||
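The catc change splits one expression that mixed cpu_to_be16() and cpu_to_le16() into two correctly typed stores: the F5U011 wants its 2-byte length prefix big-endian, other CATC parts want it little-endian. A user-space sketch of writing such a prefix in either byte order with explicit shifts (put_len() is an illustrative name, not a driver function):

```c
/* Sketch: writing a 16-bit length prefix in big- or little-endian byte
 * order, as the F5U011 vs. other CATC devices require.  Explicit byte
 * shifts avoid any dependence on host endianness. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void put_len(uint8_t *buf, uint16_t len, int big_endian)
{
	if (big_endian) {               /* F5U011-style framing */
		buf[0] = len >> 8;
		buf[1] = len & 0xff;
	} else {                        /* other CATC devices */
		buf[0] = len & 0xff;
		buf[1] = len >> 8;
	}
}

int main(void)
{
	uint8_t frame[64] = { 0 };
	const char payload[] = "hello";
	uint16_t len = sizeof(payload) - 1;

	put_len(frame, len, 1);
	memcpy(frame + 2, payload, len);    /* payload follows the prefix */
	printf("%02x %02x\n", frame[0], frame[1]);   /* 00 05 */
	return 0;
}
```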
diff --git a/drivers/net/usb/cdc_subset.c b/drivers/net/usb/cdc_subset.c index 0ec7936cbe21..c66b9c324f54 100644 --- a/drivers/net/usb/cdc_subset.c +++ b/drivers/net/usb/cdc_subset.c | |||
@@ -218,7 +218,7 @@ static const struct driver_info blob_info = { | |||
218 | /*-------------------------------------------------------------------------*/ | 218 | /*-------------------------------------------------------------------------*/ |
219 | 219 | ||
220 | #ifndef HAVE_HARDWARE | 220 | #ifndef HAVE_HARDWARE |
221 | #error You need to configure some hardware for this driver | 221 | #warning You need to configure some hardware for this driver |
222 | #endif | 222 | #endif |
223 | 223 | ||
224 | /* | 224 | /* |
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c index 0dcfc0310264..7c66b052f55a 100644 --- a/drivers/net/usb/kaweth.c +++ b/drivers/net/usb/kaweth.c | |||
@@ -706,7 +706,7 @@ static void kaweth_kill_urbs(struct kaweth_device *kaweth) | |||
706 | usb_kill_urb(kaweth->rx_urb); | 706 | usb_kill_urb(kaweth->rx_urb); |
707 | usb_kill_urb(kaweth->tx_urb); | 707 | usb_kill_urb(kaweth->tx_urb); |
708 | 708 | ||
709 | flush_scheduled_work(); | 709 | cancel_delayed_work_sync(&kaweth->lowmem_work); |
710 | 710 | ||
711 | /* a scheduled work may have resubmitted, | 711 | /* a scheduled work may have resubmitted, |
712 | we hit them again */ | 712 | we hit them again */ |
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index 21a7785cb8b6..ae467f182c40 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c | |||
@@ -194,7 +194,7 @@ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf) | |||
194 | dev_dbg(&info->control->dev, | 194 | dev_dbg(&info->control->dev, |
195 | "rndis response error, code %d\n", retval); | 195 | "rndis response error, code %d\n", retval); |
196 | } | 196 | } |
197 | msleep(2); | 197 | msleep(20); |
198 | } | 198 | } |
199 | dev_dbg(&info->control->dev, "rndis response timeout\n"); | 199 | dev_dbg(&info->control->dev, "rndis response timeout\n"); |
200 | return -ETIMEDOUT; | 200 | return -ETIMEDOUT; |
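The first rndis_host hunk simply lengthens the pause between polls of the response buffer from 2 ms to 20 ms. The surrounding loop has the usual bounded-poll-with-sleep shape; a user-space sketch of that shape, where check_ready() and wait_for_response() are invented stand-ins for the RNDIS status read:

```c
/* Sketch: a bounded poll loop with a fixed sleep between attempts,
 * returning -ETIMEDOUT when the device never answers. */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static int attempts_left = 3;           /* pretend the reply arrives late */

static int check_ready(void)
{
	return --attempts_left == 0;
}

static int wait_for_response(int max_polls, unsigned int delay_ms)
{
	for (int i = 0; i < max_polls; i++) {
		if (check_ready())
			return 0;
		usleep(delay_ms * 1000);    /* msleep() analogue */
	}
	return -ETIMEDOUT;
}

int main(void)
{
	printf("wait_for_response: %d\n", wait_for_response(1024, 20));
	return 0;
}
```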
@@ -283,8 +283,8 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags) | |||
283 | struct rndis_set_c *set_c; | 283 | struct rndis_set_c *set_c; |
284 | struct rndis_halt *halt; | 284 | struct rndis_halt *halt; |
285 | } u; | 285 | } u; |
286 | u32 tmp, phym_unspec; | 286 | u32 tmp; |
287 | __le32 *phym; | 287 | __le32 phym_unspec, *phym; |
288 | int reply_len; | 288 | int reply_len; |
289 | unsigned char *bp; | 289 | unsigned char *bp; |
290 | 290 | ||
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index f926b5ab3d09..4452306d5328 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -44,9 +44,16 @@ struct virtnet_info | |||
44 | /* The skb we couldn't send because buffers were full. */ | 44 | /* The skb we couldn't send because buffers were full. */ |
45 | struct sk_buff *last_xmit_skb; | 45 | struct sk_buff *last_xmit_skb; |
46 | 46 | ||
47 | /* If we need to free in a timer, this is it. */ | ||
48 | struct timer_list xmit_free_timer; | ||
49 | |||
47 | /* Number of input buffers, and max we've ever had. */ | 50 | /* Number of input buffers, and max we've ever had. */ |
48 | unsigned int num, max; | 51 | unsigned int num, max; |
49 | 52 | ||
53 | /* For cleaning up after transmission. */ | ||
54 | struct tasklet_struct tasklet; | ||
55 | bool free_in_tasklet; | ||
56 | |||
50 | /* Receive & send queues. */ | 57 | /* Receive & send queues. */ |
51 | struct sk_buff_head recv; | 58 | struct sk_buff_head recv; |
52 | struct sk_buff_head send; | 59 | struct sk_buff_head send; |
@@ -68,8 +75,13 @@ static void skb_xmit_done(struct virtqueue *svq) | |||
68 | 75 | ||
69 | /* Suppress further interrupts. */ | 76 | /* Suppress further interrupts. */ |
70 | svq->vq_ops->disable_cb(svq); | 77 | svq->vq_ops->disable_cb(svq); |
71 | /* We were waiting for more output buffers. */ | 78 | |
79 | /* We were probably waiting for more output buffers. */ | ||
72 | netif_wake_queue(vi->dev); | 80 | netif_wake_queue(vi->dev); |
81 | |||
82 | /* Make sure we re-xmit last_xmit_skb: if there are no more packets | ||
83 | * queued, start_xmit won't be called. */ | ||
84 | tasklet_schedule(&vi->tasklet); | ||
73 | } | 85 | } |
74 | 86 | ||
75 | static void receive_skb(struct net_device *dev, struct sk_buff *skb, | 87 | static void receive_skb(struct net_device *dev, struct sk_buff *skb, |
@@ -86,9 +98,7 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb, | |||
86 | BUG_ON(len > MAX_PACKET_LEN); | 98 | BUG_ON(len > MAX_PACKET_LEN); |
87 | 99 | ||
88 | skb_trim(skb, len); | 100 | skb_trim(skb, len); |
89 | skb->protocol = eth_type_trans(skb, dev); | 101 | |
90 | pr_debug("Receiving skb proto 0x%04x len %i type %i\n", | ||
91 | ntohs(skb->protocol), skb->len, skb->pkt_type); | ||
92 | dev->stats.rx_bytes += skb->len; | 102 | dev->stats.rx_bytes += skb->len; |
93 | dev->stats.rx_packets++; | 103 | dev->stats.rx_packets++; |
94 | 104 | ||
@@ -98,6 +108,10 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb, | |||
98 | goto frame_err; | 108 | goto frame_err; |
99 | } | 109 | } |
100 | 110 | ||
111 | skb->protocol = eth_type_trans(skb, dev); | ||
112 | pr_debug("Receiving skb proto 0x%04x len %i type %i\n", | ||
113 | ntohs(skb->protocol), skb->len, skb->pkt_type); | ||
114 | |||
101 | if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { | 115 | if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { |
102 | pr_debug("GSO!\n"); | 116 | pr_debug("GSO!\n"); |
103 | switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { | 117 | switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { |
@@ -230,9 +244,25 @@ static void free_old_xmit_skbs(struct virtnet_info *vi) | |||
230 | } | 244 | } |
231 | } | 245 | } |
232 | 246 | ||
247 | /* If the virtio transport doesn't always notify us when all in-flight packets | ||
248 | * are consumed, we fall back to using this function on a timer to free them. */ | ||
249 | static void xmit_free(unsigned long data) | ||
250 | { | ||
251 | struct virtnet_info *vi = (void *)data; | ||
252 | |||
253 | netif_tx_lock(vi->dev); | ||
254 | |||
255 | free_old_xmit_skbs(vi); | ||
256 | |||
257 | if (!skb_queue_empty(&vi->send)) | ||
258 | mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10)); | ||
259 | |||
260 | netif_tx_unlock(vi->dev); | ||
261 | } | ||
262 | |||
233 | static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb) | 263 | static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb) |
234 | { | 264 | { |
235 | int num; | 265 | int num, err; |
236 | struct scatterlist sg[2+MAX_SKB_FRAGS]; | 266 | struct scatterlist sg[2+MAX_SKB_FRAGS]; |
237 | struct virtio_net_hdr *hdr; | 267 | struct virtio_net_hdr *hdr; |
238 | const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; | 268 | const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; |
@@ -275,7 +305,25 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb) | |||
275 | vnet_hdr_to_sg(sg, skb); | 305 | vnet_hdr_to_sg(sg, skb); |
276 | num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1; | 306 | num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1; |
277 | 307 | ||
278 | return vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb); | 308 | err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb); |
309 | if (!err && !vi->free_in_tasklet) | ||
310 | mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10)); | ||
311 | |||
312 | return err; | ||
313 | } | ||
314 | |||
315 | static void xmit_tasklet(unsigned long data) | ||
316 | { | ||
317 | struct virtnet_info *vi = (void *)data; | ||
318 | |||
319 | netif_tx_lock_bh(vi->dev); | ||
320 | if (vi->last_xmit_skb && xmit_skb(vi, vi->last_xmit_skb) == 0) { | ||
321 | vi->svq->vq_ops->kick(vi->svq); | ||
322 | vi->last_xmit_skb = NULL; | ||
323 | } | ||
324 | if (vi->free_in_tasklet) | ||
325 | free_old_xmit_skbs(vi); | ||
326 | netif_tx_unlock_bh(vi->dev); | ||
279 | } | 327 | } |
280 | 328 | ||
281 | static int start_xmit(struct sk_buff *skb, struct net_device *dev) | 329 | static int start_xmit(struct sk_buff *skb, struct net_device *dev) |
@@ -287,21 +335,25 @@ again: | |||
287 | free_old_xmit_skbs(vi); | 335 | free_old_xmit_skbs(vi); |
288 | 336 | ||
289 | /* If we has a buffer left over from last time, send it now. */ | 337 | /* If we has a buffer left over from last time, send it now. */ |
289 | /* If we have a buffer left over from last time, send it now. */ | 337 | |
290 | if (vi->last_xmit_skb) { | 338 | if (unlikely(vi->last_xmit_skb)) { |
291 | if (xmit_skb(vi, vi->last_xmit_skb) != 0) { | 339 | if (xmit_skb(vi, vi->last_xmit_skb) != 0) { |
292 | /* Drop this skb: we only queue one. */ | 340 | /* Drop this skb: we only queue one. */ |
293 | vi->dev->stats.tx_dropped++; | 341 | vi->dev->stats.tx_dropped++; |
294 | kfree_skb(skb); | 342 | kfree_skb(skb); |
343 | skb = NULL; | ||
295 | goto stop_queue; | 344 | goto stop_queue; |
296 | } | 345 | } |
297 | vi->last_xmit_skb = NULL; | 346 | vi->last_xmit_skb = NULL; |
298 | } | 347 | } |
299 | 348 | ||
300 | /* Put new one in send queue and do transmit */ | 349 | /* Put new one in send queue and do transmit */ |
301 | __skb_queue_head(&vi->send, skb); | 350 | if (likely(skb)) { |
302 | if (xmit_skb(vi, skb) != 0) { | 351 | __skb_queue_head(&vi->send, skb); |
303 | vi->last_xmit_skb = skb; | 352 | if (xmit_skb(vi, skb) != 0) { |
304 | goto stop_queue; | 353 | vi->last_xmit_skb = skb; |
354 | skb = NULL; | ||
355 | goto stop_queue; | ||
356 | } | ||
305 | } | 357 | } |
306 | done: | 358 | done: |
307 | vi->svq->vq_ops->kick(vi->svq); | 359 | vi->svq->vq_ops->kick(vi->svq); |
@@ -411,6 +463,10 @@ static int virtnet_probe(struct virtio_device *vdev) | |||
411 | vi->vdev = vdev; | 463 | vi->vdev = vdev; |
412 | vdev->priv = vi; | 464 | vdev->priv = vi; |
413 | 465 | ||
466 | /* If they give us a callback when all buffers are done, we don't need | ||
467 | * the timer. */ | ||
468 | vi->free_in_tasklet = virtio_has_feature(vdev,VIRTIO_F_NOTIFY_ON_EMPTY); | ||
469 | |||
414 | /* We expect two virtqueues, receive then send. */ | 470 | /* We expect two virtqueues, receive then send. */ |
415 | vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done); | 471 | vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done); |
416 | if (IS_ERR(vi->rvq)) { | 472 | if (IS_ERR(vi->rvq)) { |
@@ -428,6 +484,11 @@ static int virtnet_probe(struct virtio_device *vdev) | |||
428 | skb_queue_head_init(&vi->recv); | 484 | skb_queue_head_init(&vi->recv); |
429 | skb_queue_head_init(&vi->send); | 485 | skb_queue_head_init(&vi->send); |
430 | 486 | ||
487 | tasklet_init(&vi->tasklet, xmit_tasklet, (unsigned long)vi); | ||
488 | |||
489 | if (!vi->free_in_tasklet) | ||
490 | setup_timer(&vi->xmit_free_timer, xmit_free, (unsigned long)vi); | ||
491 | |||
431 | err = register_netdev(dev); | 492 | err = register_netdev(dev); |
432 | if (err) { | 493 | if (err) { |
433 | pr_debug("virtio_net: registering device failed\n"); | 494 | pr_debug("virtio_net: registering device failed\n"); |
@@ -465,13 +526,15 @@ static void virtnet_remove(struct virtio_device *vdev) | |||
465 | /* Stop all the virtqueues. */ | 526 | /* Stop all the virtqueues. */ |
466 | vdev->config->reset(vdev); | 527 | vdev->config->reset(vdev); |
467 | 528 | ||
529 | if (!vi->free_in_tasklet) | ||
530 | del_timer_sync(&vi->xmit_free_timer); | ||
531 | |||
468 | /* Free our skbs in send and recv queues, if any. */ | 532 | /* Free our skbs in send and recv queues, if any. */ |
469 | while ((skb = __skb_dequeue(&vi->recv)) != NULL) { | 533 | while ((skb = __skb_dequeue(&vi->recv)) != NULL) { |
470 | kfree_skb(skb); | 534 | kfree_skb(skb); |
471 | vi->num--; | 535 | vi->num--; |
472 | } | 536 | } |
473 | while ((skb = __skb_dequeue(&vi->send)) != NULL) | 537 | __skb_queue_purge(&vi->send); |
474 | kfree_skb(skb); | ||
475 | 538 | ||
476 | BUG_ON(vi->num != 0); | 539 | BUG_ON(vi->num != 0); |
477 | 540 | ||
@@ -489,7 +552,7 @@ static struct virtio_device_id id_table[] = { | |||
489 | static unsigned int features[] = { | 552 | static unsigned int features[] = { |
490 | VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, | 553 | VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, |
491 | VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, | 554 | VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, |
492 | VIRTIO_NET_F_HOST_ECN, | 555 | VIRTIO_NET_F_HOST_ECN, VIRTIO_F_NOTIFY_ON_EMPTY, |
493 | }; | 556 | }; |
494 | 557 | ||
495 | static struct virtio_driver virtio_net = { | 558 | static struct virtio_driver virtio_net = { |
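The virtio_net changes add a fallback for transports that never signal an empty ring: a timer periodically frees completed tx buffers and re-arms itself while packets remain in flight, and the timer is skipped entirely when VIRTIO_F_NOTIFY_ON_EMPTY is negotiated. The sketch below models only the re-arm-while-pending sweep in user space, with a plain loop standing in for mod_timer(); in_flight, completed and sweep() are illustrative names:

```c
/* Sketch: periodic reclamation that keeps re-arming itself while work
 * is still pending, loosely modelled on xmit_free() above.  Ticks are
 * simulated with a loop instead of a kernel timer. */
#include <stdbool.h>
#include <stdio.h>

#define RING 8

static bool in_flight[RING];
static bool completed[RING];

/* Free everything the "device" has finished with; return whether
 * anything is still pending (i.e. the timer should be re-armed). */
static bool sweep(void)
{
	bool pending = false;

	for (int i = 0; i < RING; i++) {
		if (in_flight[i] && completed[i])
			in_flight[i] = false;       /* kfree_skb() analogue */
		if (in_flight[i])
			pending = true;
	}
	return pending;
}

int main(void)
{
	for (int i = 0; i < RING; i++)
		in_flight[i] = true;                /* queue 8 packets */

	for (int tick = 1; ; tick++) {              /* simulated timer */
		completed[tick % RING] = true;      /* device finishes one */
		if (!sweep()) {
			printf("all reclaimed after %d ticks\n", tick);
			break;                      /* nothing pending: stop re-arming */
		}
	}
	return 0;
}
```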
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c index 9a83c9d5b8cf..7f984895b0d5 100644 --- a/drivers/net/wan/hdlc.c +++ b/drivers/net/wan/hdlc.c | |||
@@ -43,8 +43,7 @@ static const char* version = "HDLC support module revision 1.22"; | |||
43 | 43 | ||
44 | #undef DEBUG_LINK | 44 | #undef DEBUG_LINK |
45 | 45 | ||
46 | static struct hdlc_proto *first_proto = NULL; | 46 | static struct hdlc_proto *first_proto; |
47 | |||
48 | 47 | ||
49 | static int hdlc_change_mtu(struct net_device *dev, int new_mtu) | 48 | static int hdlc_change_mtu(struct net_device *dev, int new_mtu) |
50 | { | 49 | { |
@@ -314,21 +313,25 @@ void detach_hdlc_protocol(struct net_device *dev) | |||
314 | 313 | ||
315 | void register_hdlc_protocol(struct hdlc_proto *proto) | 314 | void register_hdlc_protocol(struct hdlc_proto *proto) |
316 | { | 315 | { |
316 | rtnl_lock(); | ||
317 | proto->next = first_proto; | 317 | proto->next = first_proto; |
318 | first_proto = proto; | 318 | first_proto = proto; |
319 | rtnl_unlock(); | ||
319 | } | 320 | } |
320 | 321 | ||
321 | 322 | ||
322 | void unregister_hdlc_protocol(struct hdlc_proto *proto) | 323 | void unregister_hdlc_protocol(struct hdlc_proto *proto) |
323 | { | 324 | { |
324 | struct hdlc_proto **p = &first_proto; | 325 | struct hdlc_proto **p; |
325 | while (*p) { | 326 | |
326 | if (*p == proto) { | 327 | rtnl_lock(); |
327 | *p = proto->next; | 328 | p = &first_proto; |
328 | return; | 329 | while (*p != proto) { |
329 | } | 330 | BUG_ON(!*p); |
330 | p = &((*p)->next); | 331 | p = &((*p)->next); |
331 | } | 332 | } |
333 | *p = proto->next; | ||
334 | rtnl_unlock(); | ||
332 | } | 335 | } |
333 | 336 | ||
334 | 337 | ||
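The hdlc.c hunk serializes protocol (un)registration under rtnl_lock and unlinks entries from the singly linked list through a pointer-to-pointer walk, hitting BUG_ON() if the protocol was never registered. A self-contained user-space version of the same walk, with a pthread mutex standing in for rtnl_lock (build with cc -pthread):

```c
/* Sketch: register/unregister on a singly linked list using a
 * pointer-to-pointer walk, serialized by a mutex (rtnl_lock analogue). */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

struct proto {
	const char *name;
	struct proto *next;
};

static struct proto *first_proto;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void register_proto(struct proto *p)
{
	pthread_mutex_lock(&list_lock);
	p->next = first_proto;
	first_proto = p;
	pthread_mutex_unlock(&list_lock);
}

static void unregister_proto(struct proto *p)
{
	struct proto **pp;

	pthread_mutex_lock(&list_lock);
	pp = &first_proto;
	while (*pp != p) {
		assert(*pp);                /* BUG_ON(!*p) analogue */
		pp = &(*pp)->next;
	}
	*pp = p->next;                      /* unlink without a "prev" pointer */
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	struct proto a = { "cisco", NULL }, b = { "fr", NULL };

	register_proto(&a);
	register_proto(&b);
	unregister_proto(&a);
	printf("head: %s\n", first_proto->name);    /* "fr" */
	return 0;
}
```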
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c index 7133c688cf20..762d21c1c703 100644 --- a/drivers/net/wan/hdlc_cisco.c +++ b/drivers/net/wan/hdlc_cisco.c | |||
@@ -56,6 +56,7 @@ struct cisco_state { | |||
56 | cisco_proto settings; | 56 | cisco_proto settings; |
57 | 57 | ||
58 | struct timer_list timer; | 58 | struct timer_list timer; |
59 | spinlock_t lock; | ||
59 | unsigned long last_poll; | 60 | unsigned long last_poll; |
60 | int up; | 61 | int up; |
61 | int request_sent; | 62 | int request_sent; |
@@ -158,6 +159,7 @@ static int cisco_rx(struct sk_buff *skb) | |||
158 | { | 159 | { |
159 | struct net_device *dev = skb->dev; | 160 | struct net_device *dev = skb->dev; |
160 | hdlc_device *hdlc = dev_to_hdlc(dev); | 161 | hdlc_device *hdlc = dev_to_hdlc(dev); |
162 | struct cisco_state *st = state(hdlc); | ||
161 | struct hdlc_header *data = (struct hdlc_header*)skb->data; | 163 | struct hdlc_header *data = (struct hdlc_header*)skb->data; |
162 | struct cisco_packet *cisco_data; | 164 | struct cisco_packet *cisco_data; |
163 | struct in_device *in_dev; | 165 | struct in_device *in_dev; |
@@ -220,11 +222,12 @@ static int cisco_rx(struct sk_buff *skb) | |||
220 | goto rx_error; | 222 | goto rx_error; |
221 | 223 | ||
222 | case CISCO_KEEPALIVE_REQ: | 224 | case CISCO_KEEPALIVE_REQ: |
223 | state(hdlc)->rxseq = ntohl(cisco_data->par1); | 225 | spin_lock(&st->lock); |
224 | if (state(hdlc)->request_sent && | 226 | st->rxseq = ntohl(cisco_data->par1); |
225 | ntohl(cisco_data->par2) == state(hdlc)->txseq) { | 227 | if (st->request_sent && |
226 | state(hdlc)->last_poll = jiffies; | 228 | ntohl(cisco_data->par2) == st->txseq) { |
227 | if (!state(hdlc)->up) { | 229 | st->last_poll = jiffies; |
230 | if (!st->up) { | ||
228 | u32 sec, min, hrs, days; | 231 | u32 sec, min, hrs, days; |
229 | sec = ntohl(cisco_data->time) / 1000; | 232 | sec = ntohl(cisco_data->time) / 1000; |
230 | min = sec / 60; sec -= min * 60; | 233 | min = sec / 60; sec -= min * 60; |
@@ -232,12 +235,12 @@ static int cisco_rx(struct sk_buff *skb) | |||
232 | days = hrs / 24; hrs -= days * 24; | 235 | days = hrs / 24; hrs -= days * 24; |
233 | printk(KERN_INFO "%s: Link up (peer " | 236 | printk(KERN_INFO "%s: Link up (peer " |
234 | "uptime %ud%uh%um%us)\n", | 237 | "uptime %ud%uh%um%us)\n", |
235 | dev->name, days, hrs, | 238 | dev->name, days, hrs, min, sec); |
236 | min, sec); | ||
237 | netif_dormant_off(dev); | 239 | netif_dormant_off(dev); |
238 | state(hdlc)->up = 1; | 240 | st->up = 1; |
239 | } | 241 | } |
240 | } | 242 | } |
243 | spin_unlock(&st->lock); | ||
241 | 244 | ||
242 | dev_kfree_skb_any(skb); | 245 | dev_kfree_skb_any(skb); |
243 | return NET_RX_SUCCESS; | 246 | return NET_RX_SUCCESS; |
@@ -261,24 +264,25 @@ static void cisco_timer(unsigned long arg) | |||
261 | { | 264 | { |
262 | struct net_device *dev = (struct net_device *)arg; | 265 | struct net_device *dev = (struct net_device *)arg; |
263 | hdlc_device *hdlc = dev_to_hdlc(dev); | 266 | hdlc_device *hdlc = dev_to_hdlc(dev); |
267 | struct cisco_state *st = state(hdlc); | ||
264 | 268 | ||
265 | if (state(hdlc)->up && | 269 | spin_lock(&st->lock); |
266 | time_after(jiffies, state(hdlc)->last_poll + | 270 | if (st->up && |
267 | state(hdlc)->settings.timeout * HZ)) { | 271 | time_after(jiffies, st->last_poll + st->settings.timeout * HZ)) { |
268 | state(hdlc)->up = 0; | 272 | st->up = 0; |
269 | printk(KERN_INFO "%s: Link down\n", dev->name); | 273 | printk(KERN_INFO "%s: Link down\n", dev->name); |
270 | netif_dormant_on(dev); | 274 | netif_dormant_on(dev); |
271 | } | 275 | } |
272 | 276 | ||
273 | cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, | 277 | cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, htonl(++st->txseq), |
274 | htonl(++state(hdlc)->txseq), | 278 | htonl(st->rxseq)); |
275 | htonl(state(hdlc)->rxseq)); | 279 | st->request_sent = 1; |
276 | state(hdlc)->request_sent = 1; | 280 | spin_unlock(&st->lock); |
277 | state(hdlc)->timer.expires = jiffies + | 281 | |
278 | state(hdlc)->settings.interval * HZ; | 282 | st->timer.expires = jiffies + st->settings.interval * HZ; |
279 | state(hdlc)->timer.function = cisco_timer; | 283 | st->timer.function = cisco_timer; |
280 | state(hdlc)->timer.data = arg; | 284 | st->timer.data = arg; |
281 | add_timer(&state(hdlc)->timer); | 285 | add_timer(&st->timer); |
282 | } | 286 | } |
283 | 287 | ||
284 | 288 | ||
@@ -286,15 +290,20 @@ static void cisco_timer(unsigned long arg) | |||
286 | static void cisco_start(struct net_device *dev) | 290 | static void cisco_start(struct net_device *dev) |
287 | { | 291 | { |
288 | hdlc_device *hdlc = dev_to_hdlc(dev); | 292 | hdlc_device *hdlc = dev_to_hdlc(dev); |
289 | state(hdlc)->up = 0; | 293 | struct cisco_state *st = state(hdlc); |
290 | state(hdlc)->request_sent = 0; | 294 | unsigned long flags; |
291 | state(hdlc)->txseq = state(hdlc)->rxseq = 0; | 295 | |
292 | 296 | spin_lock_irqsave(&st->lock, flags); | |
293 | init_timer(&state(hdlc)->timer); | 297 | st->up = 0; |
294 | state(hdlc)->timer.expires = jiffies + HZ; /*First poll after 1s*/ | 298 | st->request_sent = 0; |
295 | state(hdlc)->timer.function = cisco_timer; | 299 | st->txseq = st->rxseq = 0; |
296 | state(hdlc)->timer.data = (unsigned long)dev; | 300 | spin_unlock_irqrestore(&st->lock, flags); |
297 | add_timer(&state(hdlc)->timer); | 301 | |
302 | init_timer(&st->timer); | ||
303 | st->timer.expires = jiffies + HZ; /* First poll after 1 s */ | ||
304 | st->timer.function = cisco_timer; | ||
305 | st->timer.data = (unsigned long)dev; | ||
306 | add_timer(&st->timer); | ||
298 | } | 307 | } |
299 | 308 | ||
300 | 309 | ||
@@ -302,10 +311,16 @@ static void cisco_start(struct net_device *dev) | |||
302 | static void cisco_stop(struct net_device *dev) | 311 | static void cisco_stop(struct net_device *dev) |
303 | { | 312 | { |
304 | hdlc_device *hdlc = dev_to_hdlc(dev); | 313 | hdlc_device *hdlc = dev_to_hdlc(dev); |
305 | del_timer_sync(&state(hdlc)->timer); | 314 | struct cisco_state *st = state(hdlc); |
315 | unsigned long flags; | ||
316 | |||
317 | del_timer_sync(&st->timer); | ||
318 | |||
319 | spin_lock_irqsave(&st->lock, flags); | ||
306 | netif_dormant_on(dev); | 320 | netif_dormant_on(dev); |
307 | state(hdlc)->up = 0; | 321 | st->up = 0; |
308 | state(hdlc)->request_sent = 0; | 322 | st->request_sent = 0; |
323 | spin_unlock_irqrestore(&st->lock, flags); | ||
309 | } | 324 | } |
310 | 325 | ||
311 | 326 | ||
@@ -367,6 +382,7 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
367 | return result; | 382 | return result; |
368 | 383 | ||
369 | memcpy(&state(hdlc)->settings, &new_settings, size); | 384 | memcpy(&state(hdlc)->settings, &new_settings, size); |
385 | spin_lock_init(&state(hdlc)->lock); | ||
370 | dev->hard_start_xmit = hdlc->xmit; | 386 | dev->hard_start_xmit = hdlc->xmit; |
371 | dev->header_ops = &cisco_header_ops; | 387 | dev->header_ops = &cisco_header_ops; |
372 | dev->type = ARPHRD_CISCO; | 388 | dev->type = ARPHRD_CISCO; |
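cisco_timer() above declares the link down once jiffies moves past last_poll + timeout * HZ, using time_after() so the comparison stays correct across a counter wrap. A user-space sketch of that wrap-safe comparison on 32-bit tick counters (tick_after() and link_timed_out() are illustrative names):

```c
/* Sketch: a time_after()-style comparison that stays correct when the
 * tick counter wraps, applied to a keepalive timeout check. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True if a is after b, even across a 32-bit wrap. */
static bool tick_after(uint32_t a, uint32_t b)
{
	return (int32_t)(b - a) < 0;
}

static bool link_timed_out(uint32_t now, uint32_t last_poll,
			   uint32_t timeout_ticks)
{
	return tick_after(now, last_poll + timeout_ticks);
}

int main(void)
{
	/* last_poll just before the counter wraps, "now" just after it. */
	uint32_t last_poll = 0xfffffff0u;
	uint32_t timeout   = 0x20;

	printf("%d\n", link_timed_out(0x00000005u, last_poll, timeout)); /* 0 */
	printf("%d\n", link_timed_out(0x00000020u, last_poll, timeout)); /* 1 */
	return 0;
}
```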
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index 45f47c1c0a35..32019fb878d8 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c | |||
@@ -2668,6 +2668,7 @@ static struct net_device *init_wifidev(struct airo_info *ai, | |||
2668 | dev->irq = ethdev->irq; | 2668 | dev->irq = ethdev->irq; |
2669 | dev->base_addr = ethdev->base_addr; | 2669 | dev->base_addr = ethdev->base_addr; |
2670 | dev->wireless_data = ethdev->wireless_data; | 2670 | dev->wireless_data = ethdev->wireless_data; |
2671 | SET_NETDEV_DEV(dev, ethdev->dev.parent); | ||
2671 | memcpy(dev->dev_addr, ethdev->dev_addr, dev->addr_len); | 2672 | memcpy(dev->dev_addr, ethdev->dev_addr, dev->addr_len); |
2672 | err = register_netdev(dev); | 2673 | err = register_netdev(dev); |
2673 | if (err<0) { | 2674 | if (err<0) { |
@@ -2904,7 +2905,7 @@ EXPORT_SYMBOL(init_airo_card); | |||
2904 | 2905 | ||
2905 | static int waitbusy (struct airo_info *ai) { | 2906 | static int waitbusy (struct airo_info *ai) { |
2906 | int delay = 0; | 2907 | int delay = 0; |
2907 | while ((IN4500 (ai, COMMAND) & COMMAND_BUSY) & (delay < 10000)) { | 2908 | while ((IN4500(ai, COMMAND) & COMMAND_BUSY) && (delay < 10000)) { |
2908 | udelay (10); | 2909 | udelay (10); |
2909 | if ((++delay % 20) == 0) | 2910 | if ((++delay % 20) == 0) |
2910 | OUT4500(ai, EVACK, EV_CLEARCOMMANDBUSY); | 2911 | OUT4500(ai, EVACK, EV_CLEARCOMMANDBUSY); |
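The airo fix replaces a bitwise '&' between the busy test and the delay bound with a logical '&&'. With '&', the masked status bits are ANDed against the 0/1 result of the comparison, so a busy flag that is not bit 0 makes the whole condition false and the wait loop never runs. A two-line demonstration (the 0x8000 value for COMMAND_BUSY is assumed purely for illustration):

```c
/* Sketch: why the waitbusy() fix matters — bitwise '&' mixes flag bits
 * with a 0/1 comparison result and loses the busy indication. */
#include <stdio.h>

#define COMMAND_BUSY 0x8000

int main(void)
{
	int status = COMMAND_BUSY;      /* controller still busy */
	int delay = 0;

	printf("bitwise  &: %d\n", (status & COMMAND_BUSY) & (delay < 10000));  /* 0 */
	printf("logical &&: %d\n", (status & COMMAND_BUSY) && (delay < 10000)); /* 1 */
	return 0;
}
```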
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c index 4e5c8fc35200..635b9ac9aaa1 100644 --- a/drivers/net/wireless/ath5k/base.c +++ b/drivers/net/wireless/ath5k/base.c | |||
@@ -1787,6 +1787,8 @@ ath5k_tasklet_rx(unsigned long data) | |||
1787 | 1787 | ||
1788 | spin_lock(&sc->rxbuflock); | 1788 | spin_lock(&sc->rxbuflock); |
1789 | do { | 1789 | do { |
1790 | rxs.flag = 0; | ||
1791 | |||
1790 | if (unlikely(list_empty(&sc->rxbuf))) { | 1792 | if (unlikely(list_empty(&sc->rxbuf))) { |
1791 | ATH5K_WARN(sc, "empty rx buf pool\n"); | 1793 | ATH5K_WARN(sc, "empty rx buf pool\n"); |
1792 | break; | 1794 | break; |
diff --git a/drivers/net/wireless/ath5k/hw.c b/drivers/net/wireless/ath5k/hw.c index 5fb1ae6ad3e2..77990b56860b 100644 --- a/drivers/net/wireless/ath5k/hw.c +++ b/drivers/net/wireless/ath5k/hw.c | |||
@@ -4119,6 +4119,7 @@ static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah, | |||
4119 | rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1, | 4119 | rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1, |
4120 | AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP); | 4120 | AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP); |
4121 | rs->rs_status = 0; | 4121 | rs->rs_status = 0; |
4122 | rs->rs_phyerr = 0; | ||
4122 | 4123 | ||
4123 | /* | 4124 | /* |
4124 | * Key table status | 4125 | * Key table status |
@@ -4145,7 +4146,7 @@ static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah, | |||
4145 | if (rx_status->rx_status_1 & | 4146 | if (rx_status->rx_status_1 & |
4146 | AR5K_5210_RX_DESC_STATUS1_PHY_ERROR) { | 4147 | AR5K_5210_RX_DESC_STATUS1_PHY_ERROR) { |
4147 | rs->rs_status |= AR5K_RXERR_PHY; | 4148 | rs->rs_status |= AR5K_RXERR_PHY; |
4148 | rs->rs_phyerr = AR5K_REG_MS(rx_status->rx_status_1, | 4149 | rs->rs_phyerr |= AR5K_REG_MS(rx_status->rx_status_1, |
4149 | AR5K_5210_RX_DESC_STATUS1_PHY_ERROR); | 4150 | AR5K_5210_RX_DESC_STATUS1_PHY_ERROR); |
4150 | } | 4151 | } |
4151 | 4152 | ||
@@ -4193,6 +4194,7 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah, | |||
4193 | rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1, | 4194 | rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1, |
4194 | AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP); | 4195 | AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP); |
4195 | rs->rs_status = 0; | 4196 | rs->rs_status = 0; |
4197 | rs->rs_phyerr = 0; | ||
4196 | 4198 | ||
4197 | /* | 4199 | /* |
4198 | * Key table status | 4200 | * Key table status |
@@ -4215,7 +4217,7 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah, | |||
4215 | if (rx_status->rx_status_1 & | 4217 | if (rx_status->rx_status_1 & |
4216 | AR5K_5212_RX_DESC_STATUS1_PHY_ERROR) { | 4218 | AR5K_5212_RX_DESC_STATUS1_PHY_ERROR) { |
4217 | rs->rs_status |= AR5K_RXERR_PHY; | 4219 | rs->rs_status |= AR5K_RXERR_PHY; |
4218 | rs->rs_phyerr = AR5K_REG_MS(rx_err->rx_error_1, | 4220 | rs->rs_phyerr |= AR5K_REG_MS(rx_err->rx_error_1, |
4219 | AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE); | 4221 | AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE); |
4220 | } | 4222 | } |
4221 | 4223 | ||
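Both ath5k hunks zero a per-descriptor status word (rxs.flag, rs->rs_phyerr) before error bits are OR-ed into it, so flags from one frame cannot leak into the next. A short demonstration of the difference the reset makes (names are illustrative):

```c
/* Sketch: why the ath5k hunks reset status words before OR-ing error
 * bits into them — per-descriptor flags must not survive into the next
 * iteration. */
#include <stdio.h>

#define ERR_PHY 0x2

static unsigned int rx_error_bits(int pkt)
{
	return pkt == 0 ? ERR_PHY : 0;      /* only descriptor 0 is bad */
}

int main(void)
{
	unsigned int stale = 0, fresh;

	for (int pkt = 0; pkt < 3; pkt++) {
		stale |= rx_error_bits(pkt);    /* never reset: the bit sticks */

		fresh = 0;                      /* rxs.flag = 0 analogue */
		fresh |= rx_error_bits(pkt);

		printf("pkt %d  without reset %#x  with reset %#x\n",
		       pkt, stale, fresh);
	}
	return 0;
}
```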
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig index f51b2d9b085b..1fa043d1802c 100644 --- a/drivers/net/wireless/b43/Kconfig +++ b/drivers/net/wireless/b43/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config B43 | 1 | config B43 |
2 | tristate "Broadcom 43xx wireless support (mac80211 stack)" | 2 | tristate "Broadcom 43xx wireless support (mac80211 stack)" |
3 | depends on SSB_POSSIBLE && MAC80211 && WLAN_80211 | 3 | depends on SSB_POSSIBLE && MAC80211 && WLAN_80211 && HAS_DMA |
4 | select SSB | 4 | select SSB |
5 | select FW_LOADER | 5 | select FW_LOADER |
6 | select HW_RANDOM | 6 | select HW_RANDOM |
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h index 37783cdd301a..d3db298c05fc 100644 --- a/drivers/net/wireless/b43/b43.h +++ b/drivers/net/wireless/b43/b43.h | |||
@@ -630,7 +630,6 @@ struct b43_pio { | |||
630 | 630 | ||
631 | /* Context information for a noise calculation (Link Quality). */ | 631 | /* Context information for a noise calculation (Link Quality). */ |
632 | struct b43_noise_calculation { | 632 | struct b43_noise_calculation { |
633 | u8 channel_at_start; | ||
634 | bool calculation_running; | 633 | bool calculation_running; |
635 | u8 nr_samples; | 634 | u8 nr_samples; |
636 | s8 samples[8][4]; | 635 | s8 samples[8][4]; |
@@ -737,6 +736,7 @@ struct b43_wl { | |||
737 | struct ieee80211_tx_control beacon_txctl; | 736 | struct ieee80211_tx_control beacon_txctl; |
738 | bool beacon0_uploaded; | 737 | bool beacon0_uploaded; |
739 | bool beacon1_uploaded; | 738 | bool beacon1_uploaded; |
739 | bool beacon_templates_virgin; /* Never wrote the templates? */ | ||
740 | struct work_struct beacon_update_trigger; | 740 | struct work_struct beacon_update_trigger; |
741 | 741 | ||
742 | /* The current QOS parameters for the 4 queues. | 742 | /* The current QOS parameters for the 4 queues. |
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c index 6dcbb3c87e72..e23f2f172bd7 100644 --- a/drivers/net/wireless/b43/dma.c +++ b/drivers/net/wireless/b43/dma.c | |||
@@ -795,24 +795,49 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev, | |||
795 | { | 795 | { |
796 | struct b43_dmaring *ring; | 796 | struct b43_dmaring *ring; |
797 | int err; | 797 | int err; |
798 | int nr_slots; | ||
799 | dma_addr_t dma_test; | 798 | dma_addr_t dma_test; |
800 | 799 | ||
801 | ring = kzalloc(sizeof(*ring), GFP_KERNEL); | 800 | ring = kzalloc(sizeof(*ring), GFP_KERNEL); |
802 | if (!ring) | 801 | if (!ring) |
803 | goto out; | 802 | goto out; |
804 | ring->type = type; | ||
805 | 803 | ||
806 | nr_slots = B43_RXRING_SLOTS; | 804 | ring->nr_slots = B43_RXRING_SLOTS; |
807 | if (for_tx) | 805 | if (for_tx) |
808 | nr_slots = B43_TXRING_SLOTS; | 806 | ring->nr_slots = B43_TXRING_SLOTS; |
809 | 807 | ||
810 | ring->meta = kcalloc(nr_slots, sizeof(struct b43_dmadesc_meta), | 808 | ring->meta = kcalloc(ring->nr_slots, sizeof(struct b43_dmadesc_meta), |
811 | GFP_KERNEL); | 809 | GFP_KERNEL); |
812 | if (!ring->meta) | 810 | if (!ring->meta) |
813 | goto err_kfree_ring; | 811 | goto err_kfree_ring; |
812 | |||
813 | ring->type = type; | ||
814 | ring->dev = dev; | ||
815 | ring->mmio_base = b43_dmacontroller_base(type, controller_index); | ||
816 | ring->index = controller_index; | ||
817 | if (type == B43_DMA_64BIT) | ||
818 | ring->ops = &dma64_ops; | ||
819 | else | ||
820 | ring->ops = &dma32_ops; | ||
814 | if (for_tx) { | 821 | if (for_tx) { |
815 | ring->txhdr_cache = kcalloc(nr_slots, | 822 | ring->tx = 1; |
823 | ring->current_slot = -1; | ||
824 | } else { | ||
825 | if (ring->index == 0) { | ||
826 | ring->rx_buffersize = B43_DMA0_RX_BUFFERSIZE; | ||
827 | ring->frameoffset = B43_DMA0_RX_FRAMEOFFSET; | ||
828 | } else if (ring->index == 3) { | ||
829 | ring->rx_buffersize = B43_DMA3_RX_BUFFERSIZE; | ||
830 | ring->frameoffset = B43_DMA3_RX_FRAMEOFFSET; | ||
831 | } else | ||
832 | B43_WARN_ON(1); | ||
833 | } | ||
834 | spin_lock_init(&ring->lock); | ||
835 | #ifdef CONFIG_B43_DEBUG | ||
836 | ring->last_injected_overflow = jiffies; | ||
837 | #endif | ||
838 | |||
839 | if (for_tx) { | ||
840 | ring->txhdr_cache = kcalloc(ring->nr_slots, | ||
816 | b43_txhdr_size(dev), | 841 | b43_txhdr_size(dev), |
817 | GFP_KERNEL); | 842 | GFP_KERNEL); |
818 | if (!ring->txhdr_cache) | 843 | if (!ring->txhdr_cache) |
@@ -828,7 +853,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev, | |||
828 | b43_txhdr_size(dev), 1)) { | 853 | b43_txhdr_size(dev), 1)) { |
829 | /* ugh realloc */ | 854 | /* ugh realloc */ |
830 | kfree(ring->txhdr_cache); | 855 | kfree(ring->txhdr_cache); |
831 | ring->txhdr_cache = kcalloc(nr_slots, | 856 | ring->txhdr_cache = kcalloc(ring->nr_slots, |
832 | b43_txhdr_size(dev), | 857 | b43_txhdr_size(dev), |
833 | GFP_KERNEL | GFP_DMA); | 858 | GFP_KERNEL | GFP_DMA); |
834 | if (!ring->txhdr_cache) | 859 | if (!ring->txhdr_cache) |
@@ -853,32 +878,6 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev, | |||
853 | DMA_TO_DEVICE); | 878 | DMA_TO_DEVICE); |
854 | } | 879 | } |
855 | 880 | ||
856 | ring->dev = dev; | ||
857 | ring->nr_slots = nr_slots; | ||
858 | ring->mmio_base = b43_dmacontroller_base(type, controller_index); | ||
859 | ring->index = controller_index; | ||
860 | if (type == B43_DMA_64BIT) | ||
861 | ring->ops = &dma64_ops; | ||
862 | else | ||
863 | ring->ops = &dma32_ops; | ||
864 | if (for_tx) { | ||
865 | ring->tx = 1; | ||
866 | ring->current_slot = -1; | ||
867 | } else { | ||
868 | if (ring->index == 0) { | ||
869 | ring->rx_buffersize = B43_DMA0_RX_BUFFERSIZE; | ||
870 | ring->frameoffset = B43_DMA0_RX_FRAMEOFFSET; | ||
871 | } else if (ring->index == 3) { | ||
872 | ring->rx_buffersize = B43_DMA3_RX_BUFFERSIZE; | ||
873 | ring->frameoffset = B43_DMA3_RX_FRAMEOFFSET; | ||
874 | } else | ||
875 | B43_WARN_ON(1); | ||
876 | } | ||
877 | spin_lock_init(&ring->lock); | ||
878 | #ifdef CONFIG_B43_DEBUG | ||
879 | ring->last_injected_overflow = jiffies; | ||
880 | #endif | ||
881 | |||
882 | err = alloc_ringmemory(ring); | 881 | err = alloc_ringmemory(ring); |
883 | if (err) | 882 | if (err) |
884 | goto err_kfree_txhdr_cache; | 883 | goto err_kfree_txhdr_cache; |
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 8fdba9415c04..fa4b0d8b74a2 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c | |||
@@ -1145,7 +1145,6 @@ static void b43_generate_noise_sample(struct b43_wldev *dev) | |||
1145 | b43_jssi_write(dev, 0x7F7F7F7F); | 1145 | b43_jssi_write(dev, 0x7F7F7F7F); |
1146 | b43_write32(dev, B43_MMIO_MACCMD, | 1146 | b43_write32(dev, B43_MMIO_MACCMD, |
1147 | b43_read32(dev, B43_MMIO_MACCMD) | B43_MACCMD_BGNOISE); | 1147 | b43_read32(dev, B43_MMIO_MACCMD) | B43_MACCMD_BGNOISE); |
1148 | B43_WARN_ON(dev->noisecalc.channel_at_start != dev->phy.channel); | ||
1149 | } | 1148 | } |
1150 | 1149 | ||
1151 | static void b43_calculate_link_quality(struct b43_wldev *dev) | 1150 | static void b43_calculate_link_quality(struct b43_wldev *dev) |
@@ -1154,7 +1153,6 @@ static void b43_calculate_link_quality(struct b43_wldev *dev) | |||
1154 | 1153 | ||
1155 | if (dev->noisecalc.calculation_running) | 1154 | if (dev->noisecalc.calculation_running) |
1156 | return; | 1155 | return; |
1157 | dev->noisecalc.channel_at_start = dev->phy.channel; | ||
1158 | dev->noisecalc.calculation_running = 1; | 1156 | dev->noisecalc.calculation_running = 1; |
1159 | dev->noisecalc.nr_samples = 0; | 1157 | dev->noisecalc.nr_samples = 0; |
1160 | 1158 | ||
@@ -1171,9 +1169,16 @@ static void handle_irq_noise(struct b43_wldev *dev) | |||
1171 | 1169 | ||
1172 | /* Bottom half of Link Quality calculation. */ | 1170 | /* Bottom half of Link Quality calculation. */ |
1173 | 1171 | ||
1172 | /* Possible race condition: It might be possible that the user | ||
1173 | * changed to a different channel in the meantime since we | ||
1174 | * started the calculation. We ignore that fact, since it's | ||
1175 | * not really that much of a problem. The background noise is | ||
1176 | * an estimation only anyway. Slightly wrong results will get damped | ||
1177 | * by the averaging of the 8 sample rounds. Additionally the | ||
1178 | * value is shortlived. So it will be replaced by the next noise | ||
1179 | * calculation round soon. */ | ||
1180 | |||
1174 | B43_WARN_ON(!dev->noisecalc.calculation_running); | 1181 | B43_WARN_ON(!dev->noisecalc.calculation_running); |
1175 | if (dev->noisecalc.channel_at_start != phy->channel) | ||
1176 | goto drop_calculation; | ||
1177 | *((__le32 *)noise) = cpu_to_le32(b43_jssi_read(dev)); | 1182 | *((__le32 *)noise) = cpu_to_le32(b43_jssi_read(dev)); |
1178 | if (noise[0] == 0x7F || noise[1] == 0x7F || | 1183 | if (noise[0] == 0x7F || noise[1] == 0x7F || |
1179 | noise[2] == 0x7F || noise[3] == 0x7F) | 1184 | noise[2] == 0x7F || noise[3] == 0x7F) |
@@ -1214,11 +1219,10 @@ static void handle_irq_noise(struct b43_wldev *dev) | |||
1214 | average -= 48; | 1219 | average -= 48; |
1215 | 1220 | ||
1216 | dev->stats.link_noise = average; | 1221 | dev->stats.link_noise = average; |
1217 | drop_calculation: | ||
1218 | dev->noisecalc.calculation_running = 0; | 1222 | dev->noisecalc.calculation_running = 0; |
1219 | return; | 1223 | return; |
1220 | } | 1224 | } |
1221 | generate_new: | 1225 | generate_new: |
1222 | b43_generate_noise_sample(dev); | 1226 | b43_generate_noise_sample(dev); |
1223 | } | 1227 | } |
1224 | 1228 | ||
@@ -1544,6 +1548,30 @@ static void b43_write_probe_resp_template(struct b43_wldev *dev, | |||
1544 | kfree(probe_resp_data); | 1548 | kfree(probe_resp_data); |
1545 | } | 1549 | } |
1546 | 1550 | ||
1551 | static void b43_upload_beacon0(struct b43_wldev *dev) | ||
1552 | { | ||
1553 | struct b43_wl *wl = dev->wl; | ||
1554 | |||
1555 | if (wl->beacon0_uploaded) | ||
1556 | return; | ||
1557 | b43_write_beacon_template(dev, 0x68, 0x18); | ||
1558 | /* FIXME: Probe resp upload doesn't really belong here, | ||
1559 | * but we don't use that feature anyway. */ | ||
1560 | b43_write_probe_resp_template(dev, 0x268, 0x4A, | ||
1561 | &__b43_ratetable[3]); | ||
1562 | wl->beacon0_uploaded = 1; | ||
1563 | } | ||
1564 | |||
1565 | static void b43_upload_beacon1(struct b43_wldev *dev) | ||
1566 | { | ||
1567 | struct b43_wl *wl = dev->wl; | ||
1568 | |||
1569 | if (wl->beacon1_uploaded) | ||
1570 | return; | ||
1571 | b43_write_beacon_template(dev, 0x468, 0x1A); | ||
1572 | wl->beacon1_uploaded = 1; | ||
1573 | } | ||
1574 | |||
1547 | static void handle_irq_beacon(struct b43_wldev *dev) | 1575 | static void handle_irq_beacon(struct b43_wldev *dev) |
1548 | { | 1576 | { |
1549 | struct b43_wl *wl = dev->wl; | 1577 | struct b43_wl *wl = dev->wl; |
@@ -1568,24 +1596,27 @@ static void handle_irq_beacon(struct b43_wldev *dev) | |||
1568 | return; | 1596 | return; |
1569 | } | 1597 | } |
1570 | 1598 | ||
1571 | if (!beacon0_valid) { | 1599 | if (unlikely(wl->beacon_templates_virgin)) { |
1572 | if (!wl->beacon0_uploaded) { | 1600 | /* We never uploaded a beacon before. |
1573 | b43_write_beacon_template(dev, 0x68, 0x18); | 1601 | * Upload both templates now, but only mark one valid. */ |
1574 | b43_write_probe_resp_template(dev, 0x268, 0x4A, | 1602 | wl->beacon_templates_virgin = 0; |
1575 | &__b43_ratetable[3]); | 1603 | b43_upload_beacon0(dev); |
1576 | wl->beacon0_uploaded = 1; | 1604 | b43_upload_beacon1(dev); |
1577 | } | ||
1578 | cmd = b43_read32(dev, B43_MMIO_MACCMD); | 1605 | cmd = b43_read32(dev, B43_MMIO_MACCMD); |
1579 | cmd |= B43_MACCMD_BEACON0_VALID; | 1606 | cmd |= B43_MACCMD_BEACON0_VALID; |
1580 | b43_write32(dev, B43_MMIO_MACCMD, cmd); | 1607 | b43_write32(dev, B43_MMIO_MACCMD, cmd); |
1581 | } else if (!beacon1_valid) { | 1608 | } else { |
1582 | if (!wl->beacon1_uploaded) { | 1609 | if (!beacon0_valid) { |
1583 | b43_write_beacon_template(dev, 0x468, 0x1A); | 1610 | b43_upload_beacon0(dev); |
1584 | wl->beacon1_uploaded = 1; | 1611 | cmd = b43_read32(dev, B43_MMIO_MACCMD); |
1612 | cmd |= B43_MACCMD_BEACON0_VALID; | ||
1613 | b43_write32(dev, B43_MMIO_MACCMD, cmd); | ||
1614 | } else if (!beacon1_valid) { | ||
1615 | b43_upload_beacon1(dev); | ||
1616 | cmd = b43_read32(dev, B43_MMIO_MACCMD); | ||
1617 | cmd |= B43_MACCMD_BEACON1_VALID; | ||
1618 | b43_write32(dev, B43_MMIO_MACCMD, cmd); | ||
1585 | } | 1619 | } |
1586 | cmd = b43_read32(dev, B43_MMIO_MACCMD); | ||
1587 | cmd |= B43_MACCMD_BEACON1_VALID; | ||
1588 | b43_write32(dev, B43_MMIO_MACCMD, cmd); | ||
1589 | } | 1620 | } |
1590 | } | 1621 | } |
1591 | 1622 | ||
@@ -4073,6 +4104,9 @@ static int b43_op_start(struct ieee80211_hw *hw) | |||
4073 | wl->filter_flags = 0; | 4104 | wl->filter_flags = 0; |
4074 | wl->radiotap_enabled = 0; | 4105 | wl->radiotap_enabled = 0; |
4075 | b43_qos_clear(wl); | 4106 | b43_qos_clear(wl); |
4107 | wl->beacon0_uploaded = 0; | ||
4108 | wl->beacon1_uploaded = 0; | ||
4109 | wl->beacon_templates_virgin = 1; | ||
4076 | 4110 | ||
4077 | /* First register RFkill. | 4111 | /* First register RFkill. |
4078 | * LEDs that are registered later depend on it. */ | 4112 | * LEDs that are registered later depend on it. */ |
@@ -4241,7 +4275,9 @@ static void b43_chip_reset(struct work_struct *work) | |||
4241 | goto out; | 4275 | goto out; |
4242 | } | 4276 | } |
4243 | } | 4277 | } |
4244 | out: | 4278 | out: |
4279 | if (err) | ||
4280 | wl->current_dev = NULL; /* Failed to init the dev. */ | ||
4245 | mutex_unlock(&wl->mutex); | 4281 | mutex_unlock(&wl->mutex); |
4246 | if (err) | 4282 | if (err) |
4247 | b43err(wl, "Controller restart FAILED\n"); | 4283 | b43err(wl, "Controller restart FAILED\n"); |
@@ -4382,9 +4418,11 @@ static void b43_one_core_detach(struct ssb_device *dev) | |||
4382 | struct b43_wldev *wldev; | 4418 | struct b43_wldev *wldev; |
4383 | struct b43_wl *wl; | 4419 | struct b43_wl *wl; |
4384 | 4420 | ||
4421 | /* Do not cancel ieee80211-workqueue based work here. | ||
4422 | * See comment in b43_remove(). */ | ||
4423 | |||
4385 | wldev = ssb_get_drvdata(dev); | 4424 | wldev = ssb_get_drvdata(dev); |
4386 | wl = wldev->wl; | 4425 | wl = wldev->wl; |
4387 | cancel_work_sync(&wldev->restart_work); | ||
4388 | b43_debugfs_remove_device(wldev); | 4426 | b43_debugfs_remove_device(wldev); |
4389 | b43_wireless_core_detach(wldev); | 4427 | b43_wireless_core_detach(wldev); |
4390 | list_del(&wldev->list); | 4428 | list_del(&wldev->list); |
@@ -4569,6 +4607,10 @@ static void b43_remove(struct ssb_device *dev) | |||
4569 | struct b43_wl *wl = ssb_get_devtypedata(dev); | 4607 | struct b43_wl *wl = ssb_get_devtypedata(dev); |
4570 | struct b43_wldev *wldev = ssb_get_drvdata(dev); | 4608 | struct b43_wldev *wldev = ssb_get_drvdata(dev); |
4571 | 4609 | ||
4610 | /* We must cancel any work here before unregistering from ieee80211, | ||
4611 | * as the ieee80211 unreg will destroy the workqueue. */ | ||
4612 | cancel_work_sync(&wldev->restart_work); | ||
4613 | |||
4572 | B43_WARN_ON(!wl); | 4614 | B43_WARN_ON(!wl); |
4573 | if (wl->current_dev == wldev) | 4615 | if (wl->current_dev == wldev) |
4574 | ieee80211_unregister_hw(wl->hw); | 4616 | ieee80211_unregister_hw(wl->hw); |
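The b43 (and, below, b43legacy) change moves cancel_work_sync() out of the core-detach path into remove(), ahead of ieee80211_unregister_hw(), because unregistering destroys the workqueue the restart work runs on. The general ordering rule — stop anything asynchronous before tearing down what it uses — in a user-space pthread sketch (struct device, restart_work() and device_remove() are invented analogues; build with cc -pthread):

```c
/* Sketch: stop the asynchronous worker before tearing down what it
 * uses, the ordering behind moving cancel_work_sync() ahead of
 * ieee80211_unregister_hw(). */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct device {
	pthread_t worker;
	atomic_int stop;
	char *state;            /* the resource the worker keeps touching */
};

static void *restart_work(void *arg)
{
	struct device *dev = arg;

	while (!atomic_load(&dev->stop))
		strcpy(dev->state, "restarting");   /* would crash after free() */
	return NULL;
}

static void device_remove(struct device *dev)
{
	/* 1. cancel_work_sync() analogue: stop and wait for the worker... */
	atomic_store(&dev->stop, 1);
	pthread_join(dev->worker, NULL);

	/* 2. ...and only then destroy what it was using. */
	free(dev->state);
}

int main(void)
{
	struct device dev = { .state = malloc(32) };

	pthread_create(&dev.worker, NULL, restart_work, &dev);
	device_remove(&dev);
	puts("removed cleanly");
	return 0;
}
```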
diff --git a/drivers/net/wireless/b43legacy/Kconfig b/drivers/net/wireless/b43legacy/Kconfig index 13c65faf0247..aef2298d37ac 100644 --- a/drivers/net/wireless/b43legacy/Kconfig +++ b/drivers/net/wireless/b43legacy/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config B43LEGACY | 1 | config B43LEGACY |
2 | tristate "Broadcom 43xx-legacy wireless support (mac80211 stack)" | 2 | tristate "Broadcom 43xx-legacy wireless support (mac80211 stack)" |
3 | depends on SSB_POSSIBLE && MAC80211 && WLAN_80211 | 3 | depends on SSB_POSSIBLE && MAC80211 && WLAN_80211 && HAS_DMA |
4 | select SSB | 4 | select SSB |
5 | select FW_LOADER | 5 | select FW_LOADER |
6 | select HW_RANDOM | 6 | select HW_RANDOM |
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c index 14a5eea2573e..204077c13870 100644 --- a/drivers/net/wireless/b43legacy/main.c +++ b/drivers/net/wireless/b43legacy/main.c | |||
@@ -3039,7 +3039,6 @@ static void b43legacy_set_pretbtt(struct b43legacy_wldev *dev) | |||
3039 | /* Locking: wl->mutex */ | 3039 | /* Locking: wl->mutex */ |
3040 | static void b43legacy_wireless_core_exit(struct b43legacy_wldev *dev) | 3040 | static void b43legacy_wireless_core_exit(struct b43legacy_wldev *dev) |
3041 | { | 3041 | { |
3042 | struct b43legacy_wl *wl = dev->wl; | ||
3043 | struct b43legacy_phy *phy = &dev->phy; | 3042 | struct b43legacy_phy *phy = &dev->phy; |
3044 | u32 macctl; | 3043 | u32 macctl; |
3045 | 3044 | ||
@@ -3054,12 +3053,6 @@ static void b43legacy_wireless_core_exit(struct b43legacy_wldev *dev) | |||
3054 | macctl |= B43legacy_MACCTL_PSM_JMP0; | 3053 | macctl |= B43legacy_MACCTL_PSM_JMP0; |
3055 | b43legacy_write32(dev, B43legacy_MMIO_MACCTL, macctl); | 3054 | b43legacy_write32(dev, B43legacy_MMIO_MACCTL, macctl); |
3056 | 3055 | ||
3057 | mutex_unlock(&wl->mutex); | ||
3058 | /* Must unlock as it would otherwise deadlock. No races here. | ||
3059 | * Cancel possibly pending workqueues. */ | ||
3060 | cancel_work_sync(&dev->restart_work); | ||
3061 | mutex_lock(&wl->mutex); | ||
3062 | |||
3063 | b43legacy_leds_exit(dev); | 3056 | b43legacy_leds_exit(dev); |
3064 | b43legacy_rng_exit(dev->wl); | 3057 | b43legacy_rng_exit(dev->wl); |
3065 | b43legacy_pio_free(dev); | 3058 | b43legacy_pio_free(dev); |
@@ -3486,6 +3479,8 @@ static void b43legacy_chip_reset(struct work_struct *work) | |||
3486 | } | 3479 | } |
3487 | } | 3480 | } |
3488 | out: | 3481 | out: |
3482 | if (err) | ||
3483 | wl->current_dev = NULL; /* Failed to init the dev. */ | ||
3489 | mutex_unlock(&wl->mutex); | 3484 | mutex_unlock(&wl->mutex); |
3490 | if (err) | 3485 | if (err) |
3491 | b43legacyerr(wl, "Controller restart FAILED\n"); | 3486 | b43legacyerr(wl, "Controller restart FAILED\n"); |
@@ -3618,9 +3613,11 @@ static void b43legacy_one_core_detach(struct ssb_device *dev) | |||
3618 | struct b43legacy_wldev *wldev; | 3613 | struct b43legacy_wldev *wldev; |
3619 | struct b43legacy_wl *wl; | 3614 | struct b43legacy_wl *wl; |
3620 | 3615 | ||
3616 | /* Do not cancel ieee80211-workqueue based work here. | ||
3617 | * See comment in b43legacy_remove(). */ | ||
3618 | |||
3621 | wldev = ssb_get_drvdata(dev); | 3619 | wldev = ssb_get_drvdata(dev); |
3622 | wl = wldev->wl; | 3620 | wl = wldev->wl; |
3623 | cancel_work_sync(&wldev->restart_work); | ||
3624 | b43legacy_debugfs_remove_device(wldev); | 3621 | b43legacy_debugfs_remove_device(wldev); |
3625 | b43legacy_wireless_core_detach(wldev); | 3622 | b43legacy_wireless_core_detach(wldev); |
3626 | list_del(&wldev->list); | 3623 | list_del(&wldev->list); |
@@ -3789,6 +3786,10 @@ static void b43legacy_remove(struct ssb_device *dev) | |||
3789 | struct b43legacy_wl *wl = ssb_get_devtypedata(dev); | 3786 | struct b43legacy_wl *wl = ssb_get_devtypedata(dev); |
3790 | struct b43legacy_wldev *wldev = ssb_get_drvdata(dev); | 3787 | struct b43legacy_wldev *wldev = ssb_get_drvdata(dev); |
3791 | 3788 | ||
3789 | /* We must cancel any work here before unregistering from ieee80211, | ||
3790 | * as the ieee80211 unreg will destroy the workqueue. */ | ||
3791 | cancel_work_sync(&wldev->restart_work); | ||
3792 | |||
3792 | B43legacy_WARN_ON(!wl); | 3793 | B43legacy_WARN_ON(!wl); |
3793 | if (wl->current_dev == wldev) | 3794 | if (wl->current_dev == wldev) |
3794 | ieee80211_unregister_hw(wl->hw); | 3795 | ieee80211_unregister_hw(wl->hw); |
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c index 437a9bcc9bd3..ed4317a17cbb 100644 --- a/drivers/net/wireless/hostap/hostap_cs.c +++ b/drivers/net/wireless/hostap/hostap_cs.c | |||
@@ -833,6 +833,7 @@ static struct pcmcia_device_id hostap_cs_ids[] = { | |||
833 | PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x0001), | 833 | PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x0001), |
834 | PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300), | 834 | PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300), |
835 | /* PCMCIA_DEVICE_MANF_CARD(0xc00f, 0x0000), conflict with pcnet_cs */ | 835 | /* PCMCIA_DEVICE_MANF_CARD(0xc00f, 0x0000), conflict with pcnet_cs */ |
836 | PCMCIA_DEVICE_MANF_CARD(0xc250, 0x0002), | ||
836 | PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0002), | 837 | PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0002), |
837 | PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0005), | 838 | PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0005), |
838 | PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0010), | 839 | PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0010), |
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c index 7be68db6f300..cdf90c40f11b 100644 --- a/drivers/net/wireless/hostap/hostap_hw.c +++ b/drivers/net/wireless/hostap/hostap_hw.c | |||
@@ -3276,11 +3276,6 @@ while (0) | |||
3276 | } | 3276 | } |
3277 | printk(KERN_INFO "%s: Registered netdevice %s\n", dev_info, dev->name); | 3277 | printk(KERN_INFO "%s: Registered netdevice %s\n", dev_info, dev->name); |
3278 | 3278 | ||
3279 | #ifndef PRISM2_NO_PROCFS_DEBUG | ||
3280 | create_proc_read_entry("registers", 0, local->proc, | ||
3281 | prism2_registers_proc_read, local); | ||
3282 | #endif /* PRISM2_NO_PROCFS_DEBUG */ | ||
3283 | |||
3284 | hostap_init_data(local); | 3279 | hostap_init_data(local); |
3285 | return dev; | 3280 | return dev; |
3286 | 3281 | ||
@@ -3307,6 +3302,10 @@ static int hostap_hw_ready(struct net_device *dev) | |||
3307 | netif_carrier_off(local->ddev); | 3302 | netif_carrier_off(local->ddev); |
3308 | } | 3303 | } |
3309 | hostap_init_proc(local); | 3304 | hostap_init_proc(local); |
3305 | #ifndef PRISM2_NO_PROCFS_DEBUG | ||
3306 | create_proc_read_entry("registers", 0, local->proc, | ||
3307 | prism2_registers_proc_read, local); | ||
3308 | #endif /* PRISM2_NO_PROCFS_DEBUG */ | ||
3310 | hostap_init_ap_proc(local); | 3309 | hostap_init_ap_proc(local); |
3311 | return 0; | 3310 | return 0; |
3312 | } | 3311 | } |
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c index 20d387f6658c..f7aec9309d04 100644 --- a/drivers/net/wireless/hostap/hostap_main.c +++ b/drivers/net/wireless/hostap/hostap_main.c | |||
@@ -682,7 +682,13 @@ static int prism2_close(struct net_device *dev) | |||
682 | netif_device_detach(dev); | 682 | netif_device_detach(dev); |
683 | } | 683 | } |
684 | 684 | ||
685 | flush_scheduled_work(); | 685 | cancel_work_sync(&local->reset_queue); |
686 | cancel_work_sync(&local->set_multicast_list_queue); | ||
687 | cancel_work_sync(&local->set_tim_queue); | ||
688 | #ifndef PRISM2_NO_STATION_MODES | ||
689 | cancel_work_sync(&local->info_queue); | ||
690 | #endif | ||
691 | cancel_work_sync(&local->comms_qual_update); | ||
686 | 692 | ||
687 | module_put(local->hw_module); | 693 | module_put(local->hw_module); |
688 | 694 | ||
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c index fa87c5c2ae0b..6e704608947c 100644 --- a/drivers/net/wireless/ipw2200.c +++ b/drivers/net/wireless/ipw2200.c | |||
@@ -1753,6 +1753,8 @@ static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio) | |||
1753 | 1753 | ||
1754 | if (priv->workqueue) { | 1754 | if (priv->workqueue) { |
1755 | cancel_delayed_work(&priv->request_scan); | 1755 | cancel_delayed_work(&priv->request_scan); |
1756 | cancel_delayed_work(&priv->request_direct_scan); | ||
1757 | cancel_delayed_work(&priv->request_passive_scan); | ||
1756 | cancel_delayed_work(&priv->scan_event); | 1758 | cancel_delayed_work(&priv->scan_event); |
1757 | } | 1759 | } |
1758 | queue_work(priv->workqueue, &priv->down); | 1760 | queue_work(priv->workqueue, &priv->down); |
@@ -2005,6 +2007,8 @@ static void ipw_irq_tasklet(struct ipw_priv *priv) | |||
2005 | wake_up_interruptible(&priv->wait_command_queue); | 2007 | wake_up_interruptible(&priv->wait_command_queue); |
2006 | priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING); | 2008 | priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING); |
2007 | cancel_delayed_work(&priv->request_scan); | 2009 | cancel_delayed_work(&priv->request_scan); |
2010 | cancel_delayed_work(&priv->request_direct_scan); | ||
2011 | cancel_delayed_work(&priv->request_passive_scan); | ||
2008 | cancel_delayed_work(&priv->scan_event); | 2012 | cancel_delayed_work(&priv->scan_event); |
2009 | schedule_work(&priv->link_down); | 2013 | schedule_work(&priv->link_down); |
2010 | queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ); | 2014 | queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ); |
@@ -4712,6 +4716,12 @@ static void ipw_rx_notification(struct ipw_priv *priv, | |||
4712 | priv->status &= ~STATUS_SCAN_FORCED; | 4716 | priv->status &= ~STATUS_SCAN_FORCED; |
4713 | #endif /* CONFIG_IPW2200_MONITOR */ | 4717 | #endif /* CONFIG_IPW2200_MONITOR */ |
4714 | 4718 | ||
4719 | /* Do queued direct scans first */ | ||
4720 | if (priv->status & STATUS_DIRECT_SCAN_PENDING) { | ||
4721 | queue_delayed_work(priv->workqueue, | ||
4722 | &priv->request_direct_scan, 0); | ||
4723 | } | ||
4724 | |||
4715 | if (!(priv->status & (STATUS_ASSOCIATED | | 4725 | if (!(priv->status & (STATUS_ASSOCIATED | |
4716 | STATUS_ASSOCIATING | | 4726 | STATUS_ASSOCIATING | |
4717 | STATUS_ROAMING | | 4727 | STATUS_ROAMING | |
@@ -6267,7 +6277,7 @@ static void ipw_add_scan_channels(struct ipw_priv *priv, | |||
6267 | } | 6277 | } |
6268 | } | 6278 | } |
6269 | 6279 | ||
6270 | static int ipw_request_scan_helper(struct ipw_priv *priv, int type) | 6280 | static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct) |
6271 | { | 6281 | { |
6272 | struct ipw_scan_request_ext scan; | 6282 | struct ipw_scan_request_ext scan; |
6273 | int err = 0, scan_type; | 6283 | int err = 0, scan_type; |
@@ -6278,22 +6288,31 @@ static int ipw_request_scan_helper(struct ipw_priv *priv, int type) | |||
6278 | 6288 | ||
6279 | mutex_lock(&priv->mutex); | 6289 | mutex_lock(&priv->mutex); |
6280 | 6290 | ||
6291 | if (direct && (priv->direct_scan_ssid_len == 0)) { | ||
6292 | IPW_DEBUG_HC("Direct scan requested but no SSID to scan for\n"); | ||
6293 | priv->status &= ~STATUS_DIRECT_SCAN_PENDING; | ||
6294 | goto done; | ||
6295 | } | ||
6296 | |||
6281 | if (priv->status & STATUS_SCANNING) { | 6297 | if (priv->status & STATUS_SCANNING) { |
6282 | IPW_DEBUG_HC("Concurrent scan requested. Ignoring.\n"); | 6298 | IPW_DEBUG_HC("Concurrent scan requested. Queuing.\n"); |
6283 | priv->status |= STATUS_SCAN_PENDING; | 6299 | priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING : |
6300 | STATUS_SCAN_PENDING; | ||
6284 | goto done; | 6301 | goto done; |
6285 | } | 6302 | } |
6286 | 6303 | ||
6287 | if (!(priv->status & STATUS_SCAN_FORCED) && | 6304 | if (!(priv->status & STATUS_SCAN_FORCED) && |
6288 | priv->status & STATUS_SCAN_ABORTING) { | 6305 | priv->status & STATUS_SCAN_ABORTING) { |
6289 | IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n"); | 6306 | IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n"); |
6290 | priv->status |= STATUS_SCAN_PENDING; | 6307 | priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING : |
6308 | STATUS_SCAN_PENDING; | ||
6291 | goto done; | 6309 | goto done; |
6292 | } | 6310 | } |
6293 | 6311 | ||
6294 | if (priv->status & STATUS_RF_KILL_MASK) { | 6312 | if (priv->status & STATUS_RF_KILL_MASK) { |
6295 | IPW_DEBUG_HC("Aborting scan due to RF Kill activation\n"); | 6313 | IPW_DEBUG_HC("Queuing scan due to RF Kill activation\n"); |
6296 | priv->status |= STATUS_SCAN_PENDING; | 6314 | priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING : |
6315 | STATUS_SCAN_PENDING; | ||
6297 | goto done; | 6316 | goto done; |
6298 | } | 6317 | } |
6299 | 6318 | ||
@@ -6321,6 +6340,7 @@ static int ipw_request_scan_helper(struct ipw_priv *priv, int type) | |||
6321 | cpu_to_le16(20); | 6340 | cpu_to_le16(20); |
6322 | 6341 | ||
6323 | scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120); | 6342 | scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120); |
6343 | scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20); | ||
6324 | 6344 | ||
6325 | #ifdef CONFIG_IPW2200_MONITOR | 6345 | #ifdef CONFIG_IPW2200_MONITOR |
6326 | if (priv->ieee->iw_mode == IW_MODE_MONITOR) { | 6346 | if (priv->ieee->iw_mode == IW_MODE_MONITOR) { |
@@ -6360,13 +6380,23 @@ static int ipw_request_scan_helper(struct ipw_priv *priv, int type) | |||
6360 | cpu_to_le16(2000); | 6380 | cpu_to_le16(2000); |
6361 | } else { | 6381 | } else { |
6362 | #endif /* CONFIG_IPW2200_MONITOR */ | 6382 | #endif /* CONFIG_IPW2200_MONITOR */ |
6363 | /* If we are roaming, then make this a directed scan for the | 6383 | /* Honor direct scans first, otherwise if we are roaming make |
6364 | * current network. Otherwise, ensure that every other scan | 6384 | * this a direct scan for the current network. Finally, |
6365 | * is a fast channel hop scan */ | 6385 | * ensure that every other scan is a fast channel hop scan */ |
6366 | if ((priv->status & STATUS_ROAMING) | 6386 | if (direct) { |
6367 | || (!(priv->status & STATUS_ASSOCIATED) | 6387 | err = ipw_send_ssid(priv, priv->direct_scan_ssid, |
6368 | && (priv->config & CFG_STATIC_ESSID) | 6388 | priv->direct_scan_ssid_len); |
6369 | && (le32_to_cpu(scan.full_scan_index) % 2))) { | 6389 | if (err) { |
6390 | IPW_DEBUG_HC("Attempt to send SSID command " | ||
6391 | "failed\n"); | ||
6392 | goto done; | ||
6393 | } | ||
6394 | |||
6395 | scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN; | ||
6396 | } else if ((priv->status & STATUS_ROAMING) | ||
6397 | || (!(priv->status & STATUS_ASSOCIATED) | ||
6398 | && (priv->config & CFG_STATIC_ESSID) | ||
6399 | && (le32_to_cpu(scan.full_scan_index) % 2))) { | ||
6370 | err = ipw_send_ssid(priv, priv->essid, priv->essid_len); | 6400 | err = ipw_send_ssid(priv, priv->essid, priv->essid_len); |
6371 | if (err) { | 6401 | if (err) { |
6372 | IPW_DEBUG_HC("Attempt to send SSID command " | 6402 | IPW_DEBUG_HC("Attempt to send SSID command " |
@@ -6391,7 +6421,12 @@ send_request: | |||
6391 | } | 6421 | } |
6392 | 6422 | ||
6393 | priv->status |= STATUS_SCANNING; | 6423 | priv->status |= STATUS_SCANNING; |
6394 | priv->status &= ~STATUS_SCAN_PENDING; | 6424 | if (direct) { |
6425 | priv->status &= ~STATUS_DIRECT_SCAN_PENDING; | ||
6426 | priv->direct_scan_ssid_len = 0; | ||
6427 | } else | ||
6428 | priv->status &= ~STATUS_SCAN_PENDING; | ||
6429 | |||
6395 | queue_delayed_work(priv->workqueue, &priv->scan_check, | 6430 | queue_delayed_work(priv->workqueue, &priv->scan_check, |
6396 | IPW_SCAN_CHECK_WATCHDOG); | 6431 | IPW_SCAN_CHECK_WATCHDOG); |
6397 | done: | 6432 | done: |
@@ -6402,15 +6437,22 @@ done: | |||
6402 | static void ipw_request_passive_scan(struct work_struct *work) | 6437 | static void ipw_request_passive_scan(struct work_struct *work) |
6403 | { | 6438 | { |
6404 | struct ipw_priv *priv = | 6439 | struct ipw_priv *priv = |
6405 | container_of(work, struct ipw_priv, request_passive_scan); | 6440 | container_of(work, struct ipw_priv, request_passive_scan.work); |
6406 | ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE); | 6441 | ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE, 0); |
6407 | } | 6442 | } |
6408 | 6443 | ||
6409 | static void ipw_request_scan(struct work_struct *work) | 6444 | static void ipw_request_scan(struct work_struct *work) |
6410 | { | 6445 | { |
6411 | struct ipw_priv *priv = | 6446 | struct ipw_priv *priv = |
6412 | container_of(work, struct ipw_priv, request_scan.work); | 6447 | container_of(work, struct ipw_priv, request_scan.work); |
6413 | ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE); | 6448 | ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 0); |
6449 | } | ||
6450 | |||
6451 | static void ipw_request_direct_scan(struct work_struct *work) | ||
6452 | { | ||
6453 | struct ipw_priv *priv = | ||
6454 | container_of(work, struct ipw_priv, request_direct_scan.work); | ||
6455 | ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 1); | ||
6414 | } | 6456 | } |
6415 | 6457 | ||
6416 | static void ipw_bg_abort_scan(struct work_struct *work) | 6458 | static void ipw_bg_abort_scan(struct work_struct *work) |
@@ -7558,8 +7600,31 @@ static int ipw_associate(void *data) | |||
7558 | priv->ieee->iw_mode == IW_MODE_ADHOC && | 7600 | priv->ieee->iw_mode == IW_MODE_ADHOC && |
7559 | priv->config & CFG_ADHOC_CREATE && | 7601 | priv->config & CFG_ADHOC_CREATE && |
7560 | priv->config & CFG_STATIC_ESSID && | 7602 | priv->config & CFG_STATIC_ESSID && |
7561 | priv->config & CFG_STATIC_CHANNEL && | 7603 | priv->config & CFG_STATIC_CHANNEL) { |
7562 | !list_empty(&priv->ieee->network_free_list)) { | 7604 | /* Use oldest network if the free list is empty */ |
7605 | if (list_empty(&priv->ieee->network_free_list)) { | ||
7606 | struct ieee80211_network *oldest = NULL; | ||
7607 | struct ieee80211_network *target; | ||
7608 | DECLARE_MAC_BUF(mac); | ||
7609 | |||
7610 | list_for_each_entry(target, &priv->ieee->network_list, list) { | ||
7611 | if ((oldest == NULL) || | ||
7612 | (target->last_scanned < oldest->last_scanned)) | ||
7613 | oldest = target; | ||
7614 | } | ||
7615 | |||
7616 | /* If there are no more slots, expire the oldest */ | ||
7617 | list_del(&oldest->list); | ||
7618 | target = oldest; | ||
7619 | IPW_DEBUG_ASSOC("Expired '%s' (%s) from " | ||
7620 | "network list.\n", | ||
7621 | escape_essid(target->ssid, | ||
7622 | target->ssid_len), | ||
7623 | print_mac(mac, target->bssid)); | ||
7624 | list_add_tail(&target->list, | ||
7625 | &priv->ieee->network_free_list); | ||
7626 | } | ||
7627 | |||
7563 | element = priv->ieee->network_free_list.next; | 7628 | element = priv->ieee->network_free_list.next; |
7564 | network = list_entry(element, struct ieee80211_network, list); | 7629 | network = list_entry(element, struct ieee80211_network, list); |
7565 | ipw_adhoc_create(priv, network); | 7630 | ipw_adhoc_create(priv, network); |
@@ -9454,99 +9519,38 @@ static int ipw_wx_get_retry(struct net_device *dev, | |||
9454 | return 0; | 9519 | return 0; |
9455 | } | 9520 | } |
9456 | 9521 | ||
9457 | static int ipw_request_direct_scan(struct ipw_priv *priv, char *essid, | ||
9458 | int essid_len) | ||
9459 | { | ||
9460 | struct ipw_scan_request_ext scan; | ||
9461 | int err = 0, scan_type; | ||
9462 | |||
9463 | if (!(priv->status & STATUS_INIT) || | ||
9464 | (priv->status & STATUS_EXIT_PENDING)) | ||
9465 | return 0; | ||
9466 | |||
9467 | mutex_lock(&priv->mutex); | ||
9468 | |||
9469 | if (priv->status & STATUS_RF_KILL_MASK) { | ||
9470 | IPW_DEBUG_HC("Aborting scan due to RF kill activation\n"); | ||
9471 | priv->status |= STATUS_SCAN_PENDING; | ||
9472 | goto done; | ||
9473 | } | ||
9474 | |||
9475 | IPW_DEBUG_HC("starting request direct scan!\n"); | ||
9476 | |||
9477 | if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) { | ||
9478 | /* We should not sleep here; otherwise we will block most | ||
9479 | * of the system (for instance, we hold rtnl_lock when we | ||
9480 | * get here). | ||
9481 | */ | ||
9482 | err = -EAGAIN; | ||
9483 | goto done; | ||
9484 | } | ||
9485 | memset(&scan, 0, sizeof(scan)); | ||
9486 | |||
9487 | if (priv->config & CFG_SPEED_SCAN) | ||
9488 | scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] = | ||
9489 | cpu_to_le16(30); | ||
9490 | else | ||
9491 | scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] = | ||
9492 | cpu_to_le16(20); | ||
9493 | |||
9494 | scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] = | ||
9495 | cpu_to_le16(20); | ||
9496 | scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120); | ||
9497 | scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20); | ||
9498 | |||
9499 | scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee)); | ||
9500 | |||
9501 | err = ipw_send_ssid(priv, essid, essid_len); | ||
9502 | if (err) { | ||
9503 | IPW_DEBUG_HC("Attempt to send SSID command failed\n"); | ||
9504 | goto done; | ||
9505 | } | ||
9506 | scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN; | ||
9507 | |||
9508 | ipw_add_scan_channels(priv, &scan, scan_type); | ||
9509 | |||
9510 | err = ipw_send_scan_request_ext(priv, &scan); | ||
9511 | if (err) { | ||
9512 | IPW_DEBUG_HC("Sending scan command failed: %08X\n", err); | ||
9513 | goto done; | ||
9514 | } | ||
9515 | |||
9516 | priv->status |= STATUS_SCANNING; | ||
9517 | |||
9518 | done: | ||
9519 | mutex_unlock(&priv->mutex); | ||
9520 | return err; | ||
9521 | } | ||
9522 | |||
9523 | static int ipw_wx_set_scan(struct net_device *dev, | 9522 | static int ipw_wx_set_scan(struct net_device *dev, |
9524 | struct iw_request_info *info, | 9523 | struct iw_request_info *info, |
9525 | union iwreq_data *wrqu, char *extra) | 9524 | union iwreq_data *wrqu, char *extra) |
9526 | { | 9525 | { |
9527 | struct ipw_priv *priv = ieee80211_priv(dev); | 9526 | struct ipw_priv *priv = ieee80211_priv(dev); |
9528 | struct iw_scan_req *req = (struct iw_scan_req *)extra; | 9527 | struct iw_scan_req *req = (struct iw_scan_req *)extra; |
9528 | struct delayed_work *work = NULL; | ||
9529 | 9529 | ||
9530 | mutex_lock(&priv->mutex); | 9530 | mutex_lock(&priv->mutex); |
9531 | |||
9531 | priv->user_requested_scan = 1; | 9532 | priv->user_requested_scan = 1; |
9532 | mutex_unlock(&priv->mutex); | ||
9533 | 9533 | ||
9534 | if (wrqu->data.length == sizeof(struct iw_scan_req)) { | 9534 | if (wrqu->data.length == sizeof(struct iw_scan_req)) { |
9535 | if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { | 9535 | if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { |
9536 | ipw_request_direct_scan(priv, req->essid, | 9536 | int len = min((int)req->essid_len, |
9537 | req->essid_len); | 9537 | (int)sizeof(priv->direct_scan_ssid)); |
9538 | return 0; | 9538 | memcpy(priv->direct_scan_ssid, req->essid, len); |
9539 | } | 9539 | priv->direct_scan_ssid_len = len; |
9540 | if (req->scan_type == IW_SCAN_TYPE_PASSIVE) { | 9540 | work = &priv->request_direct_scan; |
9541 | queue_work(priv->workqueue, | 9541 | } else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) { |
9542 | &priv->request_passive_scan); | 9542 | work = &priv->request_passive_scan; |
9543 | return 0; | ||
9544 | } | 9543 | } |
9544 | } else { | ||
9545 | /* Normal active broadcast scan */ | ||
9546 | work = &priv->request_scan; | ||
9545 | } | 9547 | } |
9546 | 9548 | ||
9549 | mutex_unlock(&priv->mutex); | ||
9550 | |||
9547 | IPW_DEBUG_WX("Start scan\n"); | 9551 | IPW_DEBUG_WX("Start scan\n"); |
9548 | 9552 | ||
9549 | queue_delayed_work(priv->workqueue, &priv->request_scan, 0); | 9553 | queue_delayed_work(priv->workqueue, work, 0); |
9550 | 9554 | ||
9551 | return 0; | 9555 | return 0; |
9552 | } | 9556 | } |
@@ -10708,6 +10712,8 @@ static void ipw_link_up(struct ipw_priv *priv) | |||
10708 | } | 10712 | } |
10709 | 10713 | ||
10710 | cancel_delayed_work(&priv->request_scan); | 10714 | cancel_delayed_work(&priv->request_scan); |
10715 | cancel_delayed_work(&priv->request_direct_scan); | ||
10716 | cancel_delayed_work(&priv->request_passive_scan); | ||
10711 | cancel_delayed_work(&priv->scan_event); | 10717 | cancel_delayed_work(&priv->scan_event); |
10712 | ipw_reset_stats(priv); | 10718 | ipw_reset_stats(priv); |
10713 | /* Ensure the rate is updated immediately */ | 10719 | /* Ensure the rate is updated immediately */ |
@@ -10738,6 +10744,8 @@ static void ipw_link_down(struct ipw_priv *priv) | |||
10738 | 10744 | ||
10739 | /* Cancel any queued work ... */ | 10745 | /* Cancel any queued work ... */ |
10740 | cancel_delayed_work(&priv->request_scan); | 10746 | cancel_delayed_work(&priv->request_scan); |
10747 | cancel_delayed_work(&priv->request_direct_scan); | ||
10748 | cancel_delayed_work(&priv->request_passive_scan); | ||
10741 | cancel_delayed_work(&priv->adhoc_check); | 10749 | cancel_delayed_work(&priv->adhoc_check); |
10742 | cancel_delayed_work(&priv->gather_stats); | 10750 | cancel_delayed_work(&priv->gather_stats); |
10743 | 10751 | ||
@@ -10777,8 +10785,9 @@ static int __devinit ipw_setup_deferred_work(struct ipw_priv *priv) | |||
10777 | INIT_WORK(&priv->up, ipw_bg_up); | 10785 | INIT_WORK(&priv->up, ipw_bg_up); |
10778 | INIT_WORK(&priv->down, ipw_bg_down); | 10786 | INIT_WORK(&priv->down, ipw_bg_down); |
10779 | INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan); | 10787 | INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan); |
10788 | INIT_DELAYED_WORK(&priv->request_direct_scan, ipw_request_direct_scan); | ||
10789 | INIT_DELAYED_WORK(&priv->request_passive_scan, ipw_request_passive_scan); | ||
10780 | INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event); | 10790 | INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event); |
10781 | INIT_WORK(&priv->request_passive_scan, ipw_request_passive_scan); | ||
10782 | INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats); | 10791 | INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats); |
10783 | INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan); | 10792 | INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan); |
10784 | INIT_WORK(&priv->roam, ipw_bg_roam); | 10793 | INIT_WORK(&priv->roam, ipw_bg_roam); |
@@ -11584,6 +11593,7 @@ static int ipw_prom_alloc(struct ipw_priv *priv) | |||
11584 | priv->prom_net_dev->hard_start_xmit = ipw_prom_hard_start_xmit; | 11593 | priv->prom_net_dev->hard_start_xmit = ipw_prom_hard_start_xmit; |
11585 | 11594 | ||
11586 | priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR; | 11595 | priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR; |
11596 | SET_NETDEV_DEV(priv->prom_net_dev, &priv->pci_dev->dev); | ||
11587 | 11597 | ||
11588 | rc = register_netdev(priv->prom_net_dev); | 11598 | rc = register_netdev(priv->prom_net_dev); |
11589 | if (rc) { | 11599 | if (rc) { |
@@ -11811,6 +11821,8 @@ static void __devexit ipw_pci_remove(struct pci_dev *pdev) | |||
11811 | cancel_delayed_work(&priv->adhoc_check); | 11821 | cancel_delayed_work(&priv->adhoc_check); |
11812 | cancel_delayed_work(&priv->gather_stats); | 11822 | cancel_delayed_work(&priv->gather_stats); |
11813 | cancel_delayed_work(&priv->request_scan); | 11823 | cancel_delayed_work(&priv->request_scan); |
11824 | cancel_delayed_work(&priv->request_direct_scan); | ||
11825 | cancel_delayed_work(&priv->request_passive_scan); | ||
11814 | cancel_delayed_work(&priv->scan_event); | 11826 | cancel_delayed_work(&priv->scan_event); |
11815 | cancel_delayed_work(&priv->rf_kill); | 11827 | cancel_delayed_work(&priv->rf_kill); |
11816 | cancel_delayed_work(&priv->scan_check); | 11828 | cancel_delayed_work(&priv->scan_check); |
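
The ipw2200.c changes above fold the old synchronous directed-scan path into the shared scan helper: the SSID from SIOCSIWSCAN is stashed in the private struct, request_direct_scan becomes a third delayed_work on the driver workqueue, and STATUS_DIRECT_SCAN_PENDING lets a directed scan wait its turn and be re-issued from the scan-complete notification. What follows is a minimal sketch of that defer-and-flush pattern, not ipw2200 code; my_priv, MY_SCANNING, my_scan_completed and the other names are invented for illustration.

        #include <linux/types.h>
        #include <linux/kernel.h>
        #include <linux/string.h>
        #include <linux/mutex.h>
        #include <linux/workqueue.h>

        #define MY_SSID_MAX             32
        #define MY_SCANNING             (1 << 0)
        #define MY_DIRECT_PENDING       (1 << 1)

        struct my_priv {
                struct mutex lock;
                unsigned long status;
                u8 ssid[MY_SSID_MAX];
                u8 ssid_len;
                struct workqueue_struct *wq;
                struct delayed_work direct_scan;        /* set up with INIT_DELAYED_WORK() */
        };

        /* ioctl path: stash the SSID, then kick the work item. */
        static void my_request_direct_scan(struct my_priv *p, const u8 *ssid, size_t len)
        {
                mutex_lock(&p->lock);
                p->ssid_len = min_t(size_t, len, sizeof(p->ssid));
                memcpy(p->ssid, ssid, p->ssid_len);
                mutex_unlock(&p->lock);

                queue_delayed_work(p->wq, &p->direct_scan, 0);
        }

        /* Work handler: if the hardware is busy, remember the request instead of dropping it. */
        static void my_direct_scan_work(struct work_struct *work)
        {
                struct my_priv *p = container_of(work, struct my_priv, direct_scan.work);

                mutex_lock(&p->lock);
                if (p->status & MY_SCANNING) {
                        p->status |= MY_DIRECT_PENDING;
                } else if (p->ssid_len) {
                        /* ...send the SSID and the scan command to the device here... */
                        p->status |= MY_SCANNING;
                        p->status &= ~MY_DIRECT_PENDING;
                        p->ssid_len = 0;
                }
                mutex_unlock(&p->lock);
        }

        /* Scan-complete notification: flush a queued directed scan before anything else. */
        static void my_scan_completed(struct my_priv *p)
        {
                mutex_lock(&p->lock);
                p->status &= ~MY_SCANNING;
                if (p->status & MY_DIRECT_PENDING)
                        queue_delayed_work(p->wq, &p->direct_scan, 0);
                mutex_unlock(&p->lock);
        }

Using a delayed_work for all three scan flavours is also why the container_of() calls above switch to the request_*_scan.work member, and why every cancel_delayed_work() site gains the two extra entries.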
diff --git a/drivers/net/wireless/ipw2200.h b/drivers/net/wireless/ipw2200.h index cd3295b66dd6..d4ab28b73b32 100644 --- a/drivers/net/wireless/ipw2200.h +++ b/drivers/net/wireless/ipw2200.h | |||
@@ -1037,6 +1037,7 @@ struct ipw_cmd { /* XXX */ | |||
1037 | #define STATUS_DISASSOC_PENDING (1<<12) | 1037 | #define STATUS_DISASSOC_PENDING (1<<12) |
1038 | #define STATUS_STATE_PENDING (1<<13) | 1038 | #define STATUS_STATE_PENDING (1<<13) |
1039 | 1039 | ||
1040 | #define STATUS_DIRECT_SCAN_PENDING (1<<19) | ||
1040 | #define STATUS_SCAN_PENDING (1<<20) | 1041 | #define STATUS_SCAN_PENDING (1<<20) |
1041 | #define STATUS_SCANNING (1<<21) | 1042 | #define STATUS_SCANNING (1<<21) |
1042 | #define STATUS_SCAN_ABORTING (1<<22) | 1043 | #define STATUS_SCAN_ABORTING (1<<22) |
@@ -1292,6 +1293,8 @@ struct ipw_priv { | |||
1292 | struct iw_public_data wireless_data; | 1293 | struct iw_public_data wireless_data; |
1293 | 1294 | ||
1294 | int user_requested_scan; | 1295 | int user_requested_scan; |
1296 | u8 direct_scan_ssid[IW_ESSID_MAX_SIZE]; | ||
1297 | u8 direct_scan_ssid_len; | ||
1295 | 1298 | ||
1296 | struct workqueue_struct *workqueue; | 1299 | struct workqueue_struct *workqueue; |
1297 | 1300 | ||
@@ -1301,8 +1304,9 @@ struct ipw_priv { | |||
1301 | struct work_struct system_config; | 1304 | struct work_struct system_config; |
1302 | struct work_struct rx_replenish; | 1305 | struct work_struct rx_replenish; |
1303 | struct delayed_work request_scan; | 1306 | struct delayed_work request_scan; |
1307 | struct delayed_work request_direct_scan; | ||
1308 | struct delayed_work request_passive_scan; | ||
1304 | struct delayed_work scan_event; | 1309 | struct delayed_work scan_event; |
1305 | struct work_struct request_passive_scan; | ||
1306 | struct work_struct adapter_restart; | 1310 | struct work_struct adapter_restart; |
1307 | struct delayed_work rf_kill; | 1311 | struct delayed_work rf_kill; |
1308 | struct work_struct up; | 1312 | struct work_struct up; |
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.c b/drivers/net/wireless/iwlwifi/iwl-3945-led.c index d200d08fb086..8b1528e52d43 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945-led.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.c | |||
@@ -229,14 +229,15 @@ static int iwl3945_led_register_led(struct iwl3945_priv *priv, | |||
229 | led->led_dev.brightness_set = iwl3945_led_brightness_set; | 229 | led->led_dev.brightness_set = iwl3945_led_brightness_set; |
230 | led->led_dev.default_trigger = trigger; | 230 | led->led_dev.default_trigger = trigger; |
231 | 231 | ||
232 | led->priv = priv; | ||
233 | led->type = type; | ||
234 | |||
232 | ret = led_classdev_register(device, &led->led_dev); | 235 | ret = led_classdev_register(device, &led->led_dev); |
233 | if (ret) { | 236 | if (ret) { |
234 | IWL_ERROR("Error: failed to register led handler.\n"); | 237 | IWL_ERROR("Error: failed to register led handler.\n"); |
235 | return ret; | 238 | return ret; |
236 | } | 239 | } |
237 | 240 | ||
238 | led->priv = priv; | ||
239 | led->type = type; | ||
240 | led->registered = 1; | 241 | led->registered = 1; |
241 | 242 | ||
242 | if (set_led && led->led_on) | 243 | if (set_led && led->led_on) |
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-rs.c b/drivers/net/wireless/iwlwifi/iwl-4965-rs.c index c9847b1a67f7..3a7f0cb710ec 100644 --- a/drivers/net/wireless/iwlwifi/iwl-4965-rs.c +++ b/drivers/net/wireless/iwlwifi/iwl-4965-rs.c | |||
@@ -1162,7 +1162,6 @@ static s32 rs_get_best_rate(struct iwl_priv *priv, | |||
1162 | 1162 | ||
1163 | /* Higher rate not available, use the original */ | 1163 | /* Higher rate not available, use the original */ |
1164 | } else { | 1164 | } else { |
1165 | new_rate = rate; | ||
1166 | break; | 1165 | break; |
1167 | } | 1166 | } |
1168 | } | 1167 | } |
@@ -2009,7 +2008,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv, | |||
2009 | * 2) Not just finishing up a search | 2008 | * 2) Not just finishing up a search |
2010 | * 3) Allowing a new search | 2009 | * 3) Allowing a new search |
2011 | */ | 2010 | */ |
2012 | if (!update_lq && !done_search && !lq_sta->stay_in_tbl) { | 2011 | if (!update_lq && !done_search && !lq_sta->stay_in_tbl && window->counter) { |
2013 | /* Save current throughput to compare with "search" throughput*/ | 2012 | /* Save current throughput to compare with "search" throughput*/ |
2014 | lq_sta->last_tpt = current_tpt; | 2013 | lq_sta->last_tpt = current_tpt; |
2015 | 2014 | ||
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c index 6328b9593877..8124fd9b1353 100644 --- a/drivers/net/wireless/libertas/cmd.c +++ b/drivers/net/wireless/libertas/cmd.c | |||
@@ -1842,6 +1842,9 @@ static void lbs_send_confirmsleep(struct lbs_private *priv) | |||
1842 | 1842 | ||
1843 | spin_lock_irqsave(&priv->driver_lock, flags); | 1843 | spin_lock_irqsave(&priv->driver_lock, flags); |
1844 | 1844 | ||
1845 | /* We don't get a response on the sleep-confirmation */ | ||
1846 | priv->dnld_sent = DNLD_RES_RECEIVED; | ||
1847 | |||
1845 | /* If nothing to do, go back to sleep (?) */ | 1848 | /* If nothing to do, go back to sleep (?) */ |
1846 | if (!__kfifo_len(priv->event_fifo) && !priv->resp_len[priv->resp_idx]) | 1849 | if (!__kfifo_len(priv->event_fifo) && !priv->resp_len[priv->resp_idx]) |
1847 | priv->psstate = PS_STATE_SLEEP; | 1850 | priv->psstate = PS_STATE_SLEEP; |
@@ -1904,12 +1907,12 @@ void lbs_ps_confirm_sleep(struct lbs_private *priv) | |||
1904 | 1907 | ||
1905 | lbs_deb_enter(LBS_DEB_HOST); | 1908 | lbs_deb_enter(LBS_DEB_HOST); |
1906 | 1909 | ||
1910 | spin_lock_irqsave(&priv->driver_lock, flags); | ||
1907 | if (priv->dnld_sent) { | 1911 | if (priv->dnld_sent) { |
1908 | allowed = 0; | 1912 | allowed = 0; |
1909 | lbs_deb_host("dnld_sent was set\n"); | 1913 | lbs_deb_host("dnld_sent was set\n"); |
1910 | } | 1914 | } |
1911 | 1915 | ||
1912 | spin_lock_irqsave(&priv->driver_lock, flags); | ||
1913 | /* In-progress command? */ | 1916 | /* In-progress command? */ |
1914 | if (priv->cur_cmd) { | 1917 | if (priv->cur_cmd) { |
1915 | allowed = 0; | 1918 | allowed = 0; |
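
The lbs_ps_confirm_sleep() hunk moves spin_lock_irqsave() above the dnld_sent test so the flag is sampled under the same lock its writers hold; reading it before taking the lock left a window in which the value could change under the reader. A minimal sketch of that check-under-lock pattern, with illustrative names rather than libertas ones:

        #include <linux/spinlock.h>

        struct my_state {
                spinlock_t lock;        /* protects busy */
                int busy;
        };

        static int my_may_sleep(struct my_state *s)
        {
                unsigned long flags;
                int allowed = 1;

                spin_lock_irqsave(&s->lock, flags);
                if (s->busy)
                        allowed = 0;    /* the flag is read under the lock */
                spin_unlock_irqrestore(&s->lock, flags);

                return allowed;
        }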
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c index ad2fabca9116..0aa0ce3b2c42 100644 --- a/drivers/net/wireless/libertas/debugfs.c +++ b/drivers/net/wireless/libertas/debugfs.c | |||
@@ -312,8 +312,8 @@ static ssize_t lbs_threshold_write(uint16_t tlv_type, uint16_t event_mask, | |||
312 | if (tlv_type != TLV_TYPE_BCNMISS) | 312 | if (tlv_type != TLV_TYPE_BCNMISS) |
313 | tlv->freq = freq; | 313 | tlv->freq = freq; |
314 | 314 | ||
315 | /* The command header, the event mask, and the one TLV */ | 315 | /* The command header, the action, the event mask, and one TLV */ |
316 | events->hdr.size = cpu_to_le16(sizeof(events->hdr) + 2 + sizeof(*tlv)); | 316 | events->hdr.size = cpu_to_le16(sizeof(events->hdr) + 4 + sizeof(*tlv)); |
317 | 317 | ||
318 | ret = lbs_cmd_with_response(priv, CMD_802_11_SUBSCRIBE_EVENT, events); | 318 | ret = lbs_cmd_with_response(priv, CMD_802_11_SUBSCRIBE_EVENT, events); |
319 | 319 | ||
diff --git a/drivers/net/wireless/libertas/ethtool.c b/drivers/net/wireless/libertas/ethtool.c index dcfdb404678b..688d60de55cb 100644 --- a/drivers/net/wireless/libertas/ethtool.c +++ b/drivers/net/wireless/libertas/ethtool.c | |||
@@ -73,8 +73,8 @@ out: | |||
73 | return ret; | 73 | return ret; |
74 | } | 74 | } |
75 | 75 | ||
76 | static void lbs_ethtool_get_stats(struct net_device * dev, | 76 | static void lbs_ethtool_get_stats(struct net_device *dev, |
77 | struct ethtool_stats * stats, u64 * data) | 77 | struct ethtool_stats *stats, uint64_t *data) |
78 | { | 78 | { |
79 | struct lbs_private *priv = dev->priv; | 79 | struct lbs_private *priv = dev->priv; |
80 | struct cmd_ds_mesh_access mesh_access; | 80 | struct cmd_ds_mesh_access mesh_access; |
@@ -83,12 +83,12 @@ static void lbs_ethtool_get_stats(struct net_device * dev, | |||
83 | lbs_deb_enter(LBS_DEB_ETHTOOL); | 83 | lbs_deb_enter(LBS_DEB_ETHTOOL); |
84 | 84 | ||
85 | /* Get Mesh Statistics */ | 85 | /* Get Mesh Statistics */ |
86 | ret = lbs_prepare_and_send_command(priv, | 86 | ret = lbs_mesh_access(priv, CMD_ACT_MESH_GET_STATS, &mesh_access); |
87 | CMD_MESH_ACCESS, CMD_ACT_MESH_GET_STATS, | ||
88 | CMD_OPTION_WAITFORRSP, 0, &mesh_access); | ||
89 | 87 | ||
90 | if (ret) | 88 | if (ret) { |
89 | memset(data, 0, MESH_STATS_NUM*(sizeof(uint64_t))); | ||
91 | return; | 90 | return; |
91 | } | ||
92 | 92 | ||
93 | priv->mstats.fwd_drop_rbt = le32_to_cpu(mesh_access.data[0]); | 93 | priv->mstats.fwd_drop_rbt = le32_to_cpu(mesh_access.data[0]); |
94 | priv->mstats.fwd_drop_ttl = le32_to_cpu(mesh_access.data[1]); | 94 | priv->mstats.fwd_drop_ttl = le32_to_cpu(mesh_access.data[1]); |
@@ -111,19 +111,18 @@ static void lbs_ethtool_get_stats(struct net_device * dev, | |||
111 | lbs_deb_enter(LBS_DEB_ETHTOOL); | 111 | lbs_deb_enter(LBS_DEB_ETHTOOL); |
112 | } | 112 | } |
113 | 113 | ||
114 | static int lbs_ethtool_get_sset_count(struct net_device * dev, int sset) | 114 | static int lbs_ethtool_get_sset_count(struct net_device *dev, int sset) |
115 | { | 115 | { |
116 | switch (sset) { | 116 | struct lbs_private *priv = dev->priv; |
117 | case ETH_SS_STATS: | 117 | |
118 | if (sset == ETH_SS_STATS && dev == priv->mesh_dev) | ||
118 | return MESH_STATS_NUM; | 119 | return MESH_STATS_NUM; |
119 | default: | 120 | |
120 | return -EOPNOTSUPP; | 121 | return -EOPNOTSUPP; |
121 | } | ||
122 | } | 122 | } |
123 | 123 | ||
124 | static void lbs_ethtool_get_strings(struct net_device *dev, | 124 | static void lbs_ethtool_get_strings(struct net_device *dev, |
125 | u32 stringset, | 125 | uint32_t stringset, uint8_t *s) |
126 | u8 * s) | ||
127 | { | 126 | { |
128 | int i; | 127 | int i; |
129 | 128 | ||
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c index 406f54d40956..acfc4bfcc262 100644 --- a/drivers/net/wireless/libertas/main.c +++ b/drivers/net/wireless/libertas/main.c | |||
@@ -732,8 +732,8 @@ static int lbs_thread(void *data) | |||
732 | lbs_deb_thread("4: currenttxskb %p, dnld_sent %d\n", | 732 | lbs_deb_thread("4: currenttxskb %p, dnld_sent %d\n", |
733 | priv->currenttxskb, priv->dnld_sent); | 733 | priv->currenttxskb, priv->dnld_sent); |
734 | 734 | ||
735 | spin_lock_irq(&priv->driver_lock); | ||
736 | /* Process any pending command response */ | 735 | /* Process any pending command response */ |
736 | spin_lock_irq(&priv->driver_lock); | ||
737 | resp_idx = priv->resp_idx; | 737 | resp_idx = priv->resp_idx; |
738 | if (priv->resp_len[resp_idx]) { | 738 | if (priv->resp_len[resp_idx]) { |
739 | spin_unlock_irq(&priv->driver_lock); | 739 | spin_unlock_irq(&priv->driver_lock); |
@@ -756,6 +756,7 @@ static int lbs_thread(void *data) | |||
756 | priv->nr_retries = 0; | 756 | priv->nr_retries = 0; |
757 | } else { | 757 | } else { |
758 | priv->cur_cmd = NULL; | 758 | priv->cur_cmd = NULL; |
759 | priv->dnld_sent = DNLD_RES_RECEIVED; | ||
759 | lbs_pr_info("requeueing command %x due to timeout (#%d)\n", | 760 | lbs_pr_info("requeueing command %x due to timeout (#%d)\n", |
760 | le16_to_cpu(cmdnode->cmdbuf->command), priv->nr_retries); | 761 | le16_to_cpu(cmdnode->cmdbuf->command), priv->nr_retries); |
761 | 762 | ||
@@ -1564,6 +1565,7 @@ static int lbs_add_rtap(struct lbs_private *priv) | |||
1564 | rtap_dev->hard_start_xmit = lbs_rtap_hard_start_xmit; | 1565 | rtap_dev->hard_start_xmit = lbs_rtap_hard_start_xmit; |
1565 | rtap_dev->set_multicast_list = lbs_set_multicast_list; | 1566 | rtap_dev->set_multicast_list = lbs_set_multicast_list; |
1566 | rtap_dev->priv = priv; | 1567 | rtap_dev->priv = priv; |
1568 | SET_NETDEV_DEV(rtap_dev, priv->dev->dev.parent); | ||
1567 | 1569 | ||
1568 | ret = register_netdev(rtap_dev); | 1570 | ret = register_netdev(rtap_dev); |
1569 | if (ret) { | 1571 | if (ret) { |
diff --git a/drivers/net/wireless/orinoco_cs.c b/drivers/net/wireless/orinoco_cs.c index 8b7f5768a103..1c216e015f64 100644 --- a/drivers/net/wireless/orinoco_cs.c +++ b/drivers/net/wireless/orinoco_cs.c | |||
@@ -461,6 +461,7 @@ static struct pcmcia_device_id orinoco_cs_ids[] = { | |||
461 | PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0673), /* Linksys WCF12 Wireless CompactFlash Card */ | 461 | PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0673), /* Linksys WCF12 Wireless CompactFlash Card */ |
462 | PCMCIA_DEVICE_MANF_CARD(0x02aa, 0x0002), /* ASUS SpaceLink WL-100 */ | 462 | PCMCIA_DEVICE_MANF_CARD(0x02aa, 0x0002), /* ASUS SpaceLink WL-100 */ |
463 | PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x0002), /* SpeedStream SS1021 Wireless Adapter */ | 463 | PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x0002), /* SpeedStream SS1021 Wireless Adapter */ |
464 | PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x3021), /* SpeedStream Wireless Adapter */ | ||
464 | PCMCIA_DEVICE_MANF_CARD(0x14ea, 0xb001), /* PLANEX RoadLannerWave GW-NS11H */ | 465 | PCMCIA_DEVICE_MANF_CARD(0x14ea, 0xb001), /* PLANEX RoadLannerWave GW-NS11H */ |
465 | PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300), /* Airvast WN-100 */ | 466 | PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300), /* Airvast WN-100 */ |
466 | PCMCIA_DEVICE_MANF_CARD(0x9005, 0x0021), /* Adaptec Ultra Wireless ANW-8030 */ | 467 | PCMCIA_DEVICE_MANF_CARD(0x9005, 0x0021), /* Adaptec Ultra Wireless ANW-8030 */ |
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c index 98ddbb3b3273..1610a7308c1d 100644 --- a/drivers/net/wireless/p54/p54usb.c +++ b/drivers/net/wireless/p54/p54usb.c | |||
@@ -49,6 +49,7 @@ static struct usb_device_id p54u_table[] __devinitdata = { | |||
49 | {USB_DEVICE(0x5041, 0x2235)}, /* Linksys WUSB54G Portable */ | 49 | {USB_DEVICE(0x5041, 0x2235)}, /* Linksys WUSB54G Portable */ |
50 | 50 | ||
51 | /* Version 2 devices (3887) */ | 51 | /* Version 2 devices (3887) */ |
52 | {USB_DEVICE(0x0471, 0x1230)}, /* Philips CPWUA054/00 */ | ||
52 | {USB_DEVICE(0x050d, 0x7050)}, /* Belkin F5D7050 ver 1000 */ | 53 | {USB_DEVICE(0x050d, 0x7050)}, /* Belkin F5D7050 ver 1000 */ |
53 | {USB_DEVICE(0x0572, 0x2000)}, /* Cohiba Proto board */ | 54 | {USB_DEVICE(0x0572, 0x2000)}, /* Cohiba Proto board */ |
54 | {USB_DEVICE(0x0572, 0x2002)}, /* Cohiba Proto board */ | 55 | {USB_DEVICE(0x0572, 0x2002)}, /* Cohiba Proto board */ |
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index d0b1fb15c709..18c9931e3267 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c | |||
@@ -116,6 +116,7 @@ MODULE_PARM_DESC(workaround_interval, | |||
116 | #define OID_802_11_ENCRYPTION_STATUS ccpu2(0x0d01011b) | 116 | #define OID_802_11_ENCRYPTION_STATUS ccpu2(0x0d01011b) |
117 | #define OID_802_11_ADD_KEY ccpu2(0x0d01011d) | 117 | #define OID_802_11_ADD_KEY ccpu2(0x0d01011d) |
118 | #define OID_802_11_REMOVE_KEY ccpu2(0x0d01011e) | 118 | #define OID_802_11_REMOVE_KEY ccpu2(0x0d01011e) |
119 | #define OID_802_11_ASSOCIATION_INFORMATION ccpu2(0x0d01011f) | ||
119 | #define OID_802_11_PMKID ccpu2(0x0d010123) | 120 | #define OID_802_11_PMKID ccpu2(0x0d010123) |
120 | #define OID_802_11_NETWORK_TYPES_SUPPORTED ccpu2(0x0d010203) | 121 | #define OID_802_11_NETWORK_TYPES_SUPPORTED ccpu2(0x0d010203) |
121 | #define OID_802_11_NETWORK_TYPE_IN_USE ccpu2(0x0d010204) | 122 | #define OID_802_11_NETWORK_TYPE_IN_USE ccpu2(0x0d010204) |
@@ -271,6 +272,26 @@ struct ndis_config_param { | |||
271 | __le32 value_length; | 272 | __le32 value_length; |
272 | } __attribute__((packed)); | 273 | } __attribute__((packed)); |
273 | 274 | ||
275 | struct ndis_80211_assoc_info { | ||
276 | __le32 length; | ||
277 | __le16 req_ies; | ||
278 | struct req_ie { | ||
279 | __le16 capa; | ||
280 | __le16 listen_interval; | ||
281 | u8 cur_ap_address[6]; | ||
282 | } req_ie; | ||
283 | __le32 req_ie_length; | ||
284 | __le32 offset_req_ies; | ||
285 | __le16 resp_ies; | ||
286 | struct resp_ie { | ||
287 | __le16 capa; | ||
288 | __le16 status_code; | ||
289 | __le16 assoc_id; | ||
290 | } resp_ie; | ||
291 | __le32 resp_ie_length; | ||
292 | __le32 offset_resp_ies; | ||
293 | } __attribute__((packed)); | ||
294 | |||
274 | /* these have to match what is in wpa_supplicant */ | 295 | /* these have to match what is in wpa_supplicant */ |
275 | enum wpa_alg { WPA_ALG_NONE, WPA_ALG_WEP, WPA_ALG_TKIP, WPA_ALG_CCMP }; | 296 | enum wpa_alg { WPA_ALG_NONE, WPA_ALG_WEP, WPA_ALG_TKIP, WPA_ALG_CCMP }; |
276 | enum wpa_cipher { CIPHER_NONE, CIPHER_WEP40, CIPHER_TKIP, CIPHER_CCMP, | 297 | enum wpa_cipher { CIPHER_NONE, CIPHER_WEP40, CIPHER_TKIP, CIPHER_CCMP, |
@@ -674,6 +695,12 @@ static int get_bssid(struct usbnet *usbdev, u8 bssid[ETH_ALEN]) | |||
674 | return ret; | 695 | return ret; |
675 | } | 696 | } |
676 | 697 | ||
698 | static int get_association_info(struct usbnet *usbdev, | ||
699 | struct ndis_80211_assoc_info *info, int len) | ||
700 | { | ||
701 | return rndis_query_oid(usbdev, OID_802_11_ASSOCIATION_INFORMATION, | ||
702 | info, &len); | ||
703 | } | ||
677 | 704 | ||
678 | static int is_associated(struct usbnet *usbdev) | 705 | static int is_associated(struct usbnet *usbdev) |
679 | { | 706 | { |
@@ -2182,11 +2209,40 @@ static void rndis_wext_worker(struct work_struct *work) | |||
2182 | struct usbnet *usbdev = priv->usbdev; | 2209 | struct usbnet *usbdev = priv->usbdev; |
2183 | union iwreq_data evt; | 2210 | union iwreq_data evt; |
2184 | unsigned char bssid[ETH_ALEN]; | 2211 | unsigned char bssid[ETH_ALEN]; |
2185 | int ret; | 2212 | struct ndis_80211_assoc_info *info; |
2213 | int assoc_size = sizeof(*info) + IW_CUSTOM_MAX + 32; | ||
2214 | int ret, offset; | ||
2186 | 2215 | ||
2187 | if (test_and_clear_bit(WORK_CONNECTION_EVENT, &priv->work_pending)) { | 2216 | if (test_and_clear_bit(WORK_CONNECTION_EVENT, &priv->work_pending)) { |
2188 | ret = get_bssid(usbdev, bssid); | 2217 | info = kzalloc(assoc_size, GFP_KERNEL); |
2218 | if (!info) | ||
2219 | goto get_bssid; | ||
2220 | |||
2221 | /* Get association info IEs from device and send them back to | ||
2222 | * userspace. */ | ||
2223 | ret = get_association_info(usbdev, info, assoc_size); | ||
2224 | if (!ret) { | ||
2225 | evt.data.length = le32_to_cpu(info->req_ie_length); | ||
2226 | if (evt.data.length > 0) { | ||
2227 | offset = le32_to_cpu(info->offset_req_ies); | ||
2228 | wireless_send_event(usbdev->net, | ||
2229 | IWEVASSOCREQIE, &evt, | ||
2230 | (char *)info + offset); | ||
2231 | } | ||
2232 | |||
2233 | evt.data.length = le32_to_cpu(info->resp_ie_length); | ||
2234 | if (evt.data.length > 0) { | ||
2235 | offset = le32_to_cpu(info->offset_resp_ies); | ||
2236 | wireless_send_event(usbdev->net, | ||
2237 | IWEVASSOCRESPIE, &evt, | ||
2238 | (char *)info + offset); | ||
2239 | } | ||
2240 | } | ||
2241 | |||
2242 | kfree(info); | ||
2189 | 2243 | ||
2244 | get_bssid: | ||
2245 | ret = get_bssid(usbdev, bssid); | ||
2190 | if (!ret) { | 2246 | if (!ret) { |
2191 | evt.data.flags = 0; | 2247 | evt.data.flags = 0; |
2192 | evt.data.length = 0; | 2248 | evt.data.length = 0; |
@@ -2414,6 +2470,11 @@ static int bcm4320_early_init(struct usbnet *dev) | |||
2414 | else if (priv->param_power_save > 2) | 2470 | else if (priv->param_power_save > 2) |
2415 | priv->param_power_save = 2; | 2471 | priv->param_power_save = 2; |
2416 | 2472 | ||
2473 | if (priv->param_power_output < 0) | ||
2474 | priv->param_power_output = 0; | ||
2475 | else if (priv->param_power_output > 3) | ||
2476 | priv->param_power_output = 3; | ||
2477 | |||
2417 | if (priv->param_roamtrigger < -80) | 2478 | if (priv->param_roamtrigger < -80) |
2418 | priv->param_roamtrigger = -80; | 2479 | priv->param_roamtrigger = -80; |
2419 | else if (priv->param_roamtrigger > -60) | 2480 | else if (priv->param_roamtrigger > -60) |
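
In the rndis_wlan changes above, the association-information response describes the request and response IEs with (length, offset) pairs measured from the start of the little-endian blob, and the worker hands each region to userspace via wireless_send_event(). Below is an illustrative helper for extracting such a region with bounds checks; blob_region() is an invented name under those assumptions, not part of rndis_wlan.

        #include <linux/types.h>
        #include <linux/kernel.h>
        #include <asm/byteorder.h>

        /* Pull an (offset, length)-described region out of a response blob of
         * blob_len bytes; returns NULL if the region is empty or out of bounds. */
        static const void *blob_region(const void *blob, size_t blob_len,
                                       __le32 le_offset, __le32 le_length,
                                       size_t *region_len)
        {
                u32 offset = le32_to_cpu(le_offset);
                u32 length = le32_to_cpu(le_length);

                if (length == 0 || offset > blob_len || length > blob_len - offset)
                        return NULL;

                *region_len = length;
                return (const u8 *)blob + offset;
        }

The req_ie_length/offset_req_ies and resp_ie_length/offset_resp_ies pairs in the struct above are exactly the kind of fields such a helper would be fed before the bytes are reported as IWEVASSOCREQIE/IWEVASSOCRESPIE events.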
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig index ab1029e79884..2d611876bbe0 100644 --- a/drivers/net/wireless/rt2x00/Kconfig +++ b/drivers/net/wireless/rt2x00/Kconfig | |||
@@ -32,12 +32,13 @@ config RT2X00_LIB_FIRMWARE | |||
32 | config RT2X00_LIB_RFKILL | 32 | config RT2X00_LIB_RFKILL |
33 | boolean | 33 | boolean |
34 | depends on RT2X00_LIB | 34 | depends on RT2X00_LIB |
35 | depends on INPUT | ||
35 | select RFKILL | 36 | select RFKILL |
36 | select INPUT_POLLDEV | 37 | select INPUT_POLLDEV |
37 | 38 | ||
38 | config RT2X00_LIB_LEDS | 39 | config RT2X00_LIB_LEDS |
39 | boolean | 40 | boolean |
40 | depends on RT2X00_LIB | 41 | depends on RT2X00_LIB && NEW_LEDS |
41 | 42 | ||
42 | config RT2400PCI | 43 | config RT2400PCI |
43 | tristate "Ralink rt2400 pci/pcmcia support" | 44 | tristate "Ralink rt2400 pci/pcmcia support" |
@@ -51,7 +52,7 @@ config RT2400PCI | |||
51 | 52 | ||
52 | config RT2400PCI_RFKILL | 53 | config RT2400PCI_RFKILL |
53 | bool "RT2400 rfkill support" | 54 | bool "RT2400 rfkill support" |
54 | depends on RT2400PCI | 55 | depends on RT2400PCI && INPUT |
55 | select RT2X00_LIB_RFKILL | 56 | select RT2X00_LIB_RFKILL |
56 | ---help--- | 57 | ---help--- |
57 | This adds support for integrated rt2400 devices that feature a | 58 | This adds support for integrated rt2400 devices that feature a |
@@ -60,7 +61,7 @@ config RT2400PCI_RFKILL | |||
60 | 61 | ||
61 | config RT2400PCI_LEDS | 62 | config RT2400PCI_LEDS |
62 | bool "RT2400 leds support" | 63 | bool "RT2400 leds support" |
63 | depends on RT2400PCI | 64 | depends on RT2400PCI && NEW_LEDS |
64 | select LEDS_CLASS | 65 | select LEDS_CLASS |
65 | select RT2X00_LIB_LEDS | 66 | select RT2X00_LIB_LEDS |
66 | ---help--- | 67 | ---help--- |
@@ -78,7 +79,7 @@ config RT2500PCI | |||
78 | 79 | ||
79 | config RT2500PCI_RFKILL | 80 | config RT2500PCI_RFKILL |
80 | bool "RT2500 rfkill support" | 81 | bool "RT2500 rfkill support" |
81 | depends on RT2500PCI | 82 | depends on RT2500PCI && INPUT |
82 | select RT2X00_LIB_RFKILL | 83 | select RT2X00_LIB_RFKILL |
83 | ---help--- | 84 | ---help--- |
84 | This adds support for integrated rt2500 devices that feature a | 85 | This adds support for integrated rt2500 devices that feature a |
@@ -87,7 +88,7 @@ config RT2500PCI_RFKILL | |||
87 | 88 | ||
88 | config RT2500PCI_LEDS | 89 | config RT2500PCI_LEDS |
89 | bool "RT2500 leds support" | 90 | bool "RT2500 leds support" |
90 | depends on RT2500PCI | 91 | depends on RT2500PCI && NEW_LEDS |
91 | select LEDS_CLASS | 92 | select LEDS_CLASS |
92 | select RT2X00_LIB_LEDS | 93 | select RT2X00_LIB_LEDS |
93 | ---help--- | 94 | ---help--- |
@@ -107,7 +108,7 @@ config RT61PCI | |||
107 | 108 | ||
108 | config RT61PCI_RFKILL | 109 | config RT61PCI_RFKILL |
109 | bool "RT61 rfkill support" | 110 | bool "RT61 rfkill support" |
110 | depends on RT61PCI | 111 | depends on RT61PCI && INPUT |
111 | select RT2X00_LIB_RFKILL | 112 | select RT2X00_LIB_RFKILL |
112 | ---help--- | 113 | ---help--- |
113 | This adds support for integrated rt61 devices that feature a | 114 | This adds support for integrated rt61 devices that feature a |
@@ -116,7 +117,7 @@ config RT61PCI_RFKILL | |||
116 | 117 | ||
117 | config RT61PCI_LEDS | 118 | config RT61PCI_LEDS |
118 | bool "RT61 leds support" | 119 | bool "RT61 leds support" |
119 | depends on RT61PCI | 120 | depends on RT61PCI && NEW_LEDS |
120 | select LEDS_CLASS | 121 | select LEDS_CLASS |
121 | select RT2X00_LIB_LEDS | 122 | select RT2X00_LIB_LEDS |
122 | ---help--- | 123 | ---help--- |
@@ -133,7 +134,7 @@ config RT2500USB | |||
133 | 134 | ||
134 | config RT2500USB_LEDS | 135 | config RT2500USB_LEDS |
135 | bool "RT2500 leds support" | 136 | bool "RT2500 leds support" |
136 | depends on RT2500USB | 137 | depends on RT2500USB && NEW_LEDS |
137 | select LEDS_CLASS | 138 | select LEDS_CLASS |
138 | select RT2X00_LIB_LEDS | 139 | select RT2X00_LIB_LEDS |
139 | ---help--- | 140 | ---help--- |
@@ -152,7 +153,7 @@ config RT73USB | |||
152 | 153 | ||
153 | config RT73USB_LEDS | 154 | config RT73USB_LEDS |
154 | bool "RT73 leds support" | 155 | bool "RT73 leds support" |
155 | depends on RT73USB | 156 | depends on RT73USB && NEW_LEDS |
156 | select LEDS_CLASS | 157 | select LEDS_CLASS |
157 | select RT2X00_LIB_LEDS | 158 | select RT2X00_LIB_LEDS |
158 | ---help--- | 159 | ---help--- |
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h index 57bdc153952f..611d98320593 100644 --- a/drivers/net/wireless/rt2x00/rt2x00.h +++ b/drivers/net/wireless/rt2x00/rt2x00.h | |||
@@ -328,6 +328,11 @@ static inline int rt2x00_get_link_ant_rssi(struct link *link) | |||
328 | return DEFAULT_RSSI; | 328 | return DEFAULT_RSSI; |
329 | } | 329 | } |
330 | 330 | ||
331 | static inline void rt2x00_reset_link_ant_rssi(struct link *link) | ||
332 | { | ||
333 | link->ant.rssi_ant = 0; | ||
334 | } | ||
335 | |||
331 | static inline int rt2x00_get_link_ant_rssi_history(struct link *link, | 336 | static inline int rt2x00_get_link_ant_rssi_history(struct link *link, |
332 | enum antenna ant) | 337 | enum antenna ant) |
333 | { | 338 | { |
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c index a9930a03f450..48608e8cc8b4 100644 --- a/drivers/net/wireless/rt2x00/rt2x00config.c +++ b/drivers/net/wireless/rt2x00/rt2x00config.c | |||
@@ -129,6 +129,7 @@ void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev, | |||
129 | */ | 129 | */ |
130 | rt2x00dev->ops->lib->config(rt2x00dev, &libconf, CONFIG_UPDATE_ANTENNA); | 130 | rt2x00dev->ops->lib->config(rt2x00dev, &libconf, CONFIG_UPDATE_ANTENNA); |
131 | rt2x00lib_reset_link_tuner(rt2x00dev); | 131 | rt2x00lib_reset_link_tuner(rt2x00dev); |
132 | rt2x00_reset_link_ant_rssi(&rt2x00dev->link); | ||
132 | 133 | ||
133 | rt2x00dev->link.ant.active.rx = libconf.ant.rx; | 134 | rt2x00dev->link.ant.active.rx = libconf.ant.rx; |
134 | rt2x00dev->link.ant.active.tx = libconf.ant.tx; | 135 | rt2x00dev->link.ant.active.tx = libconf.ant.tx; |
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c index b22c02737185..2673d568bcac 100644 --- a/drivers/net/wireless/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/rt2x00/rt2x00dev.c | |||
@@ -483,9 +483,9 @@ void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev) | |||
483 | if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) | 483 | if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) |
484 | return; | 484 | return; |
485 | 485 | ||
486 | ieee80211_iterate_active_interfaces(rt2x00dev->hw, | 486 | ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw, |
487 | rt2x00lib_beacondone_iter, | 487 | rt2x00lib_beacondone_iter, |
488 | rt2x00dev); | 488 | rt2x00dev); |
489 | 489 | ||
490 | queue_work(rt2x00dev->hw->workqueue, &rt2x00dev->intf_work); | 490 | queue_work(rt2x00dev->hw->workqueue, &rt2x00dev->intf_work); |
491 | } | 491 | } |
@@ -507,7 +507,7 @@ void rt2x00lib_txdone(struct queue_entry *entry, | |||
507 | * Update TX statistics. | 507 | * Update TX statistics. |
508 | */ | 508 | */ |
509 | rt2x00dev->link.qual.tx_success += success; | 509 | rt2x00dev->link.qual.tx_success += success; |
510 | rt2x00dev->link.qual.tx_failed += txdesc->retry + fail; | 510 | rt2x00dev->link.qual.tx_failed += fail; |
511 | 511 | ||
512 | /* | 512 | /* |
513 | * Initialize TX status | 513 | * Initialize TX status |
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c index c206b5092070..87e280a21971 100644 --- a/drivers/net/wireless/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/rt2x00/rt2x00mac.c | |||
@@ -93,6 +93,7 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb, | |||
93 | */ | 93 | */ |
94 | if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags)) { | 94 | if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags)) { |
95 | ieee80211_stop_queues(hw); | 95 | ieee80211_stop_queues(hw); |
96 | dev_kfree_skb_any(skb); | ||
96 | return NETDEV_TX_OK; | 97 | return NETDEV_TX_OK; |
97 | } | 98 | } |
98 | 99 | ||
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c index 971af2546b59..60893de3bf8f 100644 --- a/drivers/net/wireless/rt2x00/rt2x00pci.c +++ b/drivers/net/wireless/rt2x00/rt2x00pci.c | |||
@@ -412,8 +412,7 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) | |||
412 | if (pci_set_mwi(pci_dev)) | 412 | if (pci_set_mwi(pci_dev)) |
413 | ERROR_PROBE("MWI not available.\n"); | 413 | ERROR_PROBE("MWI not available.\n"); |
414 | 414 | ||
415 | if (pci_set_dma_mask(pci_dev, DMA_64BIT_MASK) && | 415 | if (pci_set_dma_mask(pci_dev, DMA_32BIT_MASK)) { |
416 | pci_set_dma_mask(pci_dev, DMA_32BIT_MASK)) { | ||
417 | ERROR_PROBE("PCI DMA not supported.\n"); | 416 | ERROR_PROBE("PCI DMA not supported.\n"); |
418 | retval = -EIO; | 417 | retval = -EIO; |
419 | goto exit_disable_device; | 418 | goto exit_disable_device; |
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c index 5a331674dcb2..e5ceae805b57 100644 --- a/drivers/net/wireless/rt2x00/rt2x00usb.c +++ b/drivers/net/wireless/rt2x00/rt2x00usb.c | |||
@@ -362,6 +362,12 @@ void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev) | |||
362 | } | 362 | } |
363 | } | 363 | } |
364 | 364 | ||
365 | /* | ||
366 | * Kill guardian urb (if required by driver). | ||
367 | */ | ||
368 | if (!test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags)) | ||
369 | return; | ||
370 | |||
365 | for (i = 0; i < rt2x00dev->bcn->limit; i++) { | 371 | for (i = 0; i < rt2x00dev->bcn->limit; i++) { |
366 | priv_bcn = rt2x00dev->bcn->entries[i].priv_data; | 372 | priv_bcn = rt2x00dev->bcn->entries[i].priv_data; |
367 | usb_kill_urb(priv_bcn->urb); | 373 | usb_kill_urb(priv_bcn->urb); |
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c index da19a3a91f4d..fff8386e816b 100644 --- a/drivers/net/wireless/rt2x00/rt73usb.c +++ b/drivers/net/wireless/rt2x00/rt73usb.c | |||
@@ -2131,6 +2131,7 @@ static struct usb_device_id rt73usb_device_table[] = { | |||
2131 | /* D-Link */ | 2131 | /* D-Link */ |
2132 | { USB_DEVICE(0x07d1, 0x3c03), USB_DEVICE_DATA(&rt73usb_ops) }, | 2132 | { USB_DEVICE(0x07d1, 0x3c03), USB_DEVICE_DATA(&rt73usb_ops) }, |
2133 | { USB_DEVICE(0x07d1, 0x3c04), USB_DEVICE_DATA(&rt73usb_ops) }, | 2133 | { USB_DEVICE(0x07d1, 0x3c04), USB_DEVICE_DATA(&rt73usb_ops) }, |
2134 | { USB_DEVICE(0x07d1, 0x3c06), USB_DEVICE_DATA(&rt73usb_ops) }, | ||
2134 | { USB_DEVICE(0x07d1, 0x3c07), USB_DEVICE_DATA(&rt73usb_ops) }, | 2135 | { USB_DEVICE(0x07d1, 0x3c07), USB_DEVICE_DATA(&rt73usb_ops) }, |
2135 | /* Gemtek */ | 2136 | /* Gemtek */ |
2136 | { USB_DEVICE(0x15a9, 0x0004), USB_DEVICE_DATA(&rt73usb_ops) }, | 2137 | { USB_DEVICE(0x15a9, 0x0004), USB_DEVICE_DATA(&rt73usb_ops) }, |
diff --git a/drivers/net/wireless/rtl8180_grf5101.c b/drivers/net/wireless/rtl8180_grf5101.c index 5d47935dbac3..947ee55f18b2 100644 --- a/drivers/net/wireless/rtl8180_grf5101.c +++ b/drivers/net/wireless/rtl8180_grf5101.c | |||
@@ -88,7 +88,7 @@ static void grf5101_rf_set_channel(struct ieee80211_hw *dev, | |||
88 | write_grf5101(dev, 0x0B, chan); | 88 | write_grf5101(dev, 0x0B, chan); |
89 | write_grf5101(dev, 0x07, 0x1000); | 89 | write_grf5101(dev, 0x07, 0x1000); |
90 | 90 | ||
91 | grf5101_write_phy_antenna(dev, chan); | 91 | grf5101_write_phy_antenna(dev, channel); |
92 | } | 92 | } |
93 | 93 | ||
94 | static void grf5101_rf_stop(struct ieee80211_hw *dev) | 94 | static void grf5101_rf_stop(struct ieee80211_hw *dev) |
diff --git a/drivers/net/wireless/rtl8180_max2820.c b/drivers/net/wireless/rtl8180_max2820.c index a34dfd382b6d..6c825fd7f3b6 100644 --- a/drivers/net/wireless/rtl8180_max2820.c +++ b/drivers/net/wireless/rtl8180_max2820.c | |||
@@ -78,7 +78,8 @@ static void max2820_rf_set_channel(struct ieee80211_hw *dev, | |||
78 | struct ieee80211_conf *conf) | 78 | struct ieee80211_conf *conf) |
79 | { | 79 | { |
80 | struct rtl8180_priv *priv = dev->priv; | 80 | struct rtl8180_priv *priv = dev->priv; |
81 | int channel = ieee80211_frequency_to_channel(conf->channel->center_freq); | 81 | int channel = conf ? |
82 | ieee80211_frequency_to_channel(conf->channel->center_freq) : 1; | ||
82 | unsigned int chan_idx = channel - 1; | 83 | unsigned int chan_idx = channel - 1; |
83 | u32 txpw = priv->channels[chan_idx].hw_value & 0xFF; | 84 | u32 txpw = priv->channels[chan_idx].hw_value & 0xFF; |
84 | u32 chan = max2820_chan[chan_idx]; | 85 | u32 chan = max2820_chan[chan_idx]; |
@@ -87,7 +88,7 @@ static void max2820_rf_set_channel(struct ieee80211_hw *dev, | |||
87 | * sa2400, for MAXIM we do this directly from BB */ | 88 | * sa2400, for MAXIM we do this directly from BB */ |
88 | rtl8180_write_phy(dev, 3, txpw); | 89 | rtl8180_write_phy(dev, 3, txpw); |
89 | 90 | ||
90 | max2820_write_phy_antenna(dev, chan); | 91 | max2820_write_phy_antenna(dev, channel); |
91 | write_max2820(dev, 3, chan); | 92 | write_max2820(dev, 3, chan); |
92 | } | 93 | } |
93 | 94 | ||
diff --git a/drivers/net/wireless/rtl8180_sa2400.c b/drivers/net/wireless/rtl8180_sa2400.c index 0311b4ea124c..cea4e0ccb92d 100644 --- a/drivers/net/wireless/rtl8180_sa2400.c +++ b/drivers/net/wireless/rtl8180_sa2400.c | |||
@@ -86,7 +86,7 @@ static void sa2400_rf_set_channel(struct ieee80211_hw *dev, | |||
86 | 86 | ||
87 | write_sa2400(dev, 7, txpw); | 87 | write_sa2400(dev, 7, txpw); |
88 | 88 | ||
89 | sa2400_write_phy_antenna(dev, chan); | 89 | sa2400_write_phy_antenna(dev, channel); |
90 | 90 | ||
91 | write_sa2400(dev, 0, chan); | 91 | write_sa2400(dev, 0, chan); |
92 | write_sa2400(dev, 1, 0xbb50); | 92 | write_sa2400(dev, 1, 0xbb50); |
diff --git a/drivers/net/wireless/rtl8187_dev.c b/drivers/net/wireless/rtl8187_dev.c index d5787b37e1fb..9223ada5f00e 100644 --- a/drivers/net/wireless/rtl8187_dev.c +++ b/drivers/net/wireless/rtl8187_dev.c | |||
@@ -92,6 +92,7 @@ static void rtl8187_iowrite_async(struct rtl8187_priv *priv, __le16 addr, | |||
92 | u8 data[4]; | 92 | u8 data[4]; |
93 | struct usb_ctrlrequest dr; | 93 | struct usb_ctrlrequest dr; |
94 | } *buf; | 94 | } *buf; |
95 | int rc; | ||
95 | 96 | ||
96 | buf = kmalloc(sizeof(*buf), GFP_ATOMIC); | 97 | buf = kmalloc(sizeof(*buf), GFP_ATOMIC); |
97 | if (!buf) | 98 | if (!buf) |
@@ -116,7 +117,11 @@ static void rtl8187_iowrite_async(struct rtl8187_priv *priv, __le16 addr, | |||
116 | usb_fill_control_urb(urb, priv->udev, usb_sndctrlpipe(priv->udev, 0), | 117 | usb_fill_control_urb(urb, priv->udev, usb_sndctrlpipe(priv->udev, 0), |
117 | (unsigned char *)dr, buf, len, | 118 | (unsigned char *)dr, buf, len, |
118 | rtl8187_iowrite_async_cb, buf); | 119 | rtl8187_iowrite_async_cb, buf); |
119 | usb_submit_urb(urb, GFP_ATOMIC); | 120 | rc = usb_submit_urb(urb, GFP_ATOMIC); |
121 | if (rc < 0) { | ||
122 | kfree(buf); | ||
123 | usb_free_urb(urb); | ||
124 | } | ||
120 | } | 125 | } |
121 | 126 | ||
122 | static inline void rtl818x_iowrite32_async(struct rtl8187_priv *priv, | 127 | static inline void rtl818x_iowrite32_async(struct rtl8187_priv *priv, |
@@ -169,6 +174,7 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb, | |||
169 | struct urb *urb; | 174 | struct urb *urb; |
170 | __le16 rts_dur = 0; | 175 | __le16 rts_dur = 0; |
171 | u32 flags; | 176 | u32 flags; |
177 | int rc; | ||
172 | 178 | ||
173 | urb = usb_alloc_urb(0, GFP_ATOMIC); | 179 | urb = usb_alloc_urb(0, GFP_ATOMIC); |
174 | if (!urb) { | 180 | if (!urb) { |
@@ -208,7 +214,11 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb, | |||
208 | info->dev = dev; | 214 | info->dev = dev; |
209 | usb_fill_bulk_urb(urb, priv->udev, usb_sndbulkpipe(priv->udev, 2), | 215 | usb_fill_bulk_urb(urb, priv->udev, usb_sndbulkpipe(priv->udev, 2), |
210 | hdr, skb->len, rtl8187_tx_cb, skb); | 216 | hdr, skb->len, rtl8187_tx_cb, skb); |
211 | usb_submit_urb(urb, GFP_ATOMIC); | 217 | rc = usb_submit_urb(urb, GFP_ATOMIC); |
218 | if (rc < 0) { | ||
219 | usb_free_urb(urb); | ||
220 | kfree_skb(skb); | ||
221 | } | ||
212 | 222 | ||
213 | return 0; | 223 | return 0; |
214 | } | 224 | } |
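
Both rtl8187 hunks add the previously missing check of usb_submit_urb(): when submission fails, the completion handler never runs, so the resources it would have released must be freed on the spot. The sketch below shows that idiom in general terms; my_async_write() is an invented example, and the unconditional usb_free_urb() relies on the USB core holding its own reference to an in-flight URB, which differs slightly from the failure-only free in the hunks above.

        #include <linux/errno.h>
        #include <linux/slab.h>
        #include <linux/usb.h>

        static void my_write_cb(struct urb *urb)
        {
                kfree(urb->context);    /* the buffer handed over below */
        }

        /* Takes ownership of buf: freed here on error, by the callback otherwise. */
        static int my_async_write(struct usb_device *udev, unsigned int pipe,
                                  void *buf, int len)
        {
                struct urb *urb;
                int rc;

                urb = usb_alloc_urb(0, GFP_ATOMIC);
                if (!urb) {
                        kfree(buf);
                        return -ENOMEM;
                }

                usb_fill_bulk_urb(urb, udev, pipe, buf, len, my_write_cb, buf);

                rc = usb_submit_urb(urb, GFP_ATOMIC);
                if (rc < 0)
                        kfree(buf);     /* my_write_cb() will never be called */

                /* Drop our reference; the core keeps the URB alive while queued. */
                usb_free_urb(urb);
                return rc;
        }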
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c index 69c45ca99051..418606ac1c3b 100644 --- a/drivers/net/wireless/zd1211rw/zd_mac.c +++ b/drivers/net/wireless/zd1211rw/zd_mac.c | |||
@@ -719,7 +719,7 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length) | |||
719 | fc = le16_to_cpu(*((__le16 *) buffer)); | 719 | fc = le16_to_cpu(*((__le16 *) buffer)); |
720 | 720 | ||
721 | is_qos = ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) && | 721 | is_qos = ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) && |
722 | ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_QOS_DATA); | 722 | (fc & IEEE80211_STYPE_QOS_DATA); |
723 | is_4addr = (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == | 723 | is_4addr = (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == |
724 | (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS); | 724 | (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS); |
725 | need_padding = is_qos ^ is_4addr; | 725 | need_padding = is_qos ^ is_4addr; |
@@ -805,7 +805,7 @@ void zd_process_intr(struct work_struct *work) | |||
805 | u16 int_status; | 805 | u16 int_status; |
806 | struct zd_mac *mac = container_of(work, struct zd_mac, process_intr); | 806 | struct zd_mac *mac = container_of(work, struct zd_mac, process_intr); |
807 | 807 | ||
808 | int_status = le16_to_cpu(*(u16 *)(mac->intr_buffer+4)); | 808 | int_status = le16_to_cpu(*(__le16 *)(mac->intr_buffer+4)); |
809 | if (int_status & INT_CFG_NEXT_BCN) { | 809 | if (int_status & INT_CFG_NEXT_BCN) { |
810 | if (net_ratelimit()) | 810 | if (net_ratelimit()) |
811 | dev_dbg_f(zd_mac_dev(mac), "INT_CFG_NEXT_BCN\n"); | 811 | dev_dbg_f(zd_mac_dev(mac), "INT_CFG_NEXT_BCN\n"); |
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c index 12e24f04dddf..8941f5eb96c2 100644 --- a/drivers/net/wireless/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zd1211rw/zd_usb.c | |||
@@ -342,7 +342,7 @@ static inline void handle_regs_int(struct urb *urb) | |||
342 | ZD_ASSERT(in_interrupt()); | 342 | ZD_ASSERT(in_interrupt()); |
343 | spin_lock(&intr->lock); | 343 | spin_lock(&intr->lock); |
344 | 344 | ||
345 | int_num = le16_to_cpu(*(u16 *)(urb->transfer_buffer+2)); | 345 | int_num = le16_to_cpu(*(__le16 *)(urb->transfer_buffer+2)); |
346 | if (int_num == CR_INTERRUPT) { | 346 | if (int_num == CR_INTERRUPT) { |
347 | struct zd_mac *mac = zd_hw_mac(zd_usb_to_hw(urb->context)); | 347 | struct zd_mac *mac = zd_hw_mac(zd_usb_to_hw(urb->context)); |
348 | memcpy(&mac->intr_buffer, urb->transfer_buffer, | 348 | memcpy(&mac->intr_buffer, urb->transfer_buffer, |
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 8bddff150c70..d26f69b0184f 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c | |||
@@ -946,8 +946,7 @@ err: | |||
946 | work_done++; | 946 | work_done++; |
947 | } | 947 | } |
948 | 948 | ||
949 | while ((skb = __skb_dequeue(&errq))) | 949 | __skb_queue_purge(&errq); |
950 | kfree_skb(skb); | ||
951 | 950 | ||
952 | work_done -= handle_incoming_queue(dev, &rxq); | 951 | work_done -= handle_incoming_queue(dev, &rxq); |
953 | 952 | ||
@@ -1079,8 +1078,7 @@ static void xennet_release_rx_bufs(struct netfront_info *np) | |||
1079 | } | 1078 | } |
1080 | } | 1079 | } |
1081 | 1080 | ||
1082 | while ((skb = __skb_dequeue(&free_list)) != NULL) | 1081 | __skb_queue_purge(&free_list); |
1083 | dev_kfree_skb(skb); | ||
1084 | 1082 | ||
1085 | spin_unlock_bh(&np->rx_lock); | 1083 | spin_unlock_bh(&np->rx_lock); |
1086 | } | 1084 | } |