35 files changed, 556 insertions(+), 290 deletions(-)
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 85fa40a0a667..9ba1f0b46429 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -1836,10 +1836,9 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1836 | 1836 | ||
1837 | if (pdev->vendor == PCI_VENDOR_ID_REALTEK && | 1837 | if (pdev->vendor == PCI_VENDOR_ID_REALTEK && |
1838 | pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) { | 1838 | pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) { |
1839 | dev_err(&pdev->dev, | 1839 | dev_info(&pdev->dev, |
1840 | "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip\n", | 1840 | "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n", |
1841 | pdev->vendor, pdev->device, pdev->revision); | 1841 | pdev->vendor, pdev->device, pdev->revision); |
1842 | dev_err(&pdev->dev, "Try the \"8139too\" driver instead.\n"); | ||
1843 | return -ENODEV; | 1842 | return -ENODEV; |
1844 | } | 1843 | } |
1845 | 1844 | ||
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 0daf8c15e381..63f906b04899 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -946,10 +946,9 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev, | |||
946 | if (pdev->vendor == PCI_VENDOR_ID_REALTEK && | 946 | if (pdev->vendor == PCI_VENDOR_ID_REALTEK && |
947 | pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision >= 0x20) { | 947 | pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision >= 0x20) { |
948 | dev_info(&pdev->dev, | 948 | dev_info(&pdev->dev, |
949 | "This (id %04x:%04x rev %02x) is an enhanced 8139C+ chip\n", | 949 | "This (id %04x:%04x rev %02x) is an enhanced 8139C+ chip, use 8139cp\n", |
950 | pdev->vendor, pdev->device, pdev->revision); | 950 | pdev->vendor, pdev->device, pdev->revision); |
951 | dev_info(&pdev->dev, | 951 | return -ENODEV; |
952 | "Use the \"8139cp\" driver for improved performance and stability.\n"); | ||
953 | } | 952 | } |
954 | 953 | ||
955 | if (pdev->vendor == PCI_VENDOR_ID_REALTEK && | 954 | if (pdev->vendor == PCI_VENDOR_ID_REALTEK && |
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 0b71ebc074b6..f749b40f954e 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -894,7 +894,7 @@ config SMC91X | |||
894 | select CRC32 | 894 | select CRC32 |
895 | select MII | 895 | select MII |
896 | depends on ARM || REDWOOD_5 || REDWOOD_6 || M32R || SUPERH || \ | 896 | depends on ARM || REDWOOD_5 || REDWOOD_6 || M32R || SUPERH || \ |
897 | SOC_AU1X00 || BLACKFIN || MN10300 | 897 | MIPS || BLACKFIN || MN10300 |
898 | help | 898 | help |
899 | This is a driver for SMC's 91x series of Ethernet chipsets, | 899 | This is a driver for SMC's 91x series of Ethernet chipsets, |
900 | including the SMC91C94 and the SMC91C111. Say Y if you want it | 900 | including the SMC91C94 and the SMC91C111. Say Y if you want it |
@@ -966,7 +966,7 @@ config SMC911X | |||
966 | tristate "SMSC LAN911[5678] support" | 966 | tristate "SMSC LAN911[5678] support" |
967 | select CRC32 | 967 | select CRC32 |
968 | select MII | 968 | select MII |
969 | depends on ARCH_PXA || SUPERH | 969 | depends on ARM || SUPERH |
970 | help | 970 | help |
971 | This is a driver for SMSC's LAN911x series of Ethernet chipsets | 971 | This is a driver for SMSC's LAN911x series of Ethernet chipsets |
972 | including the new LAN9115, LAN9116, LAN9117, and LAN9118. | 972 | including the new LAN9115, LAN9116, LAN9117, and LAN9118. |
@@ -2009,6 +2009,11 @@ config IGB_LRO | |||
2009 | 2009 | ||
2010 | If in doubt, say N. | 2010 | If in doubt, say N. |
2011 | 2011 | ||
2012 | config IGB_DCA | ||
2013 | bool "Enable DCA" | ||
2014 | default y | ||
2015 | depends on IGB && DCA && !(IGB=y && DCA=m) | ||
2016 | |||
2012 | source "drivers/net/ixp2000/Kconfig" | 2017 | source "drivers/net/ixp2000/Kconfig" |
2013 | 2018 | ||
2014 | config MYRI_SBUS | 2019 | config MYRI_SBUS |
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index 4207d6efddc0..9a314d88e7b6 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -838,12 +838,12 @@ static int ax_probe(struct platform_device *pdev) | |||
838 | 838 | ||
839 | /* find the platform resources */ | 839 | /* find the platform resources */ |
840 | 840 | ||
841 | dev->irq = platform_get_irq(pdev, 0); | 841 | ret = platform_get_irq(pdev, 0); |
842 | if (dev->irq < 0) { | 842 | if (ret < 0) { |
843 | dev_err(&pdev->dev, "no IRQ specified\n"); | 843 | dev_err(&pdev->dev, "no IRQ specified\n"); |
844 | ret = -ENXIO; | ||
845 | goto exit_mem; | 844 | goto exit_mem; |
846 | } | 845 | } |
846 | dev->irq = ret; | ||
847 | 847 | ||
848 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 848 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
849 | if (res == NULL) { | 849 | if (res == NULL) { |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 8e2be24f3fe4..832739f38db4 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1341,18 +1341,24 @@ static int bond_compute_features(struct bonding *bond) | |||
1341 | int i; | 1341 | int i; |
1342 | 1342 | ||
1343 | features &= ~(NETIF_F_ALL_CSUM | BOND_VLAN_FEATURES); | 1343 | features &= ~(NETIF_F_ALL_CSUM | BOND_VLAN_FEATURES); |
1344 | features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | | 1344 | features |= NETIF_F_GSO_MASK | NETIF_F_NO_CSUM; |
1345 | NETIF_F_GSO_MASK | NETIF_F_NO_CSUM; | 1345 | |
1346 | if (!bond->first_slave) | ||
1347 | goto done; | ||
1348 | |||
1349 | features &= ~NETIF_F_ONE_FOR_ALL; | ||
1346 | 1350 | ||
1347 | bond_for_each_slave(bond, slave, i) { | 1351 | bond_for_each_slave(bond, slave, i) { |
1348 | features = netdev_compute_features(features, | 1352 | features = netdev_increment_features(features, |
1349 | slave->dev->features); | 1353 | slave->dev->features, |
1354 | NETIF_F_ONE_FOR_ALL); | ||
1350 | if (slave->dev->hard_header_len > max_hard_header_len) | 1355 | if (slave->dev->hard_header_len > max_hard_header_len) |
1351 | max_hard_header_len = slave->dev->hard_header_len; | 1356 | max_hard_header_len = slave->dev->hard_header_len; |
1352 | } | 1357 | } |
1353 | 1358 | ||
1359 | done: | ||
1354 | features |= (bond_dev->features & BOND_VLAN_FEATURES); | 1360 | features |= (bond_dev->features & BOND_VLAN_FEATURES); |
1355 | bond_dev->features = features; | 1361 | bond_dev->features = netdev_fix_features(features, NULL); |
1356 | bond_dev->hard_header_len = max_hard_header_len; | 1362 | bond_dev->hard_header_len = max_hard_header_len; |
1357 | 1363 | ||
1358 | return 0; | 1364 | return 0; |
diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c
index 4407ac9bb555..ff1611f90e7a 100644
--- a/drivers/net/cxgb3/l2t.c
+++ b/drivers/net/cxgb3/l2t.c
@@ -431,6 +431,7 @@ struct l2t_data *t3_init_l2t(unsigned int l2t_capacity) | |||
431 | for (i = 0; i < l2t_capacity; ++i) { | 431 | for (i = 0; i < l2t_capacity; ++i) { |
432 | d->l2tab[i].idx = i; | 432 | d->l2tab[i].idx = i; |
433 | d->l2tab[i].state = L2T_STATE_UNUSED; | 433 | d->l2tab[i].state = L2T_STATE_UNUSED; |
434 | __skb_queue_head_init(&d->l2tab[i].arpq); | ||
434 | spin_lock_init(&d->l2tab[i].lock); | 435 | spin_lock_init(&d->l2tab[i].lock); |
435 | atomic_set(&d->l2tab[i].refcnt, 0); | 436 | atomic_set(&d->l2tab[i].refcnt, 0); |
436 | } | 437 | } |
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index f42c23f42652..5a9083e3f443 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -47,15 +47,6 @@ | |||
47 | #define CARDNAME "dm9000" | 47 | #define CARDNAME "dm9000" |
48 | #define DRV_VERSION "1.31" | 48 | #define DRV_VERSION "1.31" |
49 | 49 | ||
50 | #ifdef CONFIG_BLACKFIN | ||
51 | #define readsb insb | ||
52 | #define readsw insw | ||
53 | #define readsl insl | ||
54 | #define writesb outsb | ||
55 | #define writesw outsw | ||
56 | #define writesl outsl | ||
57 | #endif | ||
58 | |||
59 | /* | 50 | /* |
60 | * Transmit timeout, default 5 seconds. | 51 | * Transmit timeout, default 5 seconds. |
61 | */ | 52 | */ |
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 5524271eedca..82dd1a891ce7 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@ | |||
40 | #include <asm/io.h> | 40 | #include <asm/io.h> |
41 | 41 | ||
42 | #define DRV_NAME "ehea" | 42 | #define DRV_NAME "ehea" |
43 | #define DRV_VERSION "EHEA_0093" | 43 | #define DRV_VERSION "EHEA_0094" |
44 | 44 | ||
45 | /* eHEA capability flags */ | 45 | /* eHEA capability flags */ |
46 | #define DLPAR_PORT_ADD_REM 1 | 46 | #define DLPAR_PORT_ADD_REM 1 |
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index b70c5314f537..422fcb93e2c3 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -2863,7 +2863,7 @@ static void ehea_rereg_mrs(struct work_struct *work) | |||
2863 | struct ehea_adapter *adapter; | 2863 | struct ehea_adapter *adapter; |
2864 | 2864 | ||
2865 | mutex_lock(&dlpar_mem_lock); | 2865 | mutex_lock(&dlpar_mem_lock); |
2866 | ehea_info("LPAR memory enlarged - re-initializing driver"); | 2866 | ehea_info("LPAR memory changed - re-initializing driver"); |
2867 | 2867 | ||
2868 | list_for_each_entry(adapter, &adapter_list, list) | 2868 | list_for_each_entry(adapter, &adapter_list, list) |
2869 | if (adapter->active_ports) { | 2869 | if (adapter->active_ports) { |
@@ -2900,13 +2900,6 @@ static void ehea_rereg_mrs(struct work_struct *work) | |||
2900 | } | 2900 | } |
2901 | } | 2901 | } |
2902 | 2902 | ||
2903 | ehea_destroy_busmap(); | ||
2904 | ret = ehea_create_busmap(); | ||
2905 | if (ret) { | ||
2906 | ehea_error("creating ehea busmap failed"); | ||
2907 | goto out; | ||
2908 | } | ||
2909 | |||
2910 | clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags); | 2903 | clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags); |
2911 | 2904 | ||
2912 | list_for_each_entry(adapter, &adapter_list, list) | 2905 | list_for_each_entry(adapter, &adapter_list, list) |
@@ -3519,9 +3512,21 @@ void ehea_crash_handler(void) | |||
3519 | static int ehea_mem_notifier(struct notifier_block *nb, | 3512 | static int ehea_mem_notifier(struct notifier_block *nb, |
3520 | unsigned long action, void *data) | 3513 | unsigned long action, void *data) |
3521 | { | 3514 | { |
3515 | struct memory_notify *arg = data; | ||
3522 | switch (action) { | 3516 | switch (action) { |
3523 | case MEM_OFFLINE: | 3517 | case MEM_CANCEL_OFFLINE: |
3524 | ehea_info("memory has been removed"); | 3518 | ehea_info("memory offlining canceled"); |
3519 | /* Readd canceled memory block */ | ||
3520 | case MEM_ONLINE: | ||
3521 | ehea_info("memory is going online"); | ||
3522 | if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages)) | ||
3523 | return NOTIFY_BAD; | ||
3524 | ehea_rereg_mrs(NULL); | ||
3525 | break; | ||
3526 | case MEM_GOING_OFFLINE: | ||
3527 | ehea_info("memory is going offline"); | ||
3528 | if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages)) | ||
3529 | return NOTIFY_BAD; | ||
3525 | ehea_rereg_mrs(NULL); | 3530 | ehea_rereg_mrs(NULL); |
3526 | break; | 3531 | break; |
3527 | default: | 3532 | default: |
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index db8a9257e680..9b61dc9865d1 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -567,7 +567,7 @@ static inline int ehea_calc_index(unsigned long i, unsigned long s) | |||
567 | static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap, | 567 | static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap, |
568 | int dir) | 568 | int dir) |
569 | { | 569 | { |
570 | if(!ehea_top_bmap->dir[dir]) { | 570 | if (!ehea_top_bmap->dir[dir]) { |
571 | ehea_top_bmap->dir[dir] = | 571 | ehea_top_bmap->dir[dir] = |
572 | kzalloc(sizeof(struct ehea_dir_bmap), GFP_KERNEL); | 572 | kzalloc(sizeof(struct ehea_dir_bmap), GFP_KERNEL); |
573 | if (!ehea_top_bmap->dir[dir]) | 573 | if (!ehea_top_bmap->dir[dir]) |
@@ -578,7 +578,7 @@ static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap, | |||
578 | 578 | ||
579 | static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir) | 579 | static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir) |
580 | { | 580 | { |
581 | if(!ehea_bmap->top[top]) { | 581 | if (!ehea_bmap->top[top]) { |
582 | ehea_bmap->top[top] = | 582 | ehea_bmap->top[top] = |
583 | kzalloc(sizeof(struct ehea_top_bmap), GFP_KERNEL); | 583 | kzalloc(sizeof(struct ehea_top_bmap), GFP_KERNEL); |
584 | if (!ehea_bmap->top[top]) | 584 | if (!ehea_bmap->top[top]) |
@@ -587,53 +587,124 @@ static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir) | |||
587 | return ehea_init_top_bmap(ehea_bmap->top[top], dir); | 587 | return ehea_init_top_bmap(ehea_bmap->top[top], dir); |
588 | } | 588 | } |
589 | 589 | ||
590 | static int ehea_create_busmap_callback(unsigned long pfn, | 590 | static DEFINE_MUTEX(ehea_busmap_mutex); |
591 | unsigned long nr_pages, void *arg) | 591 | static unsigned long ehea_mr_len; |
592 | { | ||
593 | unsigned long i, mr_len, start_section, end_section; | ||
594 | start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE; | ||
595 | end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE); | ||
596 | mr_len = *(unsigned long *)arg; | ||
597 | 592 | ||
598 | if (!ehea_bmap) | 593 | #define EHEA_BUSMAP_ADD_SECT 1 |
599 | ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL); | 594 | #define EHEA_BUSMAP_REM_SECT 0 |
600 | if (!ehea_bmap) | ||
601 | return -ENOMEM; | ||
602 | 595 | ||
603 | for (i = start_section; i < end_section; i++) { | 596 | static void ehea_rebuild_busmap(void) |
604 | int ret; | 597 | { |
605 | int top, dir, idx; | 598 | u64 vaddr = EHEA_BUSMAP_START; |
606 | u64 vaddr; | 599 | int top, dir, idx; |
600 | |||
601 | for (top = 0; top < EHEA_MAP_ENTRIES; top++) { | ||
602 | struct ehea_top_bmap *ehea_top; | ||
603 | int valid_dir_entries = 0; | ||
607 | 604 | ||
608 | top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT); | 605 | if (!ehea_bmap->top[top]) |
609 | dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT); | 606 | continue; |
607 | ehea_top = ehea_bmap->top[top]; | ||
608 | for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) { | ||
609 | struct ehea_dir_bmap *ehea_dir; | ||
610 | int valid_entries = 0; | ||
610 | 611 | ||
611 | ret = ehea_init_bmap(ehea_bmap, top, dir); | 612 | if (!ehea_top->dir[dir]) |
612 | if(ret) | 613 | continue; |
613 | return ret; | 614 | valid_dir_entries++; |
615 | ehea_dir = ehea_top->dir[dir]; | ||
616 | for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) { | ||
617 | if (!ehea_dir->ent[idx]) | ||
618 | continue; | ||
619 | valid_entries++; | ||
620 | ehea_dir->ent[idx] = vaddr; | ||
621 | vaddr += EHEA_SECTSIZE; | ||
622 | } | ||
623 | if (!valid_entries) { | ||
624 | ehea_top->dir[dir] = NULL; | ||
625 | kfree(ehea_dir); | ||
626 | } | ||
627 | } | ||
628 | if (!valid_dir_entries) { | ||
629 | ehea_bmap->top[top] = NULL; | ||
630 | kfree(ehea_top); | ||
631 | } | ||
632 | } | ||
633 | } | ||
614 | 634 | ||
615 | idx = i & EHEA_INDEX_MASK; | 635 | static int ehea_update_busmap(unsigned long pfn, unsigned long pgnum, int add) |
616 | vaddr = EHEA_BUSMAP_START + mr_len + i * EHEA_SECTSIZE; | 636 | { |
637 | unsigned long i, start_section, end_section; | ||
617 | 638 | ||
618 | ehea_bmap->top[top]->dir[dir]->ent[idx] = vaddr; | 639 | if (!ehea_bmap) { |
640 | ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL); | ||
641 | if (!ehea_bmap) | ||
642 | return -ENOMEM; | ||
619 | } | 643 | } |
620 | 644 | ||
621 | mr_len += nr_pages * PAGE_SIZE; | 645 | start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE; |
622 | *(unsigned long *)arg = mr_len; | 646 | end_section = start_section + ((pgnum * PAGE_SIZE) / EHEA_SECTSIZE); |
647 | /* Mark entries as valid or invalid only; address is assigned later */ | ||
648 | for (i = start_section; i < end_section; i++) { | ||
649 | u64 flag; | ||
650 | int top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT); | ||
651 | int dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT); | ||
652 | int idx = i & EHEA_INDEX_MASK; | ||
653 | |||
654 | if (add) { | ||
655 | int ret = ehea_init_bmap(ehea_bmap, top, dir); | ||
656 | if (ret) | ||
657 | return ret; | ||
658 | flag = 1; /* valid */ | ||
659 | ehea_mr_len += EHEA_SECTSIZE; | ||
660 | } else { | ||
661 | if (!ehea_bmap->top[top]) | ||
662 | continue; | ||
663 | if (!ehea_bmap->top[top]->dir[dir]) | ||
664 | continue; | ||
665 | flag = 0; /* invalid */ | ||
666 | ehea_mr_len -= EHEA_SECTSIZE; | ||
667 | } | ||
623 | 668 | ||
669 | ehea_bmap->top[top]->dir[dir]->ent[idx] = flag; | ||
670 | } | ||
671 | ehea_rebuild_busmap(); /* Assign contiguous addresses for mr */ | ||
624 | return 0; | 672 | return 0; |
625 | } | 673 | } |
626 | 674 | ||
627 | static unsigned long ehea_mr_len; | 675 | int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages) |
676 | { | ||
677 | int ret; | ||
628 | 678 | ||
629 | static DEFINE_MUTEX(ehea_busmap_mutex); | 679 | mutex_lock(&ehea_busmap_mutex); |
680 | ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_ADD_SECT); | ||
681 | mutex_unlock(&ehea_busmap_mutex); | ||
682 | return ret; | ||
683 | } | ||
684 | |||
685 | int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages) | ||
686 | { | ||
687 | int ret; | ||
688 | |||
689 | mutex_lock(&ehea_busmap_mutex); | ||
690 | ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_REM_SECT); | ||
691 | mutex_unlock(&ehea_busmap_mutex); | ||
692 | return ret; | ||
693 | } | ||
694 | |||
695 | static int ehea_create_busmap_callback(unsigned long pfn, | ||
696 | unsigned long nr_pages, void *arg) | ||
697 | { | ||
698 | return ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_ADD_SECT); | ||
699 | } | ||
630 | 700 | ||
631 | int ehea_create_busmap(void) | 701 | int ehea_create_busmap(void) |
632 | { | 702 | { |
633 | int ret; | 703 | int ret; |
704 | |||
634 | mutex_lock(&ehea_busmap_mutex); | 705 | mutex_lock(&ehea_busmap_mutex); |
635 | ehea_mr_len = 0; | 706 | ehea_mr_len = 0; |
636 | ret = walk_memory_resource(0, 1ULL << MAX_PHYSMEM_BITS, &ehea_mr_len, | 707 | ret = walk_memory_resource(0, 1ULL << MAX_PHYSMEM_BITS, NULL, |
637 | ehea_create_busmap_callback); | 708 | ehea_create_busmap_callback); |
638 | mutex_unlock(&ehea_busmap_mutex); | 709 | mutex_unlock(&ehea_busmap_mutex); |
639 | return ret; | 710 | return ret; |
diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h
index 0bb6f92fa2f8..1e58dc06b7d2 100644
--- a/drivers/net/ehea/ehea_qmr.h
+++ b/drivers/net/ehea/ehea_qmr.h
@@ -378,6 +378,8 @@ int ehea_rem_mr(struct ehea_mr *mr); | |||
378 | 378 | ||
379 | void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle); | 379 | void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle); |
380 | 380 | ||
381 | int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages); | ||
382 | int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages); | ||
381 | int ehea_create_busmap(void); | 383 | int ehea_create_busmap(void); |
382 | void ehea_destroy_busmap(void); | 384 | void ehea_destroy_busmap(void); |
383 | u64 ehea_map_vaddr(void *caddr); | 385 | u64 ehea_map_vaddr(void *caddr); |
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index 4e4f68304e82..aec3b97e794d 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -401,6 +401,21 @@ static int mpc52xx_fec_hard_start_xmit(struct sk_buff *skb, struct net_device *d | |||
401 | return 0; | 401 | return 0; |
402 | } | 402 | } |
403 | 403 | ||
404 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
405 | static void mpc52xx_fec_poll_controller(struct net_device *dev) | ||
406 | { | ||
407 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
408 | |||
409 | disable_irq(priv->t_irq); | ||
410 | mpc52xx_fec_tx_interrupt(priv->t_irq, dev); | ||
411 | enable_irq(priv->t_irq); | ||
412 | disable_irq(priv->r_irq); | ||
413 | mpc52xx_fec_rx_interrupt(priv->r_irq, dev); | ||
414 | enable_irq(priv->r_irq); | ||
415 | } | ||
416 | #endif | ||
417 | |||
418 | |||
404 | /* This handles BestComm transmit task interrupts | 419 | /* This handles BestComm transmit task interrupts |
405 | */ | 420 | */ |
406 | static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id) | 421 | static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id) |
@@ -926,6 +941,9 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match) | |||
926 | ndev->tx_timeout = mpc52xx_fec_tx_timeout; | 941 | ndev->tx_timeout = mpc52xx_fec_tx_timeout; |
927 | ndev->watchdog_timeo = FEC_WATCHDOG_TIMEOUT; | 942 | ndev->watchdog_timeo = FEC_WATCHDOG_TIMEOUT; |
928 | ndev->base_addr = mem.start; | 943 | ndev->base_addr = mem.start; |
944 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
945 | ndev->poll_controller = mpc52xx_fec_poll_controller; | ||
946 | #endif | ||
929 | 947 | ||
930 | priv->t_irq = priv->r_irq = ndev->irq = NO_IRQ; /* IRQ are free for now */ | 948 | priv->t_irq = priv->r_irq = ndev->irq = NO_IRQ; /* IRQ are free for now */ |
931 | 949 | ||
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index b5bb7ae2817f..64b201134fdb 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -161,7 +161,7 @@ static int gfar_probe(struct platform_device *pdev) | |||
161 | struct gfar_private *priv = NULL; | 161 | struct gfar_private *priv = NULL; |
162 | struct gianfar_platform_data *einfo; | 162 | struct gianfar_platform_data *einfo; |
163 | struct resource *r; | 163 | struct resource *r; |
164 | int err = 0; | 164 | int err = 0, irq; |
165 | DECLARE_MAC_BUF(mac); | 165 | DECLARE_MAC_BUF(mac); |
166 | 166 | ||
167 | einfo = (struct gianfar_platform_data *) pdev->dev.platform_data; | 167 | einfo = (struct gianfar_platform_data *) pdev->dev.platform_data; |
@@ -187,15 +187,25 @@ static int gfar_probe(struct platform_device *pdev) | |||
187 | 187 | ||
188 | /* fill out IRQ fields */ | 188 | /* fill out IRQ fields */ |
189 | if (einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { | 189 | if (einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { |
190 | priv->interruptTransmit = platform_get_irq_byname(pdev, "tx"); | 190 | irq = platform_get_irq_byname(pdev, "tx"); |
191 | priv->interruptReceive = platform_get_irq_byname(pdev, "rx"); | 191 | if (irq < 0) |
192 | priv->interruptError = platform_get_irq_byname(pdev, "error"); | 192 | goto regs_fail; |
193 | if (priv->interruptTransmit < 0 || priv->interruptReceive < 0 || priv->interruptError < 0) | 193 | priv->interruptTransmit = irq; |
194 | |||
195 | irq = platform_get_irq_byname(pdev, "rx"); | ||
196 | if (irq < 0) | ||
197 | goto regs_fail; | ||
198 | priv->interruptReceive = irq; | ||
199 | |||
200 | irq = platform_get_irq_byname(pdev, "error"); | ||
201 | if (irq < 0) | ||
194 | goto regs_fail; | 202 | goto regs_fail; |
203 | priv->interruptError = irq; | ||
195 | } else { | 204 | } else { |
196 | priv->interruptTransmit = platform_get_irq(pdev, 0); | 205 | irq = platform_get_irq(pdev, 0); |
197 | if (priv->interruptTransmit < 0) | 206 | if (irq < 0) |
198 | goto regs_fail; | 207 | goto regs_fail; |
208 | priv->interruptTransmit = irq; | ||
199 | } | 209 | } |
200 | 210 | ||
201 | /* get a pointer to the register memory */ | 211 | /* get a pointer to the register memory */ |
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 93d02efa9a0a..1f397cd99414 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -38,10 +38,11 @@ | |||
38 | #include <linux/ethtool.h> | 38 | #include <linux/ethtool.h> |
39 | #include <linux/if_vlan.h> | 39 | #include <linux/if_vlan.h> |
40 | #include <linux/pci.h> | 40 | #include <linux/pci.h> |
41 | #include <linux/pci-aspm.h> | ||
41 | #include <linux/delay.h> | 42 | #include <linux/delay.h> |
42 | #include <linux/interrupt.h> | 43 | #include <linux/interrupt.h> |
43 | #include <linux/if_ether.h> | 44 | #include <linux/if_ether.h> |
44 | #ifdef CONFIG_DCA | 45 | #ifdef CONFIG_IGB_DCA |
45 | #include <linux/dca.h> | 46 | #include <linux/dca.h> |
46 | #endif | 47 | #endif |
47 | #include "igb.h" | 48 | #include "igb.h" |
@@ -106,11 +107,11 @@ static irqreturn_t igb_msix_other(int irq, void *); | |||
106 | static irqreturn_t igb_msix_rx(int irq, void *); | 107 | static irqreturn_t igb_msix_rx(int irq, void *); |
107 | static irqreturn_t igb_msix_tx(int irq, void *); | 108 | static irqreturn_t igb_msix_tx(int irq, void *); |
108 | static int igb_clean_rx_ring_msix(struct napi_struct *, int); | 109 | static int igb_clean_rx_ring_msix(struct napi_struct *, int); |
109 | #ifdef CONFIG_DCA | 110 | #ifdef CONFIG_IGB_DCA |
110 | static void igb_update_rx_dca(struct igb_ring *); | 111 | static void igb_update_rx_dca(struct igb_ring *); |
111 | static void igb_update_tx_dca(struct igb_ring *); | 112 | static void igb_update_tx_dca(struct igb_ring *); |
112 | static void igb_setup_dca(struct igb_adapter *); | 113 | static void igb_setup_dca(struct igb_adapter *); |
113 | #endif /* CONFIG_DCA */ | 114 | #endif /* CONFIG_IGB_DCA */ |
114 | static bool igb_clean_tx_irq(struct igb_ring *); | 115 | static bool igb_clean_tx_irq(struct igb_ring *); |
115 | static int igb_poll(struct napi_struct *, int); | 116 | static int igb_poll(struct napi_struct *, int); |
116 | static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int); | 117 | static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int); |
@@ -131,7 +132,7 @@ static int igb_suspend(struct pci_dev *, pm_message_t); | |||
131 | static int igb_resume(struct pci_dev *); | 132 | static int igb_resume(struct pci_dev *); |
132 | #endif | 133 | #endif |
133 | static void igb_shutdown(struct pci_dev *); | 134 | static void igb_shutdown(struct pci_dev *); |
134 | #ifdef CONFIG_DCA | 135 | #ifdef CONFIG_IGB_DCA |
135 | static int igb_notify_dca(struct notifier_block *, unsigned long, void *); | 136 | static int igb_notify_dca(struct notifier_block *, unsigned long, void *); |
136 | static struct notifier_block dca_notifier = { | 137 | static struct notifier_block dca_notifier = { |
137 | .notifier_call = igb_notify_dca, | 138 | .notifier_call = igb_notify_dca, |
@@ -207,7 +208,7 @@ static int __init igb_init_module(void) | |||
207 | global_quad_port_a = 0; | 208 | global_quad_port_a = 0; |
208 | 209 | ||
209 | ret = pci_register_driver(&igb_driver); | 210 | ret = pci_register_driver(&igb_driver); |
210 | #ifdef CONFIG_DCA | 211 | #ifdef CONFIG_IGB_DCA |
211 | dca_register_notify(&dca_notifier); | 212 | dca_register_notify(&dca_notifier); |
212 | #endif | 213 | #endif |
213 | return ret; | 214 | return ret; |
@@ -223,7 +224,7 @@ module_init(igb_init_module); | |||
223 | **/ | 224 | **/ |
224 | static void __exit igb_exit_module(void) | 225 | static void __exit igb_exit_module(void) |
225 | { | 226 | { |
226 | #ifdef CONFIG_DCA | 227 | #ifdef CONFIG_IGB_DCA |
227 | dca_unregister_notify(&dca_notifier); | 228 | dca_unregister_notify(&dca_notifier); |
228 | #endif | 229 | #endif |
229 | pci_unregister_driver(&igb_driver); | 230 | pci_unregister_driver(&igb_driver); |
@@ -966,10 +967,11 @@ static int __devinit igb_probe(struct pci_dev *pdev, | |||
966 | struct net_device *netdev; | 967 | struct net_device *netdev; |
967 | struct igb_adapter *adapter; | 968 | struct igb_adapter *adapter; |
968 | struct e1000_hw *hw; | 969 | struct e1000_hw *hw; |
970 | struct pci_dev *us_dev; | ||
969 | const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; | 971 | const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; |
970 | unsigned long mmio_start, mmio_len; | 972 | unsigned long mmio_start, mmio_len; |
971 | int i, err, pci_using_dac; | 973 | int i, err, pci_using_dac, pos; |
972 | u16 eeprom_data = 0; | 974 | u16 eeprom_data = 0, state = 0; |
973 | u16 eeprom_apme_mask = IGB_EEPROM_APME; | 975 | u16 eeprom_apme_mask = IGB_EEPROM_APME; |
974 | u32 part_num; | 976 | u32 part_num; |
975 | int bars, need_ioport; | 977 | int bars, need_ioport; |
@@ -1004,6 +1006,28 @@ static int __devinit igb_probe(struct pci_dev *pdev, | |||
1004 | } | 1006 | } |
1005 | } | 1007 | } |
1006 | 1008 | ||
1009 | /* 82575 requires that the pci-e link partner disable the L0s state */ | ||
1010 | switch (pdev->device) { | ||
1011 | case E1000_DEV_ID_82575EB_COPPER: | ||
1012 | case E1000_DEV_ID_82575EB_FIBER_SERDES: | ||
1013 | case E1000_DEV_ID_82575GB_QUAD_COPPER: | ||
1014 | us_dev = pdev->bus->self; | ||
1015 | pos = pci_find_capability(us_dev, PCI_CAP_ID_EXP); | ||
1016 | if (pos) { | ||
1017 | pci_read_config_word(us_dev, pos + PCI_EXP_LNKCTL, | ||
1018 | &state); | ||
1019 | state &= ~PCIE_LINK_STATE_L0S; | ||
1020 | pci_write_config_word(us_dev, pos + PCI_EXP_LNKCTL, | ||
1021 | state); | ||
1022 | printk(KERN_INFO "Disabling ASPM L0s upstream switch " | ||
1023 | "port %x:%x.%x\n", us_dev->bus->number, | ||
1024 | PCI_SLOT(us_dev->devfn), | ||
1025 | PCI_FUNC(us_dev->devfn)); | ||
1026 | } | ||
1027 | default: | ||
1028 | break; | ||
1029 | } | ||
1030 | |||
1007 | err = pci_request_selected_regions(pdev, bars, igb_driver_name); | 1031 | err = pci_request_selected_regions(pdev, bars, igb_driver_name); |
1008 | if (err) | 1032 | if (err) |
1009 | goto err_pci_reg; | 1033 | goto err_pci_reg; |
@@ -1237,7 +1261,7 @@ static int __devinit igb_probe(struct pci_dev *pdev, | |||
1237 | if (err) | 1261 | if (err) |
1238 | goto err_register; | 1262 | goto err_register; |
1239 | 1263 | ||
1240 | #ifdef CONFIG_DCA | 1264 | #ifdef CONFIG_IGB_DCA |
1241 | if ((adapter->flags & IGB_FLAG_HAS_DCA) && | 1265 | if ((adapter->flags & IGB_FLAG_HAS_DCA) && |
1242 | (dca_add_requester(&pdev->dev) == 0)) { | 1266 | (dca_add_requester(&pdev->dev) == 0)) { |
1243 | adapter->flags |= IGB_FLAG_DCA_ENABLED; | 1267 | adapter->flags |= IGB_FLAG_DCA_ENABLED; |
@@ -1311,7 +1335,7 @@ static void __devexit igb_remove(struct pci_dev *pdev) | |||
1311 | { | 1335 | { |
1312 | struct net_device *netdev = pci_get_drvdata(pdev); | 1336 | struct net_device *netdev = pci_get_drvdata(pdev); |
1313 | struct igb_adapter *adapter = netdev_priv(netdev); | 1337 | struct igb_adapter *adapter = netdev_priv(netdev); |
1314 | #ifdef CONFIG_DCA | 1338 | #ifdef CONFIG_IGB_DCA |
1315 | struct e1000_hw *hw = &adapter->hw; | 1339 | struct e1000_hw *hw = &adapter->hw; |
1316 | #endif | 1340 | #endif |
1317 | 1341 | ||
@@ -1323,7 +1347,7 @@ static void __devexit igb_remove(struct pci_dev *pdev) | |||
1323 | 1347 | ||
1324 | flush_scheduled_work(); | 1348 | flush_scheduled_work(); |
1325 | 1349 | ||
1326 | #ifdef CONFIG_DCA | 1350 | #ifdef CONFIG_IGB_DCA |
1327 | if (adapter->flags & IGB_FLAG_DCA_ENABLED) { | 1351 | if (adapter->flags & IGB_FLAG_DCA_ENABLED) { |
1328 | dev_info(&pdev->dev, "DCA disabled\n"); | 1352 | dev_info(&pdev->dev, "DCA disabled\n"); |
1329 | dca_remove_requester(&pdev->dev); | 1353 | dca_remove_requester(&pdev->dev); |
@@ -3271,7 +3295,7 @@ static irqreturn_t igb_msix_tx(int irq, void *data) | |||
3271 | struct igb_adapter *adapter = tx_ring->adapter; | 3295 | struct igb_adapter *adapter = tx_ring->adapter; |
3272 | struct e1000_hw *hw = &adapter->hw; | 3296 | struct e1000_hw *hw = &adapter->hw; |
3273 | 3297 | ||
3274 | #ifdef CONFIG_DCA | 3298 | #ifdef CONFIG_IGB_DCA |
3275 | if (adapter->flags & IGB_FLAG_DCA_ENABLED) | 3299 | if (adapter->flags & IGB_FLAG_DCA_ENABLED) |
3276 | igb_update_tx_dca(tx_ring); | 3300 | igb_update_tx_dca(tx_ring); |
3277 | #endif | 3301 | #endif |
@@ -3323,14 +3347,14 @@ static irqreturn_t igb_msix_rx(int irq, void *data) | |||
3323 | if (netif_rx_schedule_prep(adapter->netdev, &rx_ring->napi)) | 3347 | if (netif_rx_schedule_prep(adapter->netdev, &rx_ring->napi)) |
3324 | __netif_rx_schedule(adapter->netdev, &rx_ring->napi); | 3348 | __netif_rx_schedule(adapter->netdev, &rx_ring->napi); |
3325 | 3349 | ||
3326 | #ifdef CONFIG_DCA | 3350 | #ifdef CONFIG_IGB_DCA |
3327 | if (adapter->flags & IGB_FLAG_DCA_ENABLED) | 3351 | if (adapter->flags & IGB_FLAG_DCA_ENABLED) |
3328 | igb_update_rx_dca(rx_ring); | 3352 | igb_update_rx_dca(rx_ring); |
3329 | #endif | 3353 | #endif |
3330 | return IRQ_HANDLED; | 3354 | return IRQ_HANDLED; |
3331 | } | 3355 | } |
3332 | 3356 | ||
3333 | #ifdef CONFIG_DCA | 3357 | #ifdef CONFIG_IGB_DCA |
3334 | static void igb_update_rx_dca(struct igb_ring *rx_ring) | 3358 | static void igb_update_rx_dca(struct igb_ring *rx_ring) |
3335 | { | 3359 | { |
3336 | u32 dca_rxctrl; | 3360 | u32 dca_rxctrl; |
@@ -3450,7 +3474,7 @@ static int igb_notify_dca(struct notifier_block *nb, unsigned long event, | |||
3450 | 3474 | ||
3451 | return ret_val ? NOTIFY_BAD : NOTIFY_DONE; | 3475 | return ret_val ? NOTIFY_BAD : NOTIFY_DONE; |
3452 | } | 3476 | } |
3453 | #endif /* CONFIG_DCA */ | 3477 | #endif /* CONFIG_IGB_DCA */ |
3454 | 3478 | ||
3455 | /** | 3479 | /** |
3456 | * igb_intr_msi - Interrupt Handler | 3480 | * igb_intr_msi - Interrupt Handler |
@@ -3529,13 +3553,13 @@ static int igb_poll(struct napi_struct *napi, int budget) | |||
3529 | int tx_clean_complete, work_done = 0; | 3553 | int tx_clean_complete, work_done = 0; |
3530 | 3554 | ||
3531 | /* this poll routine only supports one tx and one rx queue */ | 3555 | /* this poll routine only supports one tx and one rx queue */ |
3532 | #ifdef CONFIG_DCA | 3556 | #ifdef CONFIG_IGB_DCA |
3533 | if (adapter->flags & IGB_FLAG_DCA_ENABLED) | 3557 | if (adapter->flags & IGB_FLAG_DCA_ENABLED) |
3534 | igb_update_tx_dca(&adapter->tx_ring[0]); | 3558 | igb_update_tx_dca(&adapter->tx_ring[0]); |
3535 | #endif | 3559 | #endif |
3536 | tx_clean_complete = igb_clean_tx_irq(&adapter->tx_ring[0]); | 3560 | tx_clean_complete = igb_clean_tx_irq(&adapter->tx_ring[0]); |
3537 | 3561 | ||
3538 | #ifdef CONFIG_DCA | 3562 | #ifdef CONFIG_IGB_DCA |
3539 | if (adapter->flags & IGB_FLAG_DCA_ENABLED) | 3563 | if (adapter->flags & IGB_FLAG_DCA_ENABLED) |
3540 | igb_update_rx_dca(&adapter->rx_ring[0]); | 3564 | igb_update_rx_dca(&adapter->rx_ring[0]); |
3541 | #endif | 3565 | #endif |
@@ -3563,7 +3587,7 @@ static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget) | |||
3563 | struct net_device *netdev = adapter->netdev; | 3587 | struct net_device *netdev = adapter->netdev; |
3564 | int work_done = 0; | 3588 | int work_done = 0; |
3565 | 3589 | ||
3566 | #ifdef CONFIG_DCA | 3590 | #ifdef CONFIG_IGB_DCA |
3567 | if (adapter->flags & IGB_FLAG_DCA_ENABLED) | 3591 | if (adapter->flags & IGB_FLAG_DCA_ENABLED) |
3568 | igb_update_rx_dca(rx_ring); | 3592 | igb_update_rx_dca(rx_ring); |
3569 | #endif | 3593 | #endif |
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index a9aebad52652..b1556b2e404c 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -75,7 +75,7 @@ | |||
75 | #include "myri10ge_mcp.h" | 75 | #include "myri10ge_mcp.h" |
76 | #include "myri10ge_mcp_gen_header.h" | 76 | #include "myri10ge_mcp_gen_header.h" |
77 | 77 | ||
78 | #define MYRI10GE_VERSION_STR "1.4.3-1.369" | 78 | #define MYRI10GE_VERSION_STR "1.4.3-1.371" |
79 | 79 | ||
80 | MODULE_DESCRIPTION("Myricom 10G driver (10GbE)"); | 80 | MODULE_DESCRIPTION("Myricom 10G driver (10GbE)"); |
81 | MODULE_AUTHOR("Maintainer: help@myri.com"); | 81 | MODULE_AUTHOR("Maintainer: help@myri.com"); |
@@ -2497,6 +2497,10 @@ static int myri10ge_open(struct net_device *dev) | |||
2497 | return 0; | 2497 | return 0; |
2498 | 2498 | ||
2499 | abort_with_rings: | 2499 | abort_with_rings: |
2500 | while (slice) { | ||
2501 | slice--; | ||
2502 | napi_disable(&mgp->ss[slice].napi); | ||
2503 | } | ||
2500 | for (i = 0; i < mgp->num_slices; i++) | 2504 | for (i = 0; i < mgp->num_slices; i++) |
2501 | myri10ge_free_rings(&mgp->ss[i]); | 2505 | myri10ge_free_rings(&mgp->ss[i]); |
2502 | 2506 | ||
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 38116f9d4163..ba2e1c5b6bcf 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -1375,7 +1375,6 @@ struct ql_adapter { | |||
1375 | spinlock_t adapter_lock; | 1375 | spinlock_t adapter_lock; |
1376 | spinlock_t hw_lock; | 1376 | spinlock_t hw_lock; |
1377 | spinlock_t stats_lock; | 1377 | spinlock_t stats_lock; |
1378 | spinlock_t legacy_lock; /* used for maintaining legacy intr sync */ | ||
1379 | 1378 | ||
1380 | /* PCI Bus Relative Register Addresses */ | 1379 | /* PCI Bus Relative Register Addresses */ |
1381 | void __iomem *reg_base; | 1380 | void __iomem *reg_base; |
@@ -1399,8 +1398,6 @@ struct ql_adapter { | |||
1399 | struct msix_entry *msi_x_entry; | 1398 | struct msix_entry *msi_x_entry; |
1400 | struct intr_context intr_context[MAX_RX_RINGS]; | 1399 | struct intr_context intr_context[MAX_RX_RINGS]; |
1401 | 1400 | ||
1402 | int (*legacy_check) (struct ql_adapter *); | ||
1403 | |||
1404 | int tx_ring_count; /* One per online CPU. */ | 1401 | int tx_ring_count; /* One per online CPU. */ |
1405 | u32 rss_ring_first_cq_id;/* index of first inbound (rss) rx_ring */ | 1402 | u32 rss_ring_first_cq_id;/* index of first inbound (rss) rx_ring */ |
1406 | u32 rss_ring_count; /* One per online CPU. */ | 1403 | u32 rss_ring_count; /* One per online CPU. */ |
@@ -1502,7 +1499,7 @@ void ql_mpi_work(struct work_struct *work); | |||
1502 | void ql_mpi_reset_work(struct work_struct *work); | 1499 | void ql_mpi_reset_work(struct work_struct *work); |
1503 | int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit); | 1500 | int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit); |
1504 | void ql_queue_asic_error(struct ql_adapter *qdev); | 1501 | void ql_queue_asic_error(struct ql_adapter *qdev); |
1505 | void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr); | 1502 | u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr); |
1506 | void ql_set_ethtool_ops(struct net_device *ndev); | 1503 | void ql_set_ethtool_ops(struct net_device *ndev); |
1507 | int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data); | 1504 | int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data); |
1508 | 1505 | ||
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 4b2caa6b7ac5..b83a9c9b6a97 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -577,41 +577,53 @@ static void ql_disable_interrupts(struct ql_adapter *qdev) | |||
577 | * incremented everytime we queue a worker and decremented everytime | 577 | * incremented everytime we queue a worker and decremented everytime |
578 | * a worker finishes. Once it hits zero we enable the interrupt. | 578 | * a worker finishes. Once it hits zero we enable the interrupt. |
579 | */ | 579 | */ |
580 | void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr) | 580 | u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr) |
581 | { | 581 | { |
582 | if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) | 582 | u32 var = 0; |
583 | unsigned long hw_flags = 0; | ||
584 | struct intr_context *ctx = qdev->intr_context + intr; | ||
585 | |||
586 | if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) { | ||
587 | /* Always enable if we're MSIX multi interrupts and | ||
588 | * it's not the default (zeroeth) interrupt. | ||
589 | */ | ||
583 | ql_write32(qdev, INTR_EN, | 590 | ql_write32(qdev, INTR_EN, |
584 | qdev->intr_context[intr].intr_en_mask); | 591 | ctx->intr_en_mask); |
585 | else { | 592 | var = ql_read32(qdev, STS); |
586 | if (qdev->legacy_check) | 593 | return var; |
587 | spin_lock(&qdev->legacy_lock); | ||
588 | if (atomic_dec_and_test(&qdev->intr_context[intr].irq_cnt)) { | ||
589 | QPRINTK(qdev, INTR, ERR, "Enabling interrupt %d.\n", | ||
590 | intr); | ||
591 | ql_write32(qdev, INTR_EN, | ||
592 | qdev->intr_context[intr].intr_en_mask); | ||
593 | } else { | ||
594 | QPRINTK(qdev, INTR, ERR, | ||
595 | "Skip enable, other queue(s) are active.\n"); | ||
596 | } | ||
597 | if (qdev->legacy_check) | ||
598 | spin_unlock(&qdev->legacy_lock); | ||
599 | } | 594 | } |
595 | |||
596 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | ||
597 | if (atomic_dec_and_test(&ctx->irq_cnt)) { | ||
598 | ql_write32(qdev, INTR_EN, | ||
599 | ctx->intr_en_mask); | ||
600 | var = ql_read32(qdev, STS); | ||
601 | } | ||
602 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | ||
603 | return var; | ||
600 | } | 604 | } |
601 | 605 | ||
602 | static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr) | 606 | static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr) |
603 | { | 607 | { |
604 | u32 var = 0; | 608 | u32 var = 0; |
609 | unsigned long hw_flags; | ||
610 | struct intr_context *ctx; | ||
605 | 611 | ||
606 | if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) | 612 | /* HW disables for us if we're MSIX multi interrupts and |
607 | goto exit; | 613 | * it's not the default (zeroeth) interrupt. |
608 | else if (!atomic_read(&qdev->intr_context[intr].irq_cnt)) { | 614 | */ |
615 | if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) | ||
616 | return 0; | ||
617 | |||
618 | ctx = qdev->intr_context + intr; | ||
619 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | ||
620 | if (!atomic_read(&ctx->irq_cnt)) { | ||
609 | ql_write32(qdev, INTR_EN, | 621 | ql_write32(qdev, INTR_EN, |
610 | qdev->intr_context[intr].intr_dis_mask); | 622 | ctx->intr_dis_mask); |
611 | var = ql_read32(qdev, STS); | 623 | var = ql_read32(qdev, STS); |
612 | } | 624 | } |
613 | atomic_inc(&qdev->intr_context[intr].irq_cnt); | 625 | atomic_inc(&ctx->irq_cnt); |
614 | exit: | 626 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); |
615 | return var; | 627 | return var; |
616 | } | 628 | } |
617 | 629 | ||
@@ -623,7 +635,9 @@ static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev) | |||
623 | * and enables only if the result is zero. | 635 | * and enables only if the result is zero. |
624 | * So we precharge it here. | 636 | * So we precharge it here. |
625 | */ | 637 | */ |
626 | atomic_set(&qdev->intr_context[i].irq_cnt, 1); | 638 | if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) || |
639 | i == 0)) | ||
640 | atomic_set(&qdev->intr_context[i].irq_cnt, 1); | ||
627 | ql_enable_completion_interrupt(qdev, i); | 641 | ql_enable_completion_interrupt(qdev, i); |
628 | } | 642 | } |
629 | 643 | ||
@@ -1725,19 +1739,6 @@ static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id) | |||
1725 | return IRQ_HANDLED; | 1739 | return IRQ_HANDLED; |
1726 | } | 1740 | } |
1727 | 1741 | ||
1728 | /* We check here to see if we're already handling a legacy | ||
1729 | * interrupt. If we are, then it must belong to another | ||
1730 | * chip with which we're sharing the interrupt line. | ||
1731 | */ | ||
1732 | int ql_legacy_check(struct ql_adapter *qdev) | ||
1733 | { | ||
1734 | int err; | ||
1735 | spin_lock(&qdev->legacy_lock); | ||
1736 | err = atomic_read(&qdev->intr_context[0].irq_cnt); | ||
1737 | spin_unlock(&qdev->legacy_lock); | ||
1738 | return err; | ||
1739 | } | ||
1740 | |||
1741 | /* This handles a fatal error, MPI activity, and the default | 1742 | /* This handles a fatal error, MPI activity, and the default |
1742 | * rx_ring in an MSI-X multiple vector environment. | 1743 | * rx_ring in an MSI-X multiple vector environment. |
1743 | * In MSI/Legacy environment it also process the rest of | 1744 | * In MSI/Legacy environment it also process the rest of |
@@ -1752,12 +1753,15 @@ static irqreturn_t qlge_isr(int irq, void *dev_id) | |||
1752 | int i; | 1753 | int i; |
1753 | int work_done = 0; | 1754 | int work_done = 0; |
1754 | 1755 | ||
1755 | if (qdev->legacy_check && qdev->legacy_check(qdev)) { | 1756 | spin_lock(&qdev->hw_lock); |
1756 | QPRINTK(qdev, INTR, INFO, "Already busy, not our interrupt.\n"); | 1757 | if (atomic_read(&qdev->intr_context[0].irq_cnt)) { |
1757 | return IRQ_NONE; /* Not our interrupt */ | 1758 | QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n"); |
1759 | spin_unlock(&qdev->hw_lock); | ||
1760 | return IRQ_NONE; | ||
1758 | } | 1761 | } |
1762 | spin_unlock(&qdev->hw_lock); | ||
1759 | 1763 | ||
1760 | var = ql_read32(qdev, STS); | 1764 | var = ql_disable_completion_interrupt(qdev, intr_context->intr); |
1761 | 1765 | ||
1762 | /* | 1766 | /* |
1763 | * Check for fatal error. | 1767 | * Check for fatal error. |
@@ -1823,6 +1827,7 @@ static irqreturn_t qlge_isr(int irq, void *dev_id) | |||
1823 | } | 1827 | } |
1824 | } | 1828 | } |
1825 | } | 1829 | } |
1830 | ql_enable_completion_interrupt(qdev, intr_context->intr); | ||
1826 | return work_done ? IRQ_HANDLED : IRQ_NONE; | 1831 | return work_done ? IRQ_HANDLED : IRQ_NONE; |
1827 | } | 1832 | } |
1828 | 1833 | ||
@@ -2701,8 +2706,6 @@ msi: | |||
2701 | } | 2706 | } |
2702 | } | 2707 | } |
2703 | irq_type = LEG_IRQ; | 2708 | irq_type = LEG_IRQ; |
2704 | spin_lock_init(&qdev->legacy_lock); | ||
2705 | qdev->legacy_check = ql_legacy_check; | ||
2706 | QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n"); | 2709 | QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n"); |
2707 | } | 2710 | } |
2708 | 2711 | ||
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index c821da21d8eb..2b4e975770f3 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -81,6 +81,10 @@ static const int multicast_filter_limit = 32; | |||
81 | #define RTL8169_TX_TIMEOUT (6*HZ) | 81 | #define RTL8169_TX_TIMEOUT (6*HZ) |
82 | #define RTL8169_PHY_TIMEOUT (10*HZ) | 82 | #define RTL8169_PHY_TIMEOUT (10*HZ) |
83 | 83 | ||
84 | #define RTL_EEPROM_SIG cpu_to_le32(0x8129) | ||
85 | #define RTL_EEPROM_SIG_MASK cpu_to_le32(0xffff) | ||
86 | #define RTL_EEPROM_SIG_ADDR 0x0000 | ||
87 | |||
84 | /* write/read MMIO register */ | 88 | /* write/read MMIO register */ |
85 | #define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg)) | 89 | #define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg)) |
86 | #define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg)) | 90 | #define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg)) |
@@ -1944,14 +1948,15 @@ static void rtl_init_mac_address(struct rtl8169_private *tp, | |||
1944 | void __iomem *ioaddr) | 1948 | void __iomem *ioaddr) |
1945 | { | 1949 | { |
1946 | struct pci_dev *pdev = tp->pci_dev; | 1950 | struct pci_dev *pdev = tp->pci_dev; |
1947 | u8 cfg1; | ||
1948 | int vpd_cap; | 1951 | int vpd_cap; |
1952 | __le32 sig; | ||
1949 | u8 mac[8]; | 1953 | u8 mac[8]; |
1950 | DECLARE_MAC_BUF(buf); | 1954 | u8 cfg1; |
1951 | 1955 | ||
1952 | cfg1 = RTL_R8(Config1); | 1956 | cfg1 = RTL_R8(Config1); |
1953 | if (!(cfg1 & VPD)) { | 1957 | if (!(cfg1 & VPD)) { |
1954 | dprintk("VPD access not enabled, enabling\n"); | 1958 | if (netif_msg_probe(tp)) |
1959 | dev_info(&pdev->dev, "VPD access disabled, enabling\n"); | ||
1955 | RTL_W8(Cfg9346, Cfg9346_Unlock); | 1960 | RTL_W8(Cfg9346, Cfg9346_Unlock); |
1956 | RTL_W8(Config1, cfg1 | VPD); | 1961 | RTL_W8(Config1, cfg1 | VPD); |
1957 | RTL_W8(Cfg9346, Cfg9346_Lock); | 1962 | RTL_W8(Cfg9346, Cfg9346_Lock); |
@@ -1961,7 +1966,16 @@ static void rtl_init_mac_address(struct rtl8169_private *tp, | |||
1961 | if (!vpd_cap) | 1966 | if (!vpd_cap) |
1962 | return; | 1967 | return; |
1963 | 1968 | ||
1964 | /* MAC address is stored in EEPROM at offset 0x0e | 1969 | if (rtl_eeprom_read(pdev, vpd_cap, RTL_EEPROM_SIG_ADDR, &sig) < 0) |
1970 | return; | ||
1971 | |||
1972 | if ((sig & RTL_EEPROM_SIG_MASK) != RTL_EEPROM_SIG) { | ||
1973 | dev_info(&pdev->dev, "Missing EEPROM signature: %08x\n", sig); | ||
1974 | return; | ||
1975 | } | ||
1976 | |||
1977 | /* | ||
1978 | * MAC address is stored in EEPROM at offset 0x0e | ||
1965 | * Realtek says: "The VPD address does not have to be a DWORD-aligned | 1979 | * Realtek says: "The VPD address does not have to be a DWORD-aligned |
1966 | * address as defined in the PCI 2.2 Specifications, but the VPD data | 1980 | * address as defined in the PCI 2.2 Specifications, but the VPD data |
1967 | * is always consecutive 4-byte data starting from the VPD address | 1981 | * is always consecutive 4-byte data starting from the VPD address |
@@ -1969,14 +1983,22 @@ static void rtl_init_mac_address(struct rtl8169_private *tp, | |||
1969 | */ | 1983 | */ |
1970 | if (rtl_eeprom_read(pdev, vpd_cap, 0x000e, (__le32*)&mac[0]) < 0 || | 1984 | if (rtl_eeprom_read(pdev, vpd_cap, 0x000e, (__le32*)&mac[0]) < 0 || |
1971 | rtl_eeprom_read(pdev, vpd_cap, 0x0012, (__le32*)&mac[4]) < 0) { | 1985 | rtl_eeprom_read(pdev, vpd_cap, 0x0012, (__le32*)&mac[4]) < 0) { |
1972 | dprintk("Reading MAC address from EEPROM failed\n"); | 1986 | if (netif_msg_probe(tp)) { |
1987 | dev_warn(&pdev->dev, | ||
1988 | "reading MAC address from EEPROM failed\n"); | ||
1989 | } | ||
1973 | return; | 1990 | return; |
1974 | } | 1991 | } |
1975 | 1992 | ||
1976 | dprintk("MAC address found in EEPROM: %s\n", print_mac(buf, mac)); | 1993 | if (netif_msg_probe(tp)) { |
1994 | DECLARE_MAC_BUF(buf); | ||
1995 | |||
1996 | dev_info(&pdev->dev, "MAC address found in EEPROM: %s\n", | ||
1997 | print_mac(buf, mac)); | ||
1998 | } | ||
1977 | 1999 | ||
1978 | /* Write MAC address */ | 2000 | if (is_valid_ether_addr(mac)) |
1979 | rtl_rar_set(tp, mac); | 2001 | rtl_rar_set(tp, mac); |
1980 | } | 2002 | } |
1981 | 2003 | ||
1982 | static int __devinit | 2004 | static int __devinit |
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index 3fe01763760e..e6e3bf58a569 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -317,6 +317,7 @@ static struct mii_chip_info { | |||
317 | unsigned int type; | 317 | unsigned int type; |
318 | u32 feature; | 318 | u32 feature; |
319 | } mii_chip_table[] = { | 319 | } mii_chip_table[] = { |
320 | { "Atheros PHY AR8012", { 0x004d, 0xd020 }, LAN, 0 }, | ||
320 | { "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 }, | 321 | { "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 }, |
321 | { "Broadcom PHY AC131", { 0x0143, 0xbc70 }, LAN, 0 }, | 322 | { "Broadcom PHY AC131", { 0x0143, 0xbc70 }, LAN, 0 }, |
322 | { "Agere PHY ET1101B", { 0x0282, 0xf010 }, LAN, 0 }, | 323 | { "Agere PHY ET1101B", { 0x0282, 0xf010 }, LAN, 0 }, |
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index 8aa7460ef0e3..f59c7772f344 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -155,23 +155,17 @@ static void PRINT_PKT(u_char *buf, int length) | |||
155 | /* this enables an interrupt in the interrupt mask register */ | 155 | /* this enables an interrupt in the interrupt mask register */ |
156 | #define SMC_ENABLE_INT(lp, x) do { \ | 156 | #define SMC_ENABLE_INT(lp, x) do { \ |
157 | unsigned int __mask; \ | 157 | unsigned int __mask; \ |
158 | unsigned long __flags; \ | ||
159 | spin_lock_irqsave(&lp->lock, __flags); \ | ||
160 | __mask = SMC_GET_INT_EN((lp)); \ | 158 | __mask = SMC_GET_INT_EN((lp)); \ |
161 | __mask |= (x); \ | 159 | __mask |= (x); \ |
162 | SMC_SET_INT_EN((lp), __mask); \ | 160 | SMC_SET_INT_EN((lp), __mask); \ |
163 | spin_unlock_irqrestore(&lp->lock, __flags); \ | ||
164 | } while (0) | 161 | } while (0) |
165 | 162 | ||
166 | /* this disables an interrupt from the interrupt mask register */ | 163 | /* this disables an interrupt from the interrupt mask register */ |
167 | #define SMC_DISABLE_INT(lp, x) do { \ | 164 | #define SMC_DISABLE_INT(lp, x) do { \ |
168 | unsigned int __mask; \ | 165 | unsigned int __mask; \ |
169 | unsigned long __flags; \ | ||
170 | spin_lock_irqsave(&lp->lock, __flags); \ | ||
171 | __mask = SMC_GET_INT_EN((lp)); \ | 166 | __mask = SMC_GET_INT_EN((lp)); \ |
172 | __mask &= ~(x); \ | 167 | __mask &= ~(x); \ |
173 | SMC_SET_INT_EN((lp), __mask); \ | 168 | SMC_SET_INT_EN((lp), __mask); \ |
174 | spin_unlock_irqrestore(&lp->lock, __flags); \ | ||
175 | } while (0) | 169 | } while (0) |
176 | 170 | ||
177 | /* | 171 | /* |
@@ -180,7 +174,7 @@ static void PRINT_PKT(u_char *buf, int length) | |||
180 | static void smc911x_reset(struct net_device *dev) | 174 | static void smc911x_reset(struct net_device *dev) |
181 | { | 175 | { |
182 | struct smc911x_local *lp = netdev_priv(dev); | 176 | struct smc911x_local *lp = netdev_priv(dev); |
183 | unsigned int reg, timeout=0, resets=1; | 177 | unsigned int reg, timeout=0, resets=1, irq_cfg; |
184 | unsigned long flags; | 178 | unsigned long flags; |
185 | 179 | ||
186 | DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); | 180 | DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); |
@@ -252,7 +246,12 @@ static void smc911x_reset(struct net_device *dev) | |||
252 | * Deassert IRQ for 1*10us for edge type interrupts | 246 | * Deassert IRQ for 1*10us for edge type interrupts |
253 | * and drive IRQ pin push-pull | 247 | * and drive IRQ pin push-pull |
254 | */ | 248 | */ |
255 | SMC_SET_IRQ_CFG(lp, (1 << 24) | INT_CFG_IRQ_EN_ | INT_CFG_IRQ_TYPE_); | 249 | irq_cfg = (1 << 24) | INT_CFG_IRQ_EN_ | INT_CFG_IRQ_TYPE_; |
250 | #ifdef SMC_DYNAMIC_BUS_CONFIG | ||
251 | if (lp->cfg.irq_polarity) | ||
252 | irq_cfg |= INT_CFG_IRQ_POL_; | ||
253 | #endif | ||
254 | SMC_SET_IRQ_CFG(lp, irq_cfg); | ||
256 | 255 | ||
257 | /* clear anything saved */ | 256 | /* clear anything saved */ |
258 | if (lp->pending_tx_skb != NULL) { | 257 | if (lp->pending_tx_skb != NULL) { |
@@ -274,6 +273,8 @@ static void smc911x_enable(struct net_device *dev) | |||
274 | 273 | ||
275 | DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); | 274 | DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); |
276 | 275 | ||
276 | spin_lock_irqsave(&lp->lock, flags); | ||
277 | |||
277 | SMC_SET_MAC_ADDR(lp, dev->dev_addr); | 278 | SMC_SET_MAC_ADDR(lp, dev->dev_addr); |
278 | 279 | ||
279 | /* Enable TX */ | 280 | /* Enable TX */ |
@@ -286,12 +287,10 @@ static void smc911x_enable(struct net_device *dev) | |||
286 | SMC_SET_FIFO_TSL(lp, 64); | 287 | SMC_SET_FIFO_TSL(lp, 64); |
287 | SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000); | 288 | SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000); |
288 | 289 | ||
289 | spin_lock_irqsave(&lp->lock, flags); | ||
290 | SMC_GET_MAC_CR(lp, cr); | 290 | SMC_GET_MAC_CR(lp, cr); |
291 | cr |= MAC_CR_TXEN_ | MAC_CR_HBDIS_; | 291 | cr |= MAC_CR_TXEN_ | MAC_CR_HBDIS_; |
292 | SMC_SET_MAC_CR(lp, cr); | 292 | SMC_SET_MAC_CR(lp, cr); |
293 | SMC_SET_TX_CFG(lp, TX_CFG_TX_ON_); | 293 | SMC_SET_TX_CFG(lp, TX_CFG_TX_ON_); |
294 | spin_unlock_irqrestore(&lp->lock, flags); | ||
295 | 294 | ||
296 | /* Add 2 byte padding to start of packets */ | 295 | /* Add 2 byte padding to start of packets */ |
297 | SMC_SET_RX_CFG(lp, (2<<8) & RX_CFG_RXDOFF_); | 296 | SMC_SET_RX_CFG(lp, (2<<8) & RX_CFG_RXDOFF_); |
@@ -300,9 +299,7 @@ static void smc911x_enable(struct net_device *dev) | |||
300 | if (cr & MAC_CR_RXEN_) | 299 | if (cr & MAC_CR_RXEN_) |
301 | DBG(SMC_DEBUG_RX, "%s: Receiver already enabled\n", dev->name); | 300 | DBG(SMC_DEBUG_RX, "%s: Receiver already enabled\n", dev->name); |
302 | 301 | ||
303 | spin_lock_irqsave(&lp->lock, flags); | ||
304 | SMC_SET_MAC_CR(lp, cr | MAC_CR_RXEN_); | 302 | SMC_SET_MAC_CR(lp, cr | MAC_CR_RXEN_); |
305 | spin_unlock_irqrestore(&lp->lock, flags); | ||
306 | 303 | ||
307 | /* Interrupt on every received packet */ | 304 | /* Interrupt on every received packet */ |
308 | SMC_SET_FIFO_RSA(lp, 0x01); | 305 | SMC_SET_FIFO_RSA(lp, 0x01); |
@@ -318,6 +315,8 @@ static void smc911x_enable(struct net_device *dev) | |||
318 | mask|=INT_EN_RDFO_EN_; | 315 | mask|=INT_EN_RDFO_EN_; |
319 | } | 316 | } |
320 | SMC_ENABLE_INT(lp, mask); | 317 | SMC_ENABLE_INT(lp, mask); |
318 | |||
319 | spin_unlock_irqrestore(&lp->lock, flags); | ||
321 | } | 320 | } |
322 | 321 | ||
323 | /* | 322 | /* |
@@ -458,7 +457,6 @@ static void smc911x_hardware_send_pkt(struct net_device *dev) | |||
458 | struct sk_buff *skb; | 457 | struct sk_buff *skb; |
459 | unsigned int cmdA, cmdB, len; | 458 | unsigned int cmdA, cmdB, len; |
460 | unsigned char *buf; | 459 | unsigned char *buf; |
461 | unsigned long flags; | ||
462 | 460 | ||
463 | DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", dev->name, __func__); | 461 | DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", dev->name, __func__); |
464 | BUG_ON(lp->pending_tx_skb == NULL); | 462 | BUG_ON(lp->pending_tx_skb == NULL); |
@@ -503,11 +501,9 @@ static void smc911x_hardware_send_pkt(struct net_device *dev) | |||
503 | dev->trans_start = jiffies; | 501 | dev->trans_start = jiffies; |
504 | dev_kfree_skb(skb); | 502 | dev_kfree_skb(skb); |
505 | #endif | 503 | #endif |
506 | spin_lock_irqsave(&lp->lock, flags); | ||
507 | if (!lp->tx_throttle) { | 504 | if (!lp->tx_throttle) { |
508 | netif_wake_queue(dev); | 505 | netif_wake_queue(dev); |
509 | } | 506 | } |
510 | spin_unlock_irqrestore(&lp->lock, flags); | ||
511 | SMC_ENABLE_INT(lp, INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_); | 507 | SMC_ENABLE_INT(lp, INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_); |
512 | } | 508 | } |
513 | 509 | ||
@@ -526,6 +522,8 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
526 | DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", | 522 | DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", |
527 | dev->name, __func__); | 523 | dev->name, __func__); |
528 | 524 | ||
525 | spin_lock_irqsave(&lp->lock, flags); | ||
526 | |||
529 | BUG_ON(lp->pending_tx_skb != NULL); | 527 | BUG_ON(lp->pending_tx_skb != NULL); |
530 | 528 | ||
531 | free = SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TDFREE_; | 529 | free = SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TDFREE_; |
@@ -535,12 +533,10 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
535 | if (free <= SMC911X_TX_FIFO_LOW_THRESHOLD) { | 533 | if (free <= SMC911X_TX_FIFO_LOW_THRESHOLD) { |
536 | DBG(SMC_DEBUG_TX, "%s: Disabling data flow due to low FIFO space (%d)\n", | 534 | DBG(SMC_DEBUG_TX, "%s: Disabling data flow due to low FIFO space (%d)\n", |
537 | dev->name, free); | 535 | dev->name, free); |
538 | spin_lock_irqsave(&lp->lock, flags); | ||
539 | /* Reenable when at least 1 packet of size MTU present */ | 536 | /* Reenable when at least 1 packet of size MTU present */ |
540 | SMC_SET_FIFO_TDA(lp, (SMC911X_TX_FIFO_LOW_THRESHOLD)/64); | 537 | SMC_SET_FIFO_TDA(lp, (SMC911X_TX_FIFO_LOW_THRESHOLD)/64); |
541 | lp->tx_throttle = 1; | 538 | lp->tx_throttle = 1; |
542 | netif_stop_queue(dev); | 539 | netif_stop_queue(dev); |
543 | spin_unlock_irqrestore(&lp->lock, flags); | ||
544 | } | 540 | } |
545 | 541 | ||
546 | /* Drop packets when we run out of space in TX FIFO | 542 | /* Drop packets when we run out of space in TX FIFO |
@@ -556,6 +552,7 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
556 | lp->pending_tx_skb = NULL; | 552 | lp->pending_tx_skb = NULL; |
557 | dev->stats.tx_errors++; | 553 | dev->stats.tx_errors++; |
558 | dev->stats.tx_dropped++; | 554 | dev->stats.tx_dropped++; |
555 | spin_unlock_irqrestore(&lp->lock, flags); | ||
559 | dev_kfree_skb(skb); | 556 | dev_kfree_skb(skb); |
560 | return 0; | 557 | return 0; |
561 | } | 558 | } |
@@ -565,7 +562,6 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
565 | /* If the DMA is already running then defer this packet Tx until | 562 | /* If the DMA is already running then defer this packet Tx until |
566 | * the DMA IRQ starts it | 563 | * the DMA IRQ starts it |
567 | */ | 564 | */ |
568 | spin_lock_irqsave(&lp->lock, flags); | ||
569 | if (lp->txdma_active) { | 565 | if (lp->txdma_active) { |
570 | DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Tx DMA running, deferring packet\n", dev->name); | 566 | DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Tx DMA running, deferring packet\n", dev->name); |
571 | lp->pending_tx_skb = skb; | 567 | lp->pending_tx_skb = skb; |
@@ -576,11 +572,11 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
576 | DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Activating Tx DMA\n", dev->name); | 572 | DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Activating Tx DMA\n", dev->name); |
577 | lp->txdma_active = 1; | 573 | lp->txdma_active = 1; |
578 | } | 574 | } |
579 | spin_unlock_irqrestore(&lp->lock, flags); | ||
580 | } | 575 | } |
581 | #endif | 576 | #endif |
582 | lp->pending_tx_skb = skb; | 577 | lp->pending_tx_skb = skb; |
583 | smc911x_hardware_send_pkt(dev); | 578 | smc911x_hardware_send_pkt(dev); |
579 | spin_unlock_irqrestore(&lp->lock, flags); | ||
584 | 580 | ||
585 | return 0; | 581 | return 0; |
586 | } | 582 | } |
@@ -1242,7 +1238,7 @@ smc911x_rx_dma_irq(int dma, void *data) | |||
1242 | netif_rx(skb); | 1238 | netif_rx(skb); |
1243 | 1239 | ||
1244 | spin_lock_irqsave(&lp->lock, flags); | 1240 | spin_lock_irqsave(&lp->lock, flags); |
1245 | pkts = (SMC_GET_RX_FIFO_INF() & RX_FIFO_INF_RXSUSED_) >> 16; | 1241 | pkts = (SMC_GET_RX_FIFO_INF(lp) & RX_FIFO_INF_RXSUSED_) >> 16; |
1246 | if (pkts != 0) { | 1242 | if (pkts != 0) { |
1247 | smc911x_rcv(dev); | 1243 | smc911x_rcv(dev); |
1248 | }else { | 1244 | }else { |
@@ -2054,7 +2050,7 @@ err_out: | |||
2054 | */ | 2050 | */ |
2055 | static int smc911x_drv_probe(struct platform_device *pdev) | 2051 | static int smc911x_drv_probe(struct platform_device *pdev) |
2056 | { | 2052 | { |
2057 | struct smc91x_platdata *pd = pdev->dev.platform_data; | 2053 | struct smc911x_platdata *pd = pdev->dev.platform_data; |
2058 | struct net_device *ndev; | 2054 | struct net_device *ndev; |
2059 | struct resource *res; | 2055 | struct resource *res; |
2060 | struct smc911x_local *lp; | 2056 | struct smc911x_local *lp; |
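The smc911x hunks above replace the scattered spin_lock_irqsave()/spin_unlock_irqrestore() pairs with a single acquisition of lp->lock that covers all of smc911x_enable() and the whole TX submit path; smc911x_hardware_send_pkt() is now called with the lock already held. A minimal sketch of the resulting pattern, with the function body reduced to its locking skeleton:

    /* Sketch only: the coarse-grained locking introduced above. */
    static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
    {
        struct smc911x_local *lp = netdev_priv(dev);
        unsigned long flags;

        spin_lock_irqsave(&lp->lock, flags);    /* taken once for the whole path */
        /* ... FIFO space check, tx_throttle/netif_stop_queue, DMA deferral ... */
        lp->pending_tx_skb = skb;
        smc911x_hardware_send_pkt(dev);         /* now runs under lp->lock */
        spin_unlock_irqrestore(&lp->lock, flags);

        return 0;
    }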
diff --git a/drivers/net/smc911x.h b/drivers/net/smc911x.h index bf6240f23f5d..cc7d85bdfb3e 100644 --- a/drivers/net/smc911x.h +++ b/drivers/net/smc911x.h | |||
@@ -50,6 +50,10 @@ | |||
50 | #define SMC_DYNAMIC_BUS_CONFIG | 50 | #define SMC_DYNAMIC_BUS_CONFIG |
51 | #endif | 51 | #endif |
52 | 52 | ||
53 | #ifdef SMC_USE_PXA_DMA | ||
54 | #define SMC_USE_DMA | ||
55 | #endif | ||
56 | |||
53 | /* store this information for the driver.. */ | 57 | /* store this information for the driver.. */ |
54 | struct smc911x_local { | 58 | struct smc911x_local { |
55 | /* | 59 | /* |
@@ -196,8 +200,6 @@ static inline void SMC_outsl(struct smc911x_local *lp, int reg, | |||
196 | 200 | ||
197 | 201 | ||
198 | #ifdef SMC_USE_PXA_DMA | 202 | #ifdef SMC_USE_PXA_DMA |
199 | #define SMC_USE_DMA | ||
200 | |||
201 | /* | 203 | /* |
202 | * Define the request and free functions | 204 | * Define the request and free functions |
203 | * These are unfortunately architecture specific as no generic allocation | 205 | * These are unfortunately architecture specific as no generic allocation |
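Moving the SMC_USE_DMA definition to the top of smc911x.h makes it visible before struct smc911x_local is declared and before the generic code tests it, rather than only inside the PXA-specific section further down. A hedged illustration of why the ordering matters (the gated fields shown are placeholders, not the driver's exact layout):

    /* Illustrative only: a macro that gates struct members must be defined
     * before the struct declaration that uses it. */
    #ifdef SMC_USE_PXA_DMA
    #define SMC_USE_DMA
    #endif

    struct smc911x_local {
        /* ... common fields ... */
    #ifdef SMC_USE_DMA
        /* DMA-only bookkeeping is compiled in only if the define is
         * already visible at this point */
        int rxdma, txdma;
        int rxdma_active, txdma_active;
    #endif
    };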
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c index 9b95c4049b31..0f1d6bdd51a2 100644 --- a/drivers/net/wireless/ath5k/base.c +++ b/drivers/net/wireless/ath5k/base.c | |||
@@ -340,9 +340,9 @@ static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp) | |||
340 | } | 340 | } |
341 | 341 | ||
342 | /* Interrupt handling */ | 342 | /* Interrupt handling */ |
343 | static int ath5k_init(struct ath5k_softc *sc); | 343 | static int ath5k_init(struct ath5k_softc *sc, bool is_resume); |
344 | static int ath5k_stop_locked(struct ath5k_softc *sc); | 344 | static int ath5k_stop_locked(struct ath5k_softc *sc); |
345 | static int ath5k_stop_hw(struct ath5k_softc *sc); | 345 | static int ath5k_stop_hw(struct ath5k_softc *sc, bool is_suspend); |
346 | static irqreturn_t ath5k_intr(int irq, void *dev_id); | 346 | static irqreturn_t ath5k_intr(int irq, void *dev_id); |
347 | static void ath5k_tasklet_reset(unsigned long data); | 347 | static void ath5k_tasklet_reset(unsigned long data); |
348 | 348 | ||
@@ -646,7 +646,7 @@ ath5k_pci_suspend(struct pci_dev *pdev, pm_message_t state) | |||
646 | 646 | ||
647 | ath5k_led_off(sc); | 647 | ath5k_led_off(sc); |
648 | 648 | ||
649 | ath5k_stop_hw(sc); | 649 | ath5k_stop_hw(sc, true); |
650 | 650 | ||
651 | free_irq(pdev->irq, sc); | 651 | free_irq(pdev->irq, sc); |
652 | pci_save_state(pdev); | 652 | pci_save_state(pdev); |
@@ -683,7 +683,7 @@ ath5k_pci_resume(struct pci_dev *pdev) | |||
683 | goto err_no_irq; | 683 | goto err_no_irq; |
684 | } | 684 | } |
685 | 685 | ||
686 | err = ath5k_init(sc); | 686 | err = ath5k_init(sc, true); |
687 | if (err) | 687 | if (err) |
688 | goto err_irq; | 688 | goto err_irq; |
689 | ath5k_led_enable(sc); | 689 | ath5k_led_enable(sc); |
@@ -2200,12 +2200,17 @@ ath5k_beacon_config(struct ath5k_softc *sc) | |||
2200 | \********************/ | 2200 | \********************/ |
2201 | 2201 | ||
2202 | static int | 2202 | static int |
2203 | ath5k_init(struct ath5k_softc *sc) | 2203 | ath5k_init(struct ath5k_softc *sc, bool is_resume) |
2204 | { | 2204 | { |
2205 | int ret; | 2205 | int ret; |
2206 | 2206 | ||
2207 | mutex_lock(&sc->lock); | 2207 | mutex_lock(&sc->lock); |
2208 | 2208 | ||
2209 | if (is_resume && !test_bit(ATH_STAT_STARTED, sc->status)) | ||
2210 | goto out_ok; | ||
2211 | |||
2212 | __clear_bit(ATH_STAT_STARTED, sc->status); | ||
2213 | |||
2209 | ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "mode %d\n", sc->opmode); | 2214 | ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "mode %d\n", sc->opmode); |
2210 | 2215 | ||
2211 | /* | 2216 | /* |
@@ -2230,12 +2235,15 @@ ath5k_init(struct ath5k_softc *sc) | |||
2230 | if (ret) | 2235 | if (ret) |
2231 | goto done; | 2236 | goto done; |
2232 | 2237 | ||
2238 | __set_bit(ATH_STAT_STARTED, sc->status); | ||
2239 | |||
2233 | /* Set ack to be sent at low bit-rates */ | 2240 | /* Set ack to be sent at low bit-rates */ |
2234 | ath5k_hw_set_ack_bitrate_high(sc->ah, false); | 2241 | ath5k_hw_set_ack_bitrate_high(sc->ah, false); |
2235 | 2242 | ||
2236 | mod_timer(&sc->calib_tim, round_jiffies(jiffies + | 2243 | mod_timer(&sc->calib_tim, round_jiffies(jiffies + |
2237 | msecs_to_jiffies(ath5k_calinterval * 1000))); | 2244 | msecs_to_jiffies(ath5k_calinterval * 1000))); |
2238 | 2245 | ||
2246 | out_ok: | ||
2239 | ret = 0; | 2247 | ret = 0; |
2240 | done: | 2248 | done: |
2241 | mmiowb(); | 2249 | mmiowb(); |
@@ -2290,7 +2298,7 @@ ath5k_stop_locked(struct ath5k_softc *sc) | |||
2290 | * stop is preempted). | 2298 | * stop is preempted). |
2291 | */ | 2299 | */ |
2292 | static int | 2300 | static int |
2293 | ath5k_stop_hw(struct ath5k_softc *sc) | 2301 | ath5k_stop_hw(struct ath5k_softc *sc, bool is_suspend) |
2294 | { | 2302 | { |
2295 | int ret; | 2303 | int ret; |
2296 | 2304 | ||
@@ -2321,6 +2329,9 @@ ath5k_stop_hw(struct ath5k_softc *sc) | |||
2321 | } | 2329 | } |
2322 | } | 2330 | } |
2323 | ath5k_txbuf_free(sc, sc->bbuf); | 2331 | ath5k_txbuf_free(sc, sc->bbuf); |
2332 | if (!is_suspend) | ||
2333 | __clear_bit(ATH_STAT_STARTED, sc->status); | ||
2334 | |||
2324 | mmiowb(); | 2335 | mmiowb(); |
2325 | mutex_unlock(&sc->lock); | 2336 | mutex_unlock(&sc->lock); |
2326 | 2337 | ||
@@ -2718,12 +2729,12 @@ ath5k_reset_wake(struct ath5k_softc *sc) | |||
2718 | 2729 | ||
2719 | static int ath5k_start(struct ieee80211_hw *hw) | 2730 | static int ath5k_start(struct ieee80211_hw *hw) |
2720 | { | 2731 | { |
2721 | return ath5k_init(hw->priv); | 2732 | return ath5k_init(hw->priv, false); |
2722 | } | 2733 | } |
2723 | 2734 | ||
2724 | static void ath5k_stop(struct ieee80211_hw *hw) | 2735 | static void ath5k_stop(struct ieee80211_hw *hw) |
2725 | { | 2736 | { |
2726 | ath5k_stop_hw(hw->priv); | 2737 | ath5k_stop_hw(hw->priv, false); |
2727 | } | 2738 | } |
2728 | 2739 | ||
2729 | static int ath5k_add_interface(struct ieee80211_hw *hw, | 2740 | static int ath5k_add_interface(struct ieee80211_hw *hw, |
diff --git a/drivers/net/wireless/ath5k/base.h b/drivers/net/wireless/ath5k/base.h index 9d0b728928e3..06d1054ca94b 100644 --- a/drivers/net/wireless/ath5k/base.h +++ b/drivers/net/wireless/ath5k/base.h | |||
@@ -128,11 +128,12 @@ struct ath5k_softc { | |||
128 | size_t desc_len; /* size of TX/RX descriptors */ | 128 | size_t desc_len; /* size of TX/RX descriptors */ |
129 | u16 cachelsz; /* cache line size */ | 129 | u16 cachelsz; /* cache line size */ |
130 | 130 | ||
131 | DECLARE_BITMAP(status, 4); | 131 | DECLARE_BITMAP(status, 5); |
132 | #define ATH_STAT_INVALID 0 /* disable hardware accesses */ | 132 | #define ATH_STAT_INVALID 0 /* disable hardware accesses */ |
133 | #define ATH_STAT_MRRETRY 1 /* multi-rate retry support */ | 133 | #define ATH_STAT_MRRETRY 1 /* multi-rate retry support */ |
134 | #define ATH_STAT_PROMISC 2 | 134 | #define ATH_STAT_PROMISC 2 |
135 | #define ATH_STAT_LEDSOFT 3 /* enable LED gpio status */ | 135 | #define ATH_STAT_LEDSOFT 3 /* enable LED gpio status */ |
136 | #define ATH_STAT_STARTED 4 /* opened & irqs enabled */ | ||
136 | 137 | ||
137 | unsigned int filter_flags; /* HW flags, AR5K_RX_FILTER_* */ | 138 | unsigned int filter_flags; /* HW flags, AR5K_RX_FILTER_* */ |
138 | unsigned int curmode; /* current phy mode */ | 139 | unsigned int curmode; /* current phy mode */ |
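The ath5k changes add an ATH_STAT_STARTED bit so that suspend/resume can tell whether the interface was actually up. The resulting call flow, summarised from the hunks above:

    /*
     * ATH_STAT_STARTED handling introduced above (summary, not new code):
     *
     *   ath5k_start()       -> ath5k_init(sc, false):    full init, sets STARTED
     *   ath5k_stop()        -> ath5k_stop_hw(sc, false): clears STARTED
     *   ath5k_pci_suspend() -> ath5k_stop_hw(sc, true):  leaves STARTED set
     *   ath5k_pci_resume()  -> ath5k_init(sc, true):     returns 0 without
     *                          touching the hardware unless STARTED is set,
     *                          i.e. unless the interface was up before suspend
     */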
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c index 50904771f291..e0512e49d6d3 100644 --- a/drivers/net/wireless/orinoco.c +++ b/drivers/net/wireless/orinoco.c | |||
@@ -433,7 +433,7 @@ struct fw_info { | |||
433 | const static struct fw_info orinoco_fw[] = { | 433 | const static struct fw_info orinoco_fw[] = { |
434 | { "", "agere_sta_fw.bin", "agere_ap_fw.bin", 0x00390000, 1000 }, | 434 | { "", "agere_sta_fw.bin", "agere_ap_fw.bin", 0x00390000, 1000 }, |
435 | { "", "prism_sta_fw.bin", "prism_ap_fw.bin", 0, 1024 }, | 435 | { "", "prism_sta_fw.bin", "prism_ap_fw.bin", 0, 1024 }, |
436 | { "symbol_sp24t_prim_fw", "symbol_sp24t_sec_fw", "", 0x00003100, 0x100 } | 436 | { "symbol_sp24t_prim_fw", "symbol_sp24t_sec_fw", "", 0x00003100, 512 } |
437 | }; | 437 | }; |
438 | 438 | ||
439 | /* Structure used to access fields in FW | 439 | /* Structure used to access fields in FW |
@@ -458,7 +458,7 @@ orinoco_dl_firmware(struct orinoco_private *priv, | |||
458 | int ap) | 458 | int ap) |
459 | { | 459 | { |
460 | /* Plug Data Area (PDA) */ | 460 | /* Plug Data Area (PDA) */ |
461 | __le16 pda[512] = { 0 }; | 461 | __le16 *pda; |
462 | 462 | ||
463 | hermes_t *hw = &priv->hw; | 463 | hermes_t *hw = &priv->hw; |
464 | const struct firmware *fw_entry; | 464 | const struct firmware *fw_entry; |
@@ -467,7 +467,11 @@ orinoco_dl_firmware(struct orinoco_private *priv, | |||
467 | const unsigned char *end; | 467 | const unsigned char *end; |
468 | const char *firmware; | 468 | const char *firmware; |
469 | struct net_device *dev = priv->ndev; | 469 | struct net_device *dev = priv->ndev; |
470 | int err; | 470 | int err = 0; |
471 | |||
472 | pda = kzalloc(fw->pda_size, GFP_KERNEL); | ||
473 | if (!pda) | ||
474 | return -ENOMEM; | ||
471 | 475 | ||
472 | if (ap) | 476 | if (ap) |
473 | firmware = fw->ap_fw; | 477 | firmware = fw->ap_fw; |
@@ -478,17 +482,17 @@ orinoco_dl_firmware(struct orinoco_private *priv, | |||
478 | dev->name, firmware); | 482 | dev->name, firmware); |
479 | 483 | ||
480 | /* Read current plug data */ | 484 | /* Read current plug data */ |
481 | err = hermes_read_pda(hw, pda, fw->pda_addr, | 485 | err = hermes_read_pda(hw, pda, fw->pda_addr, fw->pda_size, 0); |
482 | min_t(u16, fw->pda_size, sizeof(pda)), 0); | ||
483 | printk(KERN_DEBUG "%s: Read PDA returned %d\n", dev->name, err); | 486 | printk(KERN_DEBUG "%s: Read PDA returned %d\n", dev->name, err); |
484 | if (err) | 487 | if (err) |
485 | return err; | 488 | goto free; |
486 | 489 | ||
487 | err = request_firmware(&fw_entry, firmware, priv->dev); | 490 | err = request_firmware(&fw_entry, firmware, priv->dev); |
488 | if (err) { | 491 | if (err) { |
489 | printk(KERN_ERR "%s: Cannot find firmware %s\n", | 492 | printk(KERN_ERR "%s: Cannot find firmware %s\n", |
490 | dev->name, firmware); | 493 | dev->name, firmware); |
491 | return -ENOENT; | 494 | err = -ENOENT; |
495 | goto free; | ||
492 | } | 496 | } |
493 | 497 | ||
494 | hdr = (const struct orinoco_fw_header *) fw_entry->data; | 498 | hdr = (const struct orinoco_fw_header *) fw_entry->data; |
@@ -532,6 +536,9 @@ orinoco_dl_firmware(struct orinoco_private *priv, | |||
532 | 536 | ||
533 | abort: | 537 | abort: |
534 | release_firmware(fw_entry); | 538 | release_firmware(fw_entry); |
539 | |||
540 | free: | ||
541 | kfree(pda); | ||
535 | return err; | 542 | return err; |
536 | } | 543 | } |
537 | 544 | ||
@@ -549,12 +556,12 @@ symbol_dl_image(struct orinoco_private *priv, const struct fw_info *fw, | |||
549 | int secondary) | 556 | int secondary) |
550 | { | 557 | { |
551 | hermes_t *hw = &priv->hw; | 558 | hermes_t *hw = &priv->hw; |
552 | int ret; | 559 | int ret = 0; |
553 | const unsigned char *ptr; | 560 | const unsigned char *ptr; |
554 | const unsigned char *first_block; | 561 | const unsigned char *first_block; |
555 | 562 | ||
556 | /* Plug Data Area (PDA) */ | 563 | /* Plug Data Area (PDA) */ |
557 | __le16 pda[256]; | 564 | __le16 *pda = NULL; |
558 | 565 | ||
559 | /* Binary block begins after the 0x1A marker */ | 566 | /* Binary block begins after the 0x1A marker */ |
560 | ptr = image; | 567 | ptr = image; |
@@ -563,28 +570,33 @@ symbol_dl_image(struct orinoco_private *priv, const struct fw_info *fw, | |||
563 | 570 | ||
564 | /* Read the PDA from EEPROM */ | 571 | /* Read the PDA from EEPROM */ |
565 | if (secondary) { | 572 | if (secondary) { |
566 | ret = hermes_read_pda(hw, pda, fw->pda_addr, sizeof(pda), 1); | 573 | pda = kzalloc(fw->pda_size, GFP_KERNEL); |
574 | if (!pda) | ||
575 | return -ENOMEM; | ||
576 | |||
577 | ret = hermes_read_pda(hw, pda, fw->pda_addr, fw->pda_size, 1); | ||
567 | if (ret) | 578 | if (ret) |
568 | return ret; | 579 | goto free; |
569 | } | 580 | } |
570 | 581 | ||
571 | /* Stop the firmware, so that it can be safely rewritten */ | 582 | /* Stop the firmware, so that it can be safely rewritten */ |
572 | if (priv->stop_fw) { | 583 | if (priv->stop_fw) { |
573 | ret = priv->stop_fw(priv, 1); | 584 | ret = priv->stop_fw(priv, 1); |
574 | if (ret) | 585 | if (ret) |
575 | return ret; | 586 | goto free; |
576 | } | 587 | } |
577 | 588 | ||
578 | /* Program the adapter with new firmware */ | 589 | /* Program the adapter with new firmware */ |
579 | ret = hermes_program(hw, first_block, end); | 590 | ret = hermes_program(hw, first_block, end); |
580 | if (ret) | 591 | if (ret) |
581 | return ret; | 592 | goto free; |
582 | 593 | ||
583 | /* Write the PDA to the adapter */ | 594 | /* Write the PDA to the adapter */ |
584 | if (secondary) { | 595 | if (secondary) { |
585 | size_t len = hermes_blocks_length(first_block); | 596 | size_t len = hermes_blocks_length(first_block); |
586 | ptr = first_block + len; | 597 | ptr = first_block + len; |
587 | ret = hermes_apply_pda(hw, ptr, pda); | 598 | ret = hermes_apply_pda(hw, ptr, pda); |
599 | kfree(pda); | ||
588 | if (ret) | 600 | if (ret) |
589 | return ret; | 601 | return ret; |
590 | } | 602 | } |
@@ -608,6 +620,10 @@ symbol_dl_image(struct orinoco_private *priv, const struct fw_info *fw, | |||
608 | return -ENODEV; | 620 | return -ENODEV; |
609 | 621 | ||
610 | return 0; | 622 | return 0; |
623 | |||
624 | free: | ||
625 | kfree(pda); | ||
626 | return ret; | ||
611 | } | 627 | } |
612 | 628 | ||
613 | 629 | ||
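In the orinoco changes the Plug Data Area buffers move off the kernel stack (the old __le16 pda[512] and pda[256] arrays were 1 KB and 512 bytes of stack, respectively) onto the heap, sized by fw->pda_size, with every early exit funnelled through a single kfree(). A condensed sketch of the new control flow:

    /* Condensed sketch of the heap-allocated PDA handling introduced above. */
    __le16 *pda;
    int err = 0;

    pda = kzalloc(fw->pda_size, GFP_KERNEL);    /* pda_size is in bytes */
    if (!pda)
        return -ENOMEM;

    err = hermes_read_pda(hw, pda, fw->pda_addr, fw->pda_size, 0);
    if (err)
        goto free;

    /* ... request, program and apply the firmware ... */

    free:
        kfree(pda);
        return err;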
diff --git a/drivers/net/wireless/p54/p54common.c b/drivers/net/wireless/p54/p54common.c index 117c7d3a52b0..2d022f83774c 100644 --- a/drivers/net/wireless/p54/p54common.c +++ b/drivers/net/wireless/p54/p54common.c | |||
@@ -306,8 +306,8 @@ static int p54_convert_rev1(struct ieee80211_hw *dev, | |||
306 | return 0; | 306 | return 0; |
307 | } | 307 | } |
308 | 308 | ||
309 | static const char *p54_rf_chips[] = { "NULL", "Indigo?", "Duette", | 309 | static const char *p54_rf_chips[] = { "NULL", "Duette3", "Duette2", |
310 | "Frisbee", "Xbow", "Longbow" }; | 310 | "Frisbee", "Xbow", "Longbow", "NULL", "NULL" }; |
311 | static int p54_init_xbow_synth(struct ieee80211_hw *dev); | 311 | static int p54_init_xbow_synth(struct ieee80211_hw *dev); |
312 | 312 | ||
313 | static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len) | 313 | static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len) |
@@ -319,6 +319,7 @@ static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len) | |||
319 | void *tmp; | 319 | void *tmp; |
320 | int err; | 320 | int err; |
321 | u8 *end = (u8 *)eeprom + len; | 321 | u8 *end = (u8 *)eeprom + len; |
322 | u16 synth; | ||
322 | DECLARE_MAC_BUF(mac); | 323 | DECLARE_MAC_BUF(mac); |
323 | 324 | ||
324 | wrap = (struct eeprom_pda_wrap *) eeprom; | 325 | wrap = (struct eeprom_pda_wrap *) eeprom; |
@@ -400,8 +401,8 @@ static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len) | |||
400 | tmp = entry->data; | 401 | tmp = entry->data; |
401 | while ((u8 *)tmp < entry->data + data_len) { | 402 | while ((u8 *)tmp < entry->data + data_len) { |
402 | struct bootrec_exp_if *exp_if = tmp; | 403 | struct bootrec_exp_if *exp_if = tmp; |
403 | if (le16_to_cpu(exp_if->if_id) == 0xF) | 404 | if (le16_to_cpu(exp_if->if_id) == 0xf) |
404 | priv->rxhw = le16_to_cpu(exp_if->variant) & 0x07; | 405 | synth = le16_to_cpu(exp_if->variant); |
405 | tmp += sizeof(struct bootrec_exp_if); | 406 | tmp += sizeof(struct bootrec_exp_if); |
406 | } | 407 | } |
407 | break; | 408 | break; |
@@ -427,22 +428,13 @@ static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len) | |||
427 | goto err; | 428 | goto err; |
428 | } | 429 | } |
429 | 430 | ||
430 | switch (priv->rxhw) { | 431 | priv->rxhw = synth & 0x07; |
431 | case 4: /* XBow */ | 432 | if (priv->rxhw == 4) |
432 | p54_init_xbow_synth(dev); | 433 | p54_init_xbow_synth(dev); |
433 | case 1: /* Indigo? */ | 434 | if (!(synth & 0x40)) |
434 | case 2: /* Duette */ | ||
435 | dev->wiphy->bands[IEEE80211_BAND_5GHZ] = &band_5GHz; | ||
436 | case 3: /* Frisbee */ | ||
437 | case 5: /* Longbow */ | ||
438 | dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &band_2GHz; | 435 | dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &band_2GHz; |
439 | break; | 436 | if (!(synth & 0x80)) |
440 | default: | 437 | dev->wiphy->bands[IEEE80211_BAND_5GHZ] = &band_5GHz; |
441 | printk(KERN_ERR "%s: unsupported RF-Chip\n", | ||
442 | wiphy_name(dev->wiphy)); | ||
443 | err = -EINVAL; | ||
444 | goto err; | ||
445 | } | ||
446 | 438 | ||
447 | if (!is_valid_ether_addr(dev->wiphy->perm_addr)) { | 439 | if (!is_valid_ether_addr(dev->wiphy->perm_addr)) { |
448 | u8 perm_addr[ETH_ALEN]; | 440 | u8 perm_addr[ETH_ALEN]; |
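The p54 EEPROM parser now keeps the whole exp_if variant word and decodes it in one place: the low three bits pick the RF chip (indexing p54_rf_chips[]), and two flag bits gate the advertised bands. A sketch of the decode, using only what appears in the hunks above (the wrapper function and the priv lookup are made up for illustration):

    /* Illustration only: decoding of the synth word as done above. */
    static void sketch_decode_synth(struct ieee80211_hw *dev, u16 synth)
    {
        struct p54_common *priv = dev->priv;

        priv->rxhw = synth & 0x07;      /* RF chip id, indexes p54_rf_chips[] */
        if (priv->rxhw == 4)            /* Xbow needs its synthesizer set up */
            p54_init_xbow_synth(dev);
        if (!(synth & 0x40))            /* bit 6 clear: 2.4 GHz band present */
            dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &band_2GHz;
        if (!(synth & 0x80))            /* bit 7 clear: 5 GHz band present */
            dev->wiphy->bands[IEEE80211_BAND_5GHZ] = &band_5GHz;
    }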
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 64875859d654..c8bcb59adfdf 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -541,6 +541,14 @@ struct net_device | |||
541 | #define NETIF_F_V6_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM) | 541 | #define NETIF_F_V6_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM) |
542 | #define NETIF_F_ALL_CSUM (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM) | 542 | #define NETIF_F_ALL_CSUM (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM) |
543 | 543 | ||
544 | /* | ||
545 | * If one device supports one of these features, then enable them | ||
546 | * for all in netdev_increment_features. | ||
547 | */ | ||
548 | #define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \ | ||
549 | NETIF_F_SG | NETIF_F_HIGHDMA | \ | ||
550 | NETIF_F_FRAGLIST) | ||
551 | |||
544 | /* Interface index. Unique device identifier */ | 552 | /* Interface index. Unique device identifier */ |
545 | int ifindex; | 553 | int ifindex; |
546 | int iflink; | 554 | int iflink; |
@@ -1698,7 +1706,9 @@ extern char *netdev_drivername(const struct net_device *dev, char *buffer, int l | |||
1698 | 1706 | ||
1699 | extern void linkwatch_run_queue(void); | 1707 | extern void linkwatch_run_queue(void); |
1700 | 1708 | ||
1701 | extern int netdev_compute_features(unsigned long all, unsigned long one); | 1709 | unsigned long netdev_increment_features(unsigned long all, unsigned long one, |
1710 | unsigned long mask); | ||
1711 | unsigned long netdev_fix_features(unsigned long features, const char *name); | ||
1702 | 1712 | ||
1703 | static inline int net_gso_ok(int features, int gso_type) | 1713 | static inline int net_gso_ok(int features, int gso_type) |
1704 | { | 1714 | { |
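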
diff --git a/include/linux/smc911x.h b/include/linux/smc911x.h index b58f54c24183..521f37143fae 100644 --- a/include/linux/smc911x.h +++ b/include/linux/smc911x.h | |||
@@ -7,6 +7,7 @@ | |||
7 | struct smc911x_platdata { | 7 | struct smc911x_platdata { |
8 | unsigned long flags; | 8 | unsigned long flags; |
9 | unsigned long irq_flags; /* IRQF_... */ | 9 | unsigned long irq_flags; /* IRQF_... */ |
10 | int irq_polarity; | ||
10 | }; | 11 | }; |
11 | 12 | ||
12 | #endif /* __SMC911X_H__ */ | 13 | #endif /* __SMC911X_H__ */ |
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index 029a54a02396..c1dd89365833 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h | |||
@@ -125,6 +125,7 @@ sctp_state_fn_t sctp_sf_beat_8_3; | |||
125 | sctp_state_fn_t sctp_sf_backbeat_8_3; | 125 | sctp_state_fn_t sctp_sf_backbeat_8_3; |
126 | sctp_state_fn_t sctp_sf_do_9_2_final; | 126 | sctp_state_fn_t sctp_sf_do_9_2_final; |
127 | sctp_state_fn_t sctp_sf_do_9_2_shutdown; | 127 | sctp_state_fn_t sctp_sf_do_9_2_shutdown; |
128 | sctp_state_fn_t sctp_sf_do_9_2_shut_ctsn; | ||
128 | sctp_state_fn_t sctp_sf_do_ecn_cwr; | 129 | sctp_state_fn_t sctp_sf_do_ecn_cwr; |
129 | sctp_state_fn_t sctp_sf_do_ecne; | 130 | sctp_state_fn_t sctp_sf_do_ecne; |
130 | sctp_state_fn_t sctp_sf_ootb; | 131 | sctp_state_fn_t sctp_sf_ootb; |
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 22ba8632196f..6c023f0f8252 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c | |||
@@ -179,5 +179,5 @@ void br_dev_setup(struct net_device *dev) | |||
179 | 179 | ||
180 | dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | | 180 | dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | |
181 | NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_LLTX | | 181 | NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_LLTX | |
182 | NETIF_F_NETNS_LOCAL; | 182 | NETIF_F_NETNS_LOCAL | NETIF_F_GSO; |
183 | } | 183 | } |
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 573e20f7dba4..0a09ccf68c1c 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c | |||
@@ -347,15 +347,21 @@ int br_min_mtu(const struct net_bridge *br) | |||
347 | void br_features_recompute(struct net_bridge *br) | 347 | void br_features_recompute(struct net_bridge *br) |
348 | { | 348 | { |
349 | struct net_bridge_port *p; | 349 | struct net_bridge_port *p; |
350 | unsigned long features; | 350 | unsigned long features, mask; |
351 | 351 | ||
352 | features = br->feature_mask; | 352 | features = mask = br->feature_mask; |
353 | if (list_empty(&br->port_list)) | ||
354 | goto done; | ||
355 | |||
356 | features &= ~NETIF_F_ONE_FOR_ALL; | ||
353 | 357 | ||
354 | list_for_each_entry(p, &br->port_list, list) { | 358 | list_for_each_entry(p, &br->port_list, list) { |
355 | features = netdev_compute_features(features, p->dev->features); | 359 | features = netdev_increment_features(features, |
360 | p->dev->features, mask); | ||
356 | } | 361 | } |
357 | 362 | ||
358 | br->dev->features = features; | 363 | done: |
364 | br->dev->features = netdev_fix_features(features, NULL); | ||
359 | } | 365 | } |
360 | 366 | ||
361 | /* called with RTNL */ | 367 | /* called with RTNL */ |
diff --git a/net/core/dev.c b/net/core/dev.c index b8a4fd0806af..d9038e328cc1 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -3947,6 +3947,46 @@ static void netdev_init_queue_locks(struct net_device *dev) | |||
3947 | __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL); | 3947 | __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL); |
3948 | } | 3948 | } |
3949 | 3949 | ||
3950 | unsigned long netdev_fix_features(unsigned long features, const char *name) | ||
3951 | { | ||
3952 | /* Fix illegal SG+CSUM combinations. */ | ||
3953 | if ((features & NETIF_F_SG) && | ||
3954 | !(features & NETIF_F_ALL_CSUM)) { | ||
3955 | if (name) | ||
3956 | printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no " | ||
3957 | "checksum feature.\n", name); | ||
3958 | features &= ~NETIF_F_SG; | ||
3959 | } | ||
3960 | |||
3961 | /* TSO requires that SG is present as well. */ | ||
3962 | if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) { | ||
3963 | if (name) | ||
3964 | printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no " | ||
3965 | "SG feature.\n", name); | ||
3966 | features &= ~NETIF_F_TSO; | ||
3967 | } | ||
3968 | |||
3969 | if (features & NETIF_F_UFO) { | ||
3970 | if (!(features & NETIF_F_GEN_CSUM)) { | ||
3971 | if (name) | ||
3972 | printk(KERN_ERR "%s: Dropping NETIF_F_UFO " | ||
3973 | "since no NETIF_F_HW_CSUM feature.\n", | ||
3974 | name); | ||
3975 | features &= ~NETIF_F_UFO; | ||
3976 | } | ||
3977 | |||
3978 | if (!(features & NETIF_F_SG)) { | ||
3979 | if (name) | ||
3980 | printk(KERN_ERR "%s: Dropping NETIF_F_UFO " | ||
3981 | "since no NETIF_F_SG feature.\n", name); | ||
3982 | features &= ~NETIF_F_UFO; | ||
3983 | } | ||
3984 | } | ||
3985 | |||
3986 | return features; | ||
3987 | } | ||
3988 | EXPORT_SYMBOL(netdev_fix_features); | ||
3989 | |||
3950 | /** | 3990 | /** |
3951 | * register_netdevice - register a network device | 3991 | * register_netdevice - register a network device |
3952 | * @dev: device to register | 3992 | * @dev: device to register |
@@ -4032,36 +4072,7 @@ int register_netdevice(struct net_device *dev) | |||
4032 | dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM); | 4072 | dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM); |
4033 | } | 4073 | } |
4034 | 4074 | ||
4035 | 4075 | dev->features = netdev_fix_features(dev->features, dev->name); | |
4036 | /* Fix illegal SG+CSUM combinations. */ | ||
4037 | if ((dev->features & NETIF_F_SG) && | ||
4038 | !(dev->features & NETIF_F_ALL_CSUM)) { | ||
4039 | printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n", | ||
4040 | dev->name); | ||
4041 | dev->features &= ~NETIF_F_SG; | ||
4042 | } | ||
4043 | |||
4044 | /* TSO requires that SG is present as well. */ | ||
4045 | if ((dev->features & NETIF_F_TSO) && | ||
4046 | !(dev->features & NETIF_F_SG)) { | ||
4047 | printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n", | ||
4048 | dev->name); | ||
4049 | dev->features &= ~NETIF_F_TSO; | ||
4050 | } | ||
4051 | if (dev->features & NETIF_F_UFO) { | ||
4052 | if (!(dev->features & NETIF_F_HW_CSUM)) { | ||
4053 | printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no " | ||
4054 | "NETIF_F_HW_CSUM feature.\n", | ||
4055 | dev->name); | ||
4056 | dev->features &= ~NETIF_F_UFO; | ||
4057 | } | ||
4058 | if (!(dev->features & NETIF_F_SG)) { | ||
4059 | printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no " | ||
4060 | "NETIF_F_SG feature.\n", | ||
4061 | dev->name); | ||
4062 | dev->features &= ~NETIF_F_UFO; | ||
4063 | } | ||
4064 | } | ||
4065 | 4076 | ||
4066 | /* Enable software GSO if SG is supported. */ | 4077 | /* Enable software GSO if SG is supported. */ |
4067 | if (dev->features & NETIF_F_SG) | 4078 | if (dev->features & NETIF_F_SG) |
@@ -4700,49 +4711,45 @@ static int __init netdev_dma_register(void) { return -ENODEV; } | |||
4700 | #endif /* CONFIG_NET_DMA */ | 4711 | #endif /* CONFIG_NET_DMA */ |
4701 | 4712 | ||
4702 | /** | 4713 | /** |
4703 | * netdev_compute_feature - compute conjunction of two feature sets | 4714 | * netdev_increment_features - increment feature set by one |
4704 | * @all: first feature set | 4715 | * @all: current feature set |
4705 | * @one: second feature set | 4716 | * @one: new feature set |
4717 | * @mask: mask feature set | ||
4706 | * | 4718 | * |
4707 | * Computes a new feature set after adding a device with feature set | 4719 | * Computes a new feature set after adding a device with feature set |
4708 | * @one to the master device with current feature set @all. Returns | 4720 | * @one to the master device with current feature set @all. Will not |
4709 | * the new feature set. | 4721 | * enable anything that is off in @mask. Returns the new feature set. |
4710 | */ | 4722 | */ |
4711 | int netdev_compute_features(unsigned long all, unsigned long one) | 4723 | unsigned long netdev_increment_features(unsigned long all, unsigned long one, |
4712 | { | 4724 | unsigned long mask) |
4713 | /* if device needs checksumming, downgrade to hw checksumming */ | 4725 | { |
4714 | if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM)) | 4726 | /* If device needs checksumming, downgrade to it. */ |
4715 | all ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM; | 4727 | if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM)) |
4716 | 4728 | all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM); | |
4717 | /* if device can't do all checksum, downgrade to ipv4/ipv6 */ | 4729 | else if (mask & NETIF_F_ALL_CSUM) { |
4718 | if (all & NETIF_F_HW_CSUM && !(one & NETIF_F_HW_CSUM)) | 4730 | /* If one device supports v4/v6 checksumming, set for all. */ |
4719 | all ^= NETIF_F_HW_CSUM | 4731 | if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) && |
4720 | | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; | 4732 | !(all & NETIF_F_GEN_CSUM)) { |
4721 | 4733 | all &= ~NETIF_F_ALL_CSUM; | |
4722 | if (one & NETIF_F_GSO) | 4734 | all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); |
4723 | one |= NETIF_F_GSO_SOFTWARE; | 4735 | } |
4724 | one |= NETIF_F_GSO; | ||
4725 | |||
4726 | /* | ||
4727 | * If even one device supports a GSO protocol with software fallback, | ||
4728 | * enable it for all. | ||
4729 | */ | ||
4730 | all |= one & NETIF_F_GSO_SOFTWARE; | ||
4731 | 4736 | ||
4732 | /* If even one device supports robust GSO, enable it for all. */ | 4737 | /* If one device supports hw checksumming, set for all. */ |
4733 | if (one & NETIF_F_GSO_ROBUST) | 4738 | if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) { |
4734 | all |= NETIF_F_GSO_ROBUST; | 4739 | all &= ~NETIF_F_ALL_CSUM; |
4740 | all |= NETIF_F_HW_CSUM; | ||
4741 | } | ||
4742 | } | ||
4735 | 4743 | ||
4736 | all &= one | NETIF_F_LLTX; | 4744 | one |= NETIF_F_ALL_CSUM; |
4737 | 4745 | ||
4738 | if (!(all & NETIF_F_ALL_CSUM)) | 4746 | one |= all & NETIF_F_ONE_FOR_ALL; |
4739 | all &= ~NETIF_F_SG; | 4747 | all &= one | NETIF_F_LLTX | NETIF_F_GSO; |
4740 | if (!(all & NETIF_F_SG)) | 4748 | all |= one & mask & NETIF_F_ONE_FOR_ALL; |
4741 | all &= ~NETIF_F_GSO_MASK; | ||
4742 | 4749 | ||
4743 | return all; | 4750 | return all; |
4744 | } | 4751 | } |
4745 | EXPORT_SYMBOL(netdev_compute_features); | 4752 | EXPORT_SYMBOL(netdev_increment_features); |
4746 | 4753 | ||
4747 | static struct hlist_head *netdev_create_hash(void) | 4754 | static struct hlist_head *netdev_create_hash(void) |
4748 | { | 4755 | { |
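Taken together, the netdevice.h, br_if.c and net/core/dev.c hunks split the old netdev_compute_features() into two helpers: netdev_increment_features() folds one slave's feature set into a master's (never enabling anything outside @mask, and propagating NETIF_F_ONE_FOR_ALL bits as soon as one slave has them), and netdev_fix_features() strips illegal combinations afterwards. A sketch of the intended calling pattern, modelled on br_features_recompute() above (the master/slave names are hypothetical):

    /* Sketch, modelled on br_features_recompute(): recompute a master
     * device's features from its slaves with the two new helpers. */
    unsigned long features, mask;
    struct slave_port *p;               /* hypothetical slave list entry */

    features = mask = master->feature_mask;
    features &= ~NETIF_F_ONE_FOR_ALL;   /* start with these bits off; any
                                         * slave that has them turns them
                                         * back on for the master */

    list_for_each_entry(p, &master->port_list, list)
        features = netdev_increment_features(features, p->dev->features, mask);

    master->dev->features = netdev_fix_features(features, master->dev->name);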
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 990a58493235..e4c5ac9fe89b 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -362,6 +362,17 @@ struct tcp_out_options { | |||
362 | __u32 tsval, tsecr; /* need to include OPTION_TS */ | 362 | __u32 tsval, tsecr; /* need to include OPTION_TS */ |
363 | }; | 363 | }; |
364 | 364 | ||
365 | /* Beware: Something in the Internet is very sensitive to the ordering of | ||
366 | * TCP options; we learned this the hard way, so be careful here. | ||
367 | * Luckily we can at least blame others for their non-compliance, but from | ||
368 | * an interoperability perspective it seems that we're stuck with the | ||
369 | * ordering we have been using if we want to keep working with those | ||
370 | * broken things (not that it currently hurts anybody, as there isn't a | ||
371 | * particular reason why the ordering would need to be changed). | ||
372 | * | ||
373 | * At least SACK_PERM as the first option is known to lead to a disaster | ||
374 | * (but it may well be that other scenarios fail similarly). | ||
375 | */ | ||
365 | static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp, | 376 | static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp, |
366 | const struct tcp_out_options *opts, | 377 | const struct tcp_out_options *opts, |
367 | __u8 **md5_hash) { | 378 | __u8 **md5_hash) { |
@@ -376,6 +387,12 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp, | |||
376 | *md5_hash = NULL; | 387 | *md5_hash = NULL; |
377 | } | 388 | } |
378 | 389 | ||
390 | if (unlikely(opts->mss)) { | ||
391 | *ptr++ = htonl((TCPOPT_MSS << 24) | | ||
392 | (TCPOLEN_MSS << 16) | | ||
393 | opts->mss); | ||
394 | } | ||
395 | |||
379 | if (likely(OPTION_TS & opts->options)) { | 396 | if (likely(OPTION_TS & opts->options)) { |
380 | if (unlikely(OPTION_SACK_ADVERTISE & opts->options)) { | 397 | if (unlikely(OPTION_SACK_ADVERTISE & opts->options)) { |
381 | *ptr++ = htonl((TCPOPT_SACK_PERM << 24) | | 398 | *ptr++ = htonl((TCPOPT_SACK_PERM << 24) | |
@@ -392,12 +409,6 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp, | |||
392 | *ptr++ = htonl(opts->tsecr); | 409 | *ptr++ = htonl(opts->tsecr); |
393 | } | 410 | } |
394 | 411 | ||
395 | if (unlikely(opts->mss)) { | ||
396 | *ptr++ = htonl((TCPOPT_MSS << 24) | | ||
397 | (TCPOLEN_MSS << 16) | | ||
398 | opts->mss); | ||
399 | } | ||
400 | |||
401 | if (unlikely(OPTION_SACK_ADVERTISE & opts->options && | 412 | if (unlikely(OPTION_SACK_ADVERTISE & opts->options && |
402 | !(OPTION_TS & opts->options))) { | 413 | !(OPTION_TS & opts->options))) { |
403 | *ptr++ = htonl((TCPOPT_NOP << 24) | | 414 | *ptr++ = htonl((TCPOPT_NOP << 24) | |
@@ -432,7 +443,7 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp, | |||
432 | 443 | ||
433 | if (tp->rx_opt.dsack) { | 444 | if (tp->rx_opt.dsack) { |
434 | tp->rx_opt.dsack = 0; | 445 | tp->rx_opt.dsack = 0; |
435 | tp->rx_opt.eff_sacks--; | 446 | tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks; |
436 | } | 447 | } |
437 | } | 448 | } |
438 | } | 449 | } |
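The tcp_output.c hunk moves the MSS option to the front of tcp_options_write(), ahead of the SACK-permitted/timestamp words that used to precede it, per the interoperability note added above. As a worked example of the first word now emitted on a SYN carrying mss = 1460 (using the standard kind/length values TCPOPT_MSS = 2 and TCPOLEN_MSS = 4):

    /* Worked example of the MSS word now written first:
     *   (TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | 1460
     * = (2 << 24)          | (4 << 16)           | 0x05b4
     * = 0x020405b4
     * which htonl() puts on the wire as the bytes 02 04 05 b4:
     * kind = 2 (MSS), length = 4, value = 1460.
     */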
diff --git a/net/sctp/input.c b/net/sctp/input.c index a49fa80b57b9..bf612d954d41 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c | |||
@@ -369,7 +369,7 @@ static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb) | |||
369 | void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc, | 369 | void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc, |
370 | struct sctp_transport *t, __u32 pmtu) | 370 | struct sctp_transport *t, __u32 pmtu) |
371 | { | 371 | { |
372 | if (!t || (t->pathmtu == pmtu)) | 372 | if (!t || (t->pathmtu <= pmtu)) |
373 | return; | 373 | return; |
374 | 374 | ||
375 | if (sock_owned_by_user(sk)) { | 375 | if (sock_owned_by_user(sk)) { |
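In sctp_icmp_frag_needed() the pathmtu test changes from == to <=, so an ICMP "fragmentation needed" is ignored whenever the reported PMTU would not actually shrink the path MTU, not just when it is exactly equal. With hypothetical values:

    /* Effect of the '==' -> '<=' change, with made-up numbers:
     *   t->pathmtu = 1400, reported pmtu = 1400 -> ignored (as before)
     *   t->pathmtu = 1400, reported pmtu = 1500 -> now also ignored; a larger
     *                                              report cannot require shrinking
     *   t->pathmtu = 1400, reported pmtu = 1200 -> still processed
     */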
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index d4c3fbc4671e..a6a0ea71ae93 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
@@ -2544,6 +2544,7 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown(const struct sctp_endpoint *ep, | |||
2544 | sctp_shutdownhdr_t *sdh; | 2544 | sctp_shutdownhdr_t *sdh; |
2545 | sctp_disposition_t disposition; | 2545 | sctp_disposition_t disposition; |
2546 | struct sctp_ulpevent *ev; | 2546 | struct sctp_ulpevent *ev; |
2547 | __u32 ctsn; | ||
2547 | 2548 | ||
2548 | if (!sctp_vtag_verify(chunk, asoc)) | 2549 | if (!sctp_vtag_verify(chunk, asoc)) |
2549 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | 2550 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); |
@@ -2558,6 +2559,14 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown(const struct sctp_endpoint *ep, | |||
2558 | sdh = (sctp_shutdownhdr_t *)chunk->skb->data; | 2559 | sdh = (sctp_shutdownhdr_t *)chunk->skb->data; |
2559 | skb_pull(chunk->skb, sizeof(sctp_shutdownhdr_t)); | 2560 | skb_pull(chunk->skb, sizeof(sctp_shutdownhdr_t)); |
2560 | chunk->subh.shutdown_hdr = sdh; | 2561 | chunk->subh.shutdown_hdr = sdh; |
2562 | ctsn = ntohl(sdh->cum_tsn_ack); | ||
2563 | |||
2564 | /* If the Cumulative TSN Ack is beyond the max TSN currently | ||
2565 | * sent, terminate the association and respond to the | ||
2566 | * sender with an ABORT. | ||
2567 | */ | ||
2568 | if (!TSN_lt(ctsn, asoc->next_tsn)) | ||
2569 | return sctp_sf_violation_ctsn(ep, asoc, type, arg, commands); | ||
2561 | 2570 | ||
2562 | /* API 5.3.1.5 SCTP_SHUTDOWN_EVENT | 2571 | /* API 5.3.1.5 SCTP_SHUTDOWN_EVENT |
2563 | * When a peer sends a SHUTDOWN, SCTP delivers this notification to | 2572 | * When a peer sends a SHUTDOWN, SCTP delivers this notification to |
@@ -2599,6 +2608,51 @@ out: | |||
2599 | return disposition; | 2608 | return disposition; |
2600 | } | 2609 | } |
2601 | 2610 | ||
2611 | /* | ||
2612 | * sctp_sf_do_9_2_shut_ctsn | ||
2613 | * | ||
2614 | * Once an endpoint has reached the SHUTDOWN-RECEIVED state, | ||
2615 | * it MUST NOT send a SHUTDOWN in response to a ULP request. | ||
2616 | * The Cumulative TSN Ack of the received SHUTDOWN chunk | ||
2617 | * MUST be processed. | ||
2618 | */ | ||
2619 | sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(const struct sctp_endpoint *ep, | ||
2620 | const struct sctp_association *asoc, | ||
2621 | const sctp_subtype_t type, | ||
2622 | void *arg, | ||
2623 | sctp_cmd_seq_t *commands) | ||
2624 | { | ||
2625 | struct sctp_chunk *chunk = arg; | ||
2626 | sctp_shutdownhdr_t *sdh; | ||
2627 | |||
2628 | if (!sctp_vtag_verify(chunk, asoc)) | ||
2629 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
2630 | |||
2631 | /* Make sure that the SHUTDOWN chunk has a valid length. */ | ||
2632 | if (!sctp_chunk_length_valid(chunk, | ||
2633 | sizeof(struct sctp_shutdown_chunk_t))) | ||
2634 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, | ||
2635 | commands); | ||
2636 | |||
2637 | sdh = (sctp_shutdownhdr_t *)chunk->skb->data; | ||
2638 | |||
2639 | /* If the Cumulative TSN Ack is beyond the max TSN currently | ||
2640 | * sent, terminate the association and respond to the | ||
2641 | * sender with an ABORT. | ||
2642 | */ | ||
2643 | if (!TSN_lt(ntohl(sdh->cum_tsn_ack), asoc->next_tsn)) | ||
2644 | return sctp_sf_violation_ctsn(ep, asoc, type, arg, commands); | ||
2645 | |||
2646 | /* verify, by checking the Cumulative TSN Ack field of the | ||
2647 | * chunk, that all its outstanding DATA chunks have been | ||
2648 | * received by the SHUTDOWN sender. | ||
2649 | */ | ||
2650 | sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_CTSN, | ||
2651 | SCTP_BE32(sdh->cum_tsn_ack)); | ||
2652 | |||
2653 | return SCTP_DISPOSITION_CONSUME; | ||
2654 | } | ||
2655 | |||
2602 | /* RFC 2960 9.2 | 2656 | /* RFC 2960 9.2 |
2603 | * If an endpoint is in SHUTDOWN-ACK-SENT state and receives an INIT chunk | 2657 | * If an endpoint is in SHUTDOWN-ACK-SENT state and receives an INIT chunk |
2604 | * (e.g., if the SHUTDOWN COMPLETE was lost) with source and destination | 2658 | * (e.g., if the SHUTDOWN COMPLETE was lost) with source and destination |
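Both SHUTDOWN paths above now validate the chunk's Cumulative TSN Ack against asoc->next_tsn before acting on it, and the new sctp_sf_do_9_2_shut_ctsn() lets SHUTDOWN chunks received in SHUTDOWN-RECEIVED state still update the peer's cumulative ack. A small illustration of the check itself, with hypothetical TSNs:

    /* Illustration of the new CTSN sanity check (hypothetical values):
     *   asoc->next_tsn = 1000 -> nothing at or above TSN 1000 was ever sent
     *   cum_tsn_ack = 1005    -> !TSN_lt(1005, 1000): the peer acknowledges
     *                            data we never sent, so the association is
     *                            aborted via sctp_sf_violation_ctsn()
     *   cum_tsn_ack =  998    -> TSN_lt(998, 1000): the SHUTDOWN is processed
     *                            normally
     */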
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c index dd4ddc40c0ad..5c8186d88c61 100644 --- a/net/sctp/sm_statetable.c +++ b/net/sctp/sm_statetable.c | |||
@@ -266,11 +266,11 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type, | |||
266 | /* SCTP_STATE_ESTABLISHED */ \ | 266 | /* SCTP_STATE_ESTABLISHED */ \ |
267 | TYPE_SCTP_FUNC(sctp_sf_do_9_2_shutdown), \ | 267 | TYPE_SCTP_FUNC(sctp_sf_do_9_2_shutdown), \ |
268 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | 268 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ |
269 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ | 269 | TYPE_SCTP_FUNC(sctp_sf_do_9_2_shutdown), \ |
270 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | 270 | /* SCTP_STATE_SHUTDOWN_SENT */ \ |
271 | TYPE_SCTP_FUNC(sctp_sf_do_9_2_shutdown_ack), \ | 271 | TYPE_SCTP_FUNC(sctp_sf_do_9_2_shutdown_ack), \ |
272 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 272 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
273 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ | 273 | TYPE_SCTP_FUNC(sctp_sf_do_9_2_shut_ctsn), \ |
274 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 274 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
275 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ | 275 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
276 | } /* TYPE_SCTP_SHUTDOWN */ | 276 | } /* TYPE_SCTP_SHUTDOWN */ |