author     Matt Fleming <matt.fleming@intel.com>   2013-04-30 06:30:24 -0400
committer  Matt Fleming <matt.fleming@intel.com>   2013-04-30 06:42:13 -0400
commit     a614e1923d5389d01f3545ee4a90e39a04d0c90d (patch)
tree       e8e32dba12a66ffd26d78f015a142ac28e19ad8e /drivers
parent     f53f292eeaa234615c31a1306babe703fc4263f2 (diff)
parent     c1be5a5b1b355d40e6cf79cc979eb66dafa24ad1 (diff)
Merge tag 'v3.9' into efi-for-tip2
Resolve conflicts for Ingo.
Conflicts:
drivers/firmware/Kconfig
drivers/firmware/efivars.c
Signed-off-by: Matt Fleming <matt.fleming@intel.com>
Diffstat (limited to 'drivers')
214 files changed, 2774 insertions, 2101 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 92ed9692c47e..4bf68c8d4797 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -396,7 +396,7 @@ config ACPI_CUSTOM_METHOD | |||
396 | 396 | ||
397 | config ACPI_BGRT | 397 | config ACPI_BGRT |
398 | bool "Boottime Graphics Resource Table support" | 398 | bool "Boottime Graphics Resource Table support" |
399 | depends on EFI | 399 | depends on EFI && X86 |
400 | help | 400 | help |
401 | This driver adds support for exposing the ACPI Boottime Graphics | 401 | This driver adds support for exposing the ACPI Boottime Graphics |
402 | Resource Table, which allows the operating system to obtain | 402 | Resource Table, which allows the operating system to obtain |
diff --git a/drivers/acpi/acpi_i2c.c b/drivers/acpi/acpi_i2c.c
index 82045e3f5cac..a82c7626aa9b 100644
--- a/drivers/acpi/acpi_i2c.c
+++ b/drivers/acpi/acpi_i2c.c
@@ -90,7 +90,7 @@ void acpi_i2c_register_devices(struct i2c_adapter *adapter) | |||
90 | acpi_handle handle; | 90 | acpi_handle handle; |
91 | acpi_status status; | 91 | acpi_status status; |
92 | 92 | ||
93 | handle = ACPI_HANDLE(&adapter->dev); | 93 | handle = ACPI_HANDLE(adapter->dev.parent); |
94 | if (!handle) | 94 | if (!handle) |
95 | return; | 95 | return; |
96 | 96 | ||
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 5ff173066127..6ae5e440436e 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -415,7 +415,6 @@ static int acpi_pci_root_add(struct acpi_device *device, | |||
415 | struct acpi_pci_root *root; | 415 | struct acpi_pci_root *root; |
416 | struct acpi_pci_driver *driver; | 416 | struct acpi_pci_driver *driver; |
417 | u32 flags, base_flags; | 417 | u32 flags, base_flags; |
418 | bool is_osc_granted = false; | ||
419 | 418 | ||
420 | root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL); | 419 | root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL); |
421 | if (!root) | 420 | if (!root) |
@@ -476,6 +475,30 @@ static int acpi_pci_root_add(struct acpi_device *device, | |||
476 | flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT; | 475 | flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT; |
477 | acpi_pci_osc_support(root, flags); | 476 | acpi_pci_osc_support(root, flags); |
478 | 477 | ||
478 | /* | ||
479 | * TBD: Need PCI interface for enumeration/configuration of roots. | ||
480 | */ | ||
481 | |||
482 | mutex_lock(&acpi_pci_root_lock); | ||
483 | list_add_tail(&root->node, &acpi_pci_roots); | ||
484 | mutex_unlock(&acpi_pci_root_lock); | ||
485 | |||
486 | /* | ||
487 | * Scan the Root Bridge | ||
488 | * -------------------- | ||
489 | * Must do this prior to any attempt to bind the root device, as the | ||
490 | * PCI namespace does not get created until this call is made (and | ||
491 | * thus the root bridge's pci_dev does not exist). | ||
492 | */ | ||
493 | root->bus = pci_acpi_scan_root(root); | ||
494 | if (!root->bus) { | ||
495 | printk(KERN_ERR PREFIX | ||
496 | "Bus %04x:%02x not present in PCI namespace\n", | ||
497 | root->segment, (unsigned int)root->secondary.start); | ||
498 | result = -ENODEV; | ||
499 | goto out_del_root; | ||
500 | } | ||
501 | |||
479 | /* Indicate support for various _OSC capabilities. */ | 502 | /* Indicate support for various _OSC capabilities. */ |
480 | if (pci_ext_cfg_avail()) | 503 | if (pci_ext_cfg_avail()) |
481 | flags |= OSC_EXT_PCI_CONFIG_SUPPORT; | 504 | flags |= OSC_EXT_PCI_CONFIG_SUPPORT; |
@@ -494,6 +517,7 @@ static int acpi_pci_root_add(struct acpi_device *device, | |||
494 | flags = base_flags; | 517 | flags = base_flags; |
495 | } | 518 | } |
496 | } | 519 | } |
520 | |||
497 | if (!pcie_ports_disabled | 521 | if (!pcie_ports_disabled |
498 | && (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) { | 522 | && (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) { |
499 | flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL | 523 | flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL |
@@ -514,54 +538,28 @@ static int acpi_pci_root_add(struct acpi_device *device, | |||
514 | status = acpi_pci_osc_control_set(device->handle, &flags, | 538 | status = acpi_pci_osc_control_set(device->handle, &flags, |
515 | OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); | 539 | OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); |
516 | if (ACPI_SUCCESS(status)) { | 540 | if (ACPI_SUCCESS(status)) { |
517 | is_osc_granted = true; | ||
518 | dev_info(&device->dev, | 541 | dev_info(&device->dev, |
519 | "ACPI _OSC control (0x%02x) granted\n", flags); | 542 | "ACPI _OSC control (0x%02x) granted\n", flags); |
543 | if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) { | ||
544 | /* | ||
545 | * We have ASPM control, but the FADT indicates | ||
546 | * that it's unsupported. Clear it. | ||
547 | */ | ||
548 | pcie_clear_aspm(root->bus); | ||
549 | } | ||
520 | } else { | 550 | } else { |
521 | is_osc_granted = false; | ||
522 | dev_info(&device->dev, | 551 | dev_info(&device->dev, |
523 | "ACPI _OSC request failed (%s), " | 552 | "ACPI _OSC request failed (%s), " |
524 | "returned control mask: 0x%02x\n", | 553 | "returned control mask: 0x%02x\n", |
525 | acpi_format_exception(status), flags); | 554 | acpi_format_exception(status), flags); |
555 | pr_info("ACPI _OSC control for PCIe not granted, " | ||
556 | "disabling ASPM\n"); | ||
557 | pcie_no_aspm(); | ||
526 | } | 558 | } |
527 | } else { | 559 | } else { |
528 | dev_info(&device->dev, | 560 | dev_info(&device->dev, |
529 | "Unable to request _OSC control " | 561 | "Unable to request _OSC control " |
530 | "(_OSC support mask: 0x%02x)\n", flags); | 562 | "(_OSC support mask: 0x%02x)\n", flags); |
531 | } | ||
532 | |||
533 | /* | ||
534 | * TBD: Need PCI interface for enumeration/configuration of roots. | ||
535 | */ | ||
536 | |||
537 | mutex_lock(&acpi_pci_root_lock); | ||
538 | list_add_tail(&root->node, &acpi_pci_roots); | ||
539 | mutex_unlock(&acpi_pci_root_lock); | ||
540 | |||
541 | /* | ||
542 | * Scan the Root Bridge | ||
543 | * -------------------- | ||
544 | * Must do this prior to any attempt to bind the root device, as the | ||
545 | * PCI namespace does not get created until this call is made (and | ||
546 | * thus the root bridge's pci_dev does not exist). | ||
547 | */ | ||
548 | root->bus = pci_acpi_scan_root(root); | ||
549 | if (!root->bus) { | ||
550 | printk(KERN_ERR PREFIX | ||
551 | "Bus %04x:%02x not present in PCI namespace\n", | ||
552 | root->segment, (unsigned int)root->secondary.start); | ||
553 | result = -ENODEV; | ||
554 | goto out_del_root; | ||
555 | } | ||
556 | |||
557 | /* ASPM setting */ | ||
558 | if (is_osc_granted) { | ||
559 | if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) | ||
560 | pcie_clear_aspm(root->bus); | ||
561 | } else { | ||
562 | pr_info("ACPI _OSC control for PCIe not granted, " | ||
563 | "disabling ASPM\n"); | ||
564 | pcie_no_aspm(); | ||
565 | } | 563 | } |
566 | 564 | ||
567 | pci_acpi_add_bus_pm_notifier(device, root->bus); | 565 | pci_acpi_add_bus_pm_notifier(device, root->bus); |
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index fc95308e9a11..ee255c60bdac 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -66,7 +66,8 @@ module_param(latency_factor, uint, 0644); | |||
66 | 66 | ||
67 | static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device); | 67 | static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device); |
68 | 68 | ||
69 | static struct acpi_processor_cx *acpi_cstate[CPUIDLE_STATE_MAX]; | 69 | static DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], |
70 | acpi_cstate); | ||
70 | 71 | ||
71 | static int disabled_by_idle_boot_param(void) | 72 | static int disabled_by_idle_boot_param(void) |
72 | { | 73 | { |
@@ -722,7 +723,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev, | |||
722 | struct cpuidle_driver *drv, int index) | 723 | struct cpuidle_driver *drv, int index) |
723 | { | 724 | { |
724 | struct acpi_processor *pr; | 725 | struct acpi_processor *pr; |
725 | struct acpi_processor_cx *cx = acpi_cstate[index]; | 726 | struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); |
726 | 727 | ||
727 | pr = __this_cpu_read(processors); | 728 | pr = __this_cpu_read(processors); |
728 | 729 | ||
@@ -745,7 +746,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev, | |||
745 | */ | 746 | */ |
746 | static int acpi_idle_play_dead(struct cpuidle_device *dev, int index) | 747 | static int acpi_idle_play_dead(struct cpuidle_device *dev, int index) |
747 | { | 748 | { |
748 | struct acpi_processor_cx *cx = acpi_cstate[index]; | 749 | struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); |
749 | 750 | ||
750 | ACPI_FLUSH_CPU_CACHE(); | 751 | ACPI_FLUSH_CPU_CACHE(); |
751 | 752 | ||
@@ -775,7 +776,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, | |||
775 | struct cpuidle_driver *drv, int index) | 776 | struct cpuidle_driver *drv, int index) |
776 | { | 777 | { |
777 | struct acpi_processor *pr; | 778 | struct acpi_processor *pr; |
778 | struct acpi_processor_cx *cx = acpi_cstate[index]; | 779 | struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); |
779 | 780 | ||
780 | pr = __this_cpu_read(processors); | 781 | pr = __this_cpu_read(processors); |
781 | 782 | ||
@@ -833,7 +834,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
833 | struct cpuidle_driver *drv, int index) | 834 | struct cpuidle_driver *drv, int index) |
834 | { | 835 | { |
835 | struct acpi_processor *pr; | 836 | struct acpi_processor *pr; |
836 | struct acpi_processor_cx *cx = acpi_cstate[index]; | 837 | struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); |
837 | 838 | ||
838 | pr = __this_cpu_read(processors); | 839 | pr = __this_cpu_read(processors); |
839 | 840 | ||
@@ -960,7 +961,7 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr, | |||
960 | !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) | 961 | !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) |
961 | continue; | 962 | continue; |
962 | #endif | 963 | #endif |
963 | acpi_cstate[count] = cx; | 964 | per_cpu(acpi_cstate[count], dev->cpu) = cx; |
964 | 965 | ||
965 | count++; | 966 | count++; |
966 | if (count == CPUIDLE_STATE_MAX) | 967 | if (count == CPUIDLE_STATE_MAX) |
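The processor_idle.c hunks above convert the single global acpi_cstate[] table into a per-CPU array, so each CPU's cpuidle state index resolves to that CPU's own C-state data. A minimal sketch of the per-CPU array idiom they rely on, with illustrative names rather than the driver's own:

/* Needs <linux/percpu.h>; CPUIDLE_STATE_MAX comes from <linux/cpuidle.h>. */
struct demo_cstate { unsigned int latency; };

static DEFINE_PER_CPU(struct demo_cstate * [CPUIDLE_STATE_MAX], demo_cstates);

static void demo_store(unsigned int cpu, int index, struct demo_cstate *cx)
{
	/* per_cpu() selects the named variable's copy for the given CPU. */
	per_cpu(demo_cstates[index], cpu) = cx;
}

static struct demo_cstate *demo_load(unsigned int cpu, int index)
{
	return per_cpu(demo_cstates[index], cpu);
}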
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index ffdd32d22602..2f48123d74c4 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -150,6 +150,7 @@ enum piix_controller_ids { | |||
150 | tolapai_sata, | 150 | tolapai_sata, |
151 | piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */ | 151 | piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */ |
152 | ich8_sata_snb, | 152 | ich8_sata_snb, |
153 | ich8_2port_sata_snb, | ||
153 | }; | 154 | }; |
154 | 155 | ||
155 | struct piix_map_db { | 156 | struct piix_map_db { |
@@ -304,7 +305,7 @@ static const struct pci_device_id piix_pci_tbl[] = { | |||
304 | /* SATA Controller IDE (Lynx Point) */ | 305 | /* SATA Controller IDE (Lynx Point) */ |
305 | { 0x8086, 0x8c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, | 306 | { 0x8086, 0x8c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, |
306 | /* SATA Controller IDE (Lynx Point) */ | 307 | /* SATA Controller IDE (Lynx Point) */ |
307 | { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, | 308 | { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb }, |
308 | /* SATA Controller IDE (Lynx Point) */ | 309 | /* SATA Controller IDE (Lynx Point) */ |
309 | { 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, | 310 | { 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, |
310 | /* SATA Controller IDE (Lynx Point-LP) */ | 311 | /* SATA Controller IDE (Lynx Point-LP) */ |
@@ -439,6 +440,7 @@ static const struct piix_map_db *piix_map_db_table[] = { | |||
439 | [ich8m_apple_sata] = &ich8m_apple_map_db, | 440 | [ich8m_apple_sata] = &ich8m_apple_map_db, |
440 | [tolapai_sata] = &tolapai_map_db, | 441 | [tolapai_sata] = &tolapai_map_db, |
441 | [ich8_sata_snb] = &ich8_map_db, | 442 | [ich8_sata_snb] = &ich8_map_db, |
443 | [ich8_2port_sata_snb] = &ich8_2port_map_db, | ||
442 | }; | 444 | }; |
443 | 445 | ||
444 | static struct pci_bits piix_enable_bits[] = { | 446 | static struct pci_bits piix_enable_bits[] = { |
@@ -1242,6 +1244,16 @@ static struct ata_port_info piix_port_info[] = { | |||
1242 | .udma_mask = ATA_UDMA6, | 1244 | .udma_mask = ATA_UDMA6, |
1243 | .port_ops = &piix_sata_ops, | 1245 | .port_ops = &piix_sata_ops, |
1244 | }, | 1246 | }, |
1247 | |||
1248 | [ich8_2port_sata_snb] = | ||
1249 | { | ||
1250 | .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR | ||
1251 | | PIIX_FLAG_PIO16, | ||
1252 | .pio_mask = ATA_PIO4, | ||
1253 | .mwdma_mask = ATA_MWDMA2, | ||
1254 | .udma_mask = ATA_UDMA6, | ||
1255 | .port_ops = &piix_sata_ops, | ||
1256 | }, | ||
1245 | }; | 1257 | }; |
1246 | 1258 | ||
1247 | #define AHCI_PCI_BAR 5 | 1259 | #define AHCI_PCI_BAR 5 |
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 497adea1f0d6..63c743baf920 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2329,7 +2329,7 @@ int ata_dev_configure(struct ata_device *dev) | |||
2329 | * from SATA Settings page of Identify Device Data Log. | 2329 | * from SATA Settings page of Identify Device Data Log. |
2330 | */ | 2330 | */ |
2331 | if (ata_id_has_devslp(dev->id)) { | 2331 | if (ata_id_has_devslp(dev->id)) { |
2332 | u8 sata_setting[ATA_SECT_SIZE]; | 2332 | u8 *sata_setting = ap->sector_buf; |
2333 | int i, j; | 2333 | int i, j; |
2334 | 2334 | ||
2335 | dev->flags |= ATA_DFLAG_DEVSLP; | 2335 | dev->flags |= ATA_DFLAG_DEVSLP; |
@@ -2439,6 +2439,9 @@ int ata_dev_configure(struct ata_device *dev) | |||
2439 | dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, | 2439 | dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, |
2440 | dev->max_sectors); | 2440 | dev->max_sectors); |
2441 | 2441 | ||
2442 | if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48) | ||
2443 | dev->max_sectors = ATA_MAX_SECTORS_LBA48; | ||
2444 | |||
2442 | if (ap->ops->dev_config) | 2445 | if (ap->ops->dev_config) |
2443 | ap->ops->dev_config(dev); | 2446 | ap->ops->dev_config(dev); |
2444 | 2447 | ||
@@ -4100,6 +4103,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
4100 | /* Weird ATAPI devices */ | 4103 | /* Weird ATAPI devices */ |
4101 | { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, | 4104 | { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, |
4102 | { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA }, | 4105 | { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA }, |
4106 | { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, | ||
4103 | 4107 | ||
4104 | /* Devices we expect to fail diagnostics */ | 4108 | /* Devices we expect to fail diagnostics */ |
4105 | 4109 | ||
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 318b41358187..ff44787e5a45 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -532,8 +532,8 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg) | |||
532 | struct scsi_sense_hdr sshdr; | 532 | struct scsi_sense_hdr sshdr; |
533 | scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, | 533 | scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, |
534 | &sshdr); | 534 | &sshdr); |
535 | if (sshdr.sense_key == 0 && | 535 | if (sshdr.sense_key == RECOVERED_ERROR && |
536 | sshdr.asc == 0 && sshdr.ascq == 0) | 536 | sshdr.asc == 0 && sshdr.ascq == 0x1d) |
537 | cmd_result &= ~SAM_STAT_CHECK_CONDITION; | 537 | cmd_result &= ~SAM_STAT_CHECK_CONDITION; |
538 | } | 538 | } |
539 | 539 | ||
@@ -618,8 +618,8 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg) | |||
618 | struct scsi_sense_hdr sshdr; | 618 | struct scsi_sense_hdr sshdr; |
619 | scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, | 619 | scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, |
620 | &sshdr); | 620 | &sshdr); |
621 | if (sshdr.sense_key == 0 && | 621 | if (sshdr.sense_key == RECOVERED_ERROR && |
622 | sshdr.asc == 0 && sshdr.ascq == 0) | 622 | sshdr.asc == 0 && sshdr.ascq == 0x1d) |
623 | cmd_result &= ~SAM_STAT_CHECK_CONDITION; | 623 | cmd_result &= ~SAM_STAT_CHECK_CONDITION; |
624 | } | 624 | } |
625 | 625 | ||
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 5f74587ef258..71671c42ef45 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -46,6 +46,7 @@ | |||
46 | #include "power.h" | 46 | #include "power.h" |
47 | 47 | ||
48 | static DEFINE_MUTEX(dev_pm_qos_mtx); | 48 | static DEFINE_MUTEX(dev_pm_qos_mtx); |
49 | static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx); | ||
49 | 50 | ||
50 | static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); | 51 | static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); |
51 | 52 | ||
@@ -216,12 +217,17 @@ void dev_pm_qos_constraints_destroy(struct device *dev) | |||
216 | struct pm_qos_constraints *c; | 217 | struct pm_qos_constraints *c; |
217 | struct pm_qos_flags *f; | 218 | struct pm_qos_flags *f; |
218 | 219 | ||
219 | mutex_lock(&dev_pm_qos_mtx); | 220 | mutex_lock(&dev_pm_qos_sysfs_mtx); |
220 | 221 | ||
221 | /* | 222 | /* |
222 | * If the device's PM QoS resume latency limit or PM QoS flags have been | 223 | * If the device's PM QoS resume latency limit or PM QoS flags have been |
223 | * exposed to user space, they have to be hidden at this point. | 224 | * exposed to user space, they have to be hidden at this point. |
224 | */ | 225 | */ |
226 | pm_qos_sysfs_remove_latency(dev); | ||
227 | pm_qos_sysfs_remove_flags(dev); | ||
228 | |||
229 | mutex_lock(&dev_pm_qos_mtx); | ||
230 | |||
225 | __dev_pm_qos_hide_latency_limit(dev); | 231 | __dev_pm_qos_hide_latency_limit(dev); |
226 | __dev_pm_qos_hide_flags(dev); | 232 | __dev_pm_qos_hide_flags(dev); |
227 | 233 | ||
@@ -254,6 +260,8 @@ void dev_pm_qos_constraints_destroy(struct device *dev) | |||
254 | 260 | ||
255 | out: | 261 | out: |
256 | mutex_unlock(&dev_pm_qos_mtx); | 262 | mutex_unlock(&dev_pm_qos_mtx); |
263 | |||
264 | mutex_unlock(&dev_pm_qos_sysfs_mtx); | ||
257 | } | 265 | } |
258 | 266 | ||
259 | /** | 267 | /** |
@@ -558,6 +566,14 @@ static void __dev_pm_qos_drop_user_request(struct device *dev, | |||
558 | kfree(req); | 566 | kfree(req); |
559 | } | 567 | } |
560 | 568 | ||
569 | static void dev_pm_qos_drop_user_request(struct device *dev, | ||
570 | enum dev_pm_qos_req_type type) | ||
571 | { | ||
572 | mutex_lock(&dev_pm_qos_mtx); | ||
573 | __dev_pm_qos_drop_user_request(dev, type); | ||
574 | mutex_unlock(&dev_pm_qos_mtx); | ||
575 | } | ||
576 | |||
561 | /** | 577 | /** |
562 | * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space. | 578 | * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space. |
563 | * @dev: Device whose PM QoS latency limit is to be exposed to user space. | 579 | * @dev: Device whose PM QoS latency limit is to be exposed to user space. |
@@ -581,6 +597,8 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) | |||
581 | return ret; | 597 | return ret; |
582 | } | 598 | } |
583 | 599 | ||
600 | mutex_lock(&dev_pm_qos_sysfs_mtx); | ||
601 | |||
584 | mutex_lock(&dev_pm_qos_mtx); | 602 | mutex_lock(&dev_pm_qos_mtx); |
585 | 603 | ||
586 | if (IS_ERR_OR_NULL(dev->power.qos)) | 604 | if (IS_ERR_OR_NULL(dev->power.qos)) |
@@ -591,26 +609,27 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) | |||
591 | if (ret < 0) { | 609 | if (ret < 0) { |
592 | __dev_pm_qos_remove_request(req); | 610 | __dev_pm_qos_remove_request(req); |
593 | kfree(req); | 611 | kfree(req); |
612 | mutex_unlock(&dev_pm_qos_mtx); | ||
594 | goto out; | 613 | goto out; |
595 | } | 614 | } |
596 | |||
597 | dev->power.qos->latency_req = req; | 615 | dev->power.qos->latency_req = req; |
616 | |||
617 | mutex_unlock(&dev_pm_qos_mtx); | ||
618 | |||
598 | ret = pm_qos_sysfs_add_latency(dev); | 619 | ret = pm_qos_sysfs_add_latency(dev); |
599 | if (ret) | 620 | if (ret) |
600 | __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); | 621 | dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); |
601 | 622 | ||
602 | out: | 623 | out: |
603 | mutex_unlock(&dev_pm_qos_mtx); | 624 | mutex_unlock(&dev_pm_qos_sysfs_mtx); |
604 | return ret; | 625 | return ret; |
605 | } | 626 | } |
606 | EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit); | 627 | EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit); |
607 | 628 | ||
608 | static void __dev_pm_qos_hide_latency_limit(struct device *dev) | 629 | static void __dev_pm_qos_hide_latency_limit(struct device *dev) |
609 | { | 630 | { |
610 | if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) { | 631 | if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) |
611 | pm_qos_sysfs_remove_latency(dev); | ||
612 | __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); | 632 | __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); |
613 | } | ||
614 | } | 633 | } |
615 | 634 | ||
616 | /** | 635 | /** |
@@ -619,9 +638,15 @@ static void __dev_pm_qos_hide_latency_limit(struct device *dev) | |||
619 | */ | 638 | */ |
620 | void dev_pm_qos_hide_latency_limit(struct device *dev) | 639 | void dev_pm_qos_hide_latency_limit(struct device *dev) |
621 | { | 640 | { |
641 | mutex_lock(&dev_pm_qos_sysfs_mtx); | ||
642 | |||
643 | pm_qos_sysfs_remove_latency(dev); | ||
644 | |||
622 | mutex_lock(&dev_pm_qos_mtx); | 645 | mutex_lock(&dev_pm_qos_mtx); |
623 | __dev_pm_qos_hide_latency_limit(dev); | 646 | __dev_pm_qos_hide_latency_limit(dev); |
624 | mutex_unlock(&dev_pm_qos_mtx); | 647 | mutex_unlock(&dev_pm_qos_mtx); |
648 | |||
649 | mutex_unlock(&dev_pm_qos_sysfs_mtx); | ||
625 | } | 650 | } |
626 | EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit); | 651 | EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit); |
627 | 652 | ||
@@ -649,6 +674,8 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val) | |||
649 | } | 674 | } |
650 | 675 | ||
651 | pm_runtime_get_sync(dev); | 676 | pm_runtime_get_sync(dev); |
677 | mutex_lock(&dev_pm_qos_sysfs_mtx); | ||
678 | |||
652 | mutex_lock(&dev_pm_qos_mtx); | 679 | mutex_lock(&dev_pm_qos_mtx); |
653 | 680 | ||
654 | if (IS_ERR_OR_NULL(dev->power.qos)) | 681 | if (IS_ERR_OR_NULL(dev->power.qos)) |
@@ -659,16 +686,19 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val) | |||
659 | if (ret < 0) { | 686 | if (ret < 0) { |
660 | __dev_pm_qos_remove_request(req); | 687 | __dev_pm_qos_remove_request(req); |
661 | kfree(req); | 688 | kfree(req); |
689 | mutex_unlock(&dev_pm_qos_mtx); | ||
662 | goto out; | 690 | goto out; |
663 | } | 691 | } |
664 | |||
665 | dev->power.qos->flags_req = req; | 692 | dev->power.qos->flags_req = req; |
693 | |||
694 | mutex_unlock(&dev_pm_qos_mtx); | ||
695 | |||
666 | ret = pm_qos_sysfs_add_flags(dev); | 696 | ret = pm_qos_sysfs_add_flags(dev); |
667 | if (ret) | 697 | if (ret) |
668 | __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); | 698 | dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); |
669 | 699 | ||
670 | out: | 700 | out: |
671 | mutex_unlock(&dev_pm_qos_mtx); | 701 | mutex_unlock(&dev_pm_qos_sysfs_mtx); |
672 | pm_runtime_put(dev); | 702 | pm_runtime_put(dev); |
673 | return ret; | 703 | return ret; |
674 | } | 704 | } |
@@ -676,10 +706,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags); | |||
676 | 706 | ||
677 | static void __dev_pm_qos_hide_flags(struct device *dev) | 707 | static void __dev_pm_qos_hide_flags(struct device *dev) |
678 | { | 708 | { |
679 | if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) { | 709 | if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) |
680 | pm_qos_sysfs_remove_flags(dev); | ||
681 | __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); | 710 | __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); |
682 | } | ||
683 | } | 711 | } |
684 | 712 | ||
685 | /** | 713 | /** |
@@ -689,9 +717,15 @@ static void __dev_pm_qos_hide_flags(struct device *dev) | |||
689 | void dev_pm_qos_hide_flags(struct device *dev) | 717 | void dev_pm_qos_hide_flags(struct device *dev) |
690 | { | 718 | { |
691 | pm_runtime_get_sync(dev); | 719 | pm_runtime_get_sync(dev); |
720 | mutex_lock(&dev_pm_qos_sysfs_mtx); | ||
721 | |||
722 | pm_qos_sysfs_remove_flags(dev); | ||
723 | |||
692 | mutex_lock(&dev_pm_qos_mtx); | 724 | mutex_lock(&dev_pm_qos_mtx); |
693 | __dev_pm_qos_hide_flags(dev); | 725 | __dev_pm_qos_hide_flags(dev); |
694 | mutex_unlock(&dev_pm_qos_mtx); | 726 | mutex_unlock(&dev_pm_qos_mtx); |
727 | |||
728 | mutex_unlock(&dev_pm_qos_sysfs_mtx); | ||
695 | pm_runtime_put(dev); | 729 | pm_runtime_put(dev); |
696 | } | 730 | } |
697 | EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags); | 731 | EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags); |
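Taken together, the qos.c hunks introduce dev_pm_qos_sysfs_mtx as an outer lock and move the pm_qos_sysfs_add/remove calls outside dev_pm_qos_mtx; the sysfs attribute callbacks can themselves want dev_pm_qos_mtx, so adding or removing the files must not happen with that mutex held. A rough sketch of the resulting lock ordering, with hypothetical names:

/* Outer lock serializes sysfs create/remove; inner lock protects the data. */
static DEFINE_MUTEX(demo_sysfs_mtx);
static DEFINE_MUTEX(demo_data_mtx);

static int demo_expose(struct device *dev)
{
	int ret;

	mutex_lock(&demo_sysfs_mtx);

	mutex_lock(&demo_data_mtx);
	/* ... install the request in the device's constraint data ... */
	mutex_unlock(&demo_data_mtx);

	/* demo_sysfs_add() stands in for pm_qos_sysfs_add_latency()/..._flags();
	 * it runs with only the outer lock held. */
	ret = demo_sysfs_add(dev);
	if (ret) {
		mutex_lock(&demo_data_mtx);
		/* ... drop the request again ... */
		mutex_unlock(&demo_data_mtx);
	}

	mutex_unlock(&demo_sysfs_mtx);
	return ret;
}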
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index e6732cf7c06e..79f4fca9877a 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -398,7 +398,7 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min, | |||
398 | base = 0; | 398 | base = 0; |
399 | 399 | ||
400 | if (max < rbnode->base_reg + rbnode->blklen) | 400 | if (max < rbnode->base_reg + rbnode->blklen) |
401 | end = rbnode->base_reg + rbnode->blklen - max; | 401 | end = max - rbnode->base_reg + 1; |
402 | else | 402 | else |
403 | end = rbnode->blklen; | 403 | end = rbnode->blklen; |
404 | 404 | ||
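For a concrete check of the corrected bound: with rbnode->base_reg = 10 and rbnode->blklen = 8 the block caches registers 10..17; syncing up to max = 13 should cover block entries 0..3, i.e. an end of 13 - 10 + 1 = 4, whereas the old expression gave 10 + 8 - 13 = 5 and would have walked one register past max.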
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 3d2367501fd0..58cfb3232428 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -710,12 +710,12 @@ skip_format_initialization: | |||
710 | } | 710 | } |
711 | } | 711 | } |
712 | 712 | ||
713 | regmap_debugfs_init(map, config->name); | ||
714 | |||
713 | ret = regcache_init(map, config); | 715 | ret = regcache_init(map, config); |
714 | if (ret != 0) | 716 | if (ret != 0) |
715 | goto err_range; | 717 | goto err_range; |
716 | 718 | ||
717 | regmap_debugfs_init(map, config->name); | ||
718 | |||
719 | /* Add a devres resource for dev_get_regmap() */ | 719 | /* Add a devres resource for dev_get_regmap() */ |
720 | m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL); | 720 | m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL); |
721 | if (!m) { | 721 | if (!m) { |
@@ -1036,6 +1036,8 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg, | |||
1036 | kfree(async->work_buf); | 1036 | kfree(async->work_buf); |
1037 | kfree(async); | 1037 | kfree(async); |
1038 | } | 1038 | } |
1039 | |||
1040 | return ret; | ||
1039 | } | 1041 | } |
1040 | 1042 | ||
1041 | trace_regmap_hw_write_start(map->dev, reg, | 1043 | trace_regmap_hw_write_start(map->dev, reg, |
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 25ef5c014fca..92b6d7c51e39 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -51,8 +51,9 @@ new_skb(ulong len) | |||
51 | { | 51 | { |
52 | struct sk_buff *skb; | 52 | struct sk_buff *skb; |
53 | 53 | ||
54 | skb = alloc_skb(len, GFP_ATOMIC); | 54 | skb = alloc_skb(len + MAX_HEADER, GFP_ATOMIC); |
55 | if (skb) { | 55 | if (skb) { |
56 | skb_reserve(skb, MAX_HEADER); | ||
56 | skb_reset_mac_header(skb); | 57 | skb_reset_mac_header(skb); |
57 | skb_reset_network_header(skb); | 58 | skb_reset_network_header(skb); |
58 | skb->protocol = __constant_htons(ETH_P_AOE); | 59 | skb->protocol = __constant_htons(ETH_P_AOE); |
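The new_skb() change is the usual headroom trick: allocate MAX_HEADER extra bytes and immediately reserve them, so later header pushes by lower layers cannot overrun the buffer. A minimal illustration of the idiom (names other than the skb helpers are made up; needs <linux/skbuff.h> and <linux/netdevice.h>):

static struct sk_buff *demo_new_skb(unsigned int payload_len)
{
	struct sk_buff *skb;

	skb = alloc_skb(payload_len + MAX_HEADER, GFP_ATOMIC);
	if (skb) {
		skb_reserve(skb, MAX_HEADER);	/* headroom for lower-layer headers */
		skb_put(skb, payload_len);	/* tail area carries the payload */
	}
	return skb;
}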
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index fe5f6403417f..dfe758382eaf 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -922,6 +922,11 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, | |||
922 | lo->lo_flags |= LO_FLAGS_PARTSCAN; | 922 | lo->lo_flags |= LO_FLAGS_PARTSCAN; |
923 | if (lo->lo_flags & LO_FLAGS_PARTSCAN) | 923 | if (lo->lo_flags & LO_FLAGS_PARTSCAN) |
924 | ioctl_by_bdev(bdev, BLKRRPART, 0); | 924 | ioctl_by_bdev(bdev, BLKRRPART, 0); |
925 | |||
926 | /* Grab the block_device to prevent its destruction after we | ||
927 | * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev). | ||
928 | */ | ||
929 | bdgrab(bdev); | ||
925 | return 0; | 930 | return 0; |
926 | 931 | ||
927 | out_clr: | 932 | out_clr: |
@@ -1031,8 +1036,10 @@ static int loop_clr_fd(struct loop_device *lo) | |||
1031 | memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE); | 1036 | memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE); |
1032 | memset(lo->lo_crypt_name, 0, LO_NAME_SIZE); | 1037 | memset(lo->lo_crypt_name, 0, LO_NAME_SIZE); |
1033 | memset(lo->lo_file_name, 0, LO_NAME_SIZE); | 1038 | memset(lo->lo_file_name, 0, LO_NAME_SIZE); |
1034 | if (bdev) | 1039 | if (bdev) { |
1040 | bdput(bdev); | ||
1035 | invalidate_bdev(bdev); | 1041 | invalidate_bdev(bdev); |
1042 | } | ||
1036 | set_capacity(lo->lo_disk, 0); | 1043 | set_capacity(lo->lo_disk, 0); |
1037 | loop_sysfs_exit(lo); | 1044 | loop_sysfs_exit(lo); |
1038 | if (bdev) { | 1045 | if (bdev) { |
@@ -1044,29 +1051,12 @@ static int loop_clr_fd(struct loop_device *lo) | |||
1044 | lo->lo_state = Lo_unbound; | 1051 | lo->lo_state = Lo_unbound; |
1045 | /* This is safe: open() is still holding a reference. */ | 1052 | /* This is safe: open() is still holding a reference. */ |
1046 | module_put(THIS_MODULE); | 1053 | module_put(THIS_MODULE); |
1054 | if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev) | ||
1055 | ioctl_by_bdev(bdev, BLKRRPART, 0); | ||
1047 | lo->lo_flags = 0; | 1056 | lo->lo_flags = 0; |
1048 | if (!part_shift) | 1057 | if (!part_shift) |
1049 | lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN; | 1058 | lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN; |
1050 | mutex_unlock(&lo->lo_ctl_mutex); | 1059 | mutex_unlock(&lo->lo_ctl_mutex); |
1051 | |||
1052 | /* | ||
1053 | * Remove all partitions, since BLKRRPART won't remove user | ||
1054 | * added partitions when max_part=0 | ||
1055 | */ | ||
1056 | if (bdev) { | ||
1057 | struct disk_part_iter piter; | ||
1058 | struct hd_struct *part; | ||
1059 | |||
1060 | mutex_lock_nested(&bdev->bd_mutex, 1); | ||
1061 | invalidate_partition(bdev->bd_disk, 0); | ||
1062 | disk_part_iter_init(&piter, bdev->bd_disk, | ||
1063 | DISK_PITER_INCL_EMPTY); | ||
1064 | while ((part = disk_part_iter_next(&piter))) | ||
1065 | delete_partition(bdev->bd_disk, part->partno); | ||
1066 | disk_part_iter_exit(&piter); | ||
1067 | mutex_unlock(&bdev->bd_mutex); | ||
1068 | } | ||
1069 | |||
1070 | /* | 1060 | /* |
1071 | * Need not hold lo_ctl_mutex to fput backing file. | 1061 | * Need not hold lo_ctl_mutex to fput backing file. |
1072 | * Calling fput holding lo_ctl_mutex triggers a circular | 1062 | * Calling fput holding lo_ctl_mutex triggers a circular |
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 92250af84e7d..32c678028e53 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -81,12 +81,17 @@ | |||
81 | /* Device instance number, incremented each time a device is probed. */ | 81 | /* Device instance number, incremented each time a device is probed. */ |
82 | static int instance; | 82 | static int instance; |
83 | 83 | ||
84 | struct list_head online_list; | ||
85 | struct list_head removing_list; | ||
86 | spinlock_t dev_lock; | ||
87 | |||
84 | /* | 88 | /* |
85 | * Global variable used to hold the major block device number | 89 | * Global variable used to hold the major block device number |
86 | * allocated in mtip_init(). | 90 | * allocated in mtip_init(). |
87 | */ | 91 | */ |
88 | static int mtip_major; | 92 | static int mtip_major; |
89 | static struct dentry *dfs_parent; | 93 | static struct dentry *dfs_parent; |
94 | static struct dentry *dfs_device_status; | ||
90 | 95 | ||
91 | static u32 cpu_use[NR_CPUS]; | 96 | static u32 cpu_use[NR_CPUS]; |
92 | 97 | ||
@@ -243,40 +248,31 @@ static inline void release_slot(struct mtip_port *port, int tag) | |||
243 | /* | 248 | /* |
244 | * Reset the HBA (without sleeping) | 249 | * Reset the HBA (without sleeping) |
245 | * | 250 | * |
246 | * Just like hba_reset, except does not call sleep, so can be | ||
247 | * run from interrupt/tasklet context. | ||
248 | * | ||
249 | * @dd Pointer to the driver data structure. | 251 | * @dd Pointer to the driver data structure. |
250 | * | 252 | * |
251 | * return value | 253 | * return value |
252 | * 0 The reset was successful. | 254 | * 0 The reset was successful. |
253 | * -1 The HBA Reset bit did not clear. | 255 | * -1 The HBA Reset bit did not clear. |
254 | */ | 256 | */ |
255 | static int hba_reset_nosleep(struct driver_data *dd) | 257 | static int mtip_hba_reset(struct driver_data *dd) |
256 | { | 258 | { |
257 | unsigned long timeout; | 259 | unsigned long timeout; |
258 | 260 | ||
259 | /* Chip quirk: quiesce any chip function */ | ||
260 | mdelay(10); | ||
261 | |||
262 | /* Set the reset bit */ | 261 | /* Set the reset bit */ |
263 | writel(HOST_RESET, dd->mmio + HOST_CTL); | 262 | writel(HOST_RESET, dd->mmio + HOST_CTL); |
264 | 263 | ||
265 | /* Flush */ | 264 | /* Flush */ |
266 | readl(dd->mmio + HOST_CTL); | 265 | readl(dd->mmio + HOST_CTL); |
267 | 266 | ||
268 | /* | 267 | /* Spin for up to 2 seconds, waiting for reset acknowledgement */ |
269 | * Wait 10ms then spin for up to 1 second | 268 | timeout = jiffies + msecs_to_jiffies(2000); |
270 | * waiting for reset acknowledgement | 269 | do { |
271 | */ | 270 | mdelay(10); |
272 | timeout = jiffies + msecs_to_jiffies(1000); | 271 | if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) |
273 | mdelay(10); | 272 | return -1; |
274 | while ((readl(dd->mmio + HOST_CTL) & HOST_RESET) | ||
275 | && time_before(jiffies, timeout)) | ||
276 | mdelay(1); | ||
277 | 273 | ||
278 | if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) | 274 | } while ((readl(dd->mmio + HOST_CTL) & HOST_RESET) |
279 | return -1; | 275 | && time_before(jiffies, timeout)); |
280 | 276 | ||
281 | if (readl(dd->mmio + HOST_CTL) & HOST_RESET) | 277 | if (readl(dd->mmio + HOST_CTL) & HOST_RESET) |
282 | return -1; | 278 | return -1; |
@@ -481,7 +477,7 @@ static void mtip_restart_port(struct mtip_port *port) | |||
481 | dev_warn(&port->dd->pdev->dev, | 477 | dev_warn(&port->dd->pdev->dev, |
482 | "PxCMD.CR not clear, escalating reset\n"); | 478 | "PxCMD.CR not clear, escalating reset\n"); |
483 | 479 | ||
484 | if (hba_reset_nosleep(port->dd)) | 480 | if (mtip_hba_reset(port->dd)) |
485 | dev_err(&port->dd->pdev->dev, | 481 | dev_err(&port->dd->pdev->dev, |
486 | "HBA reset escalation failed.\n"); | 482 | "HBA reset escalation failed.\n"); |
487 | 483 | ||
@@ -527,6 +523,26 @@ static void mtip_restart_port(struct mtip_port *port) | |||
527 | 523 | ||
528 | } | 524 | } |
529 | 525 | ||
526 | static int mtip_device_reset(struct driver_data *dd) | ||
527 | { | ||
528 | int rv = 0; | ||
529 | |||
530 | if (mtip_check_surprise_removal(dd->pdev)) | ||
531 | return 0; | ||
532 | |||
533 | if (mtip_hba_reset(dd) < 0) | ||
534 | rv = -EFAULT; | ||
535 | |||
536 | mdelay(1); | ||
537 | mtip_init_port(dd->port); | ||
538 | mtip_start_port(dd->port); | ||
539 | |||
540 | /* Enable interrupts on the HBA. */ | ||
541 | writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN, | ||
542 | dd->mmio + HOST_CTL); | ||
543 | return rv; | ||
544 | } | ||
545 | |||
530 | /* | 546 | /* |
531 | * Helper function for tag logging | 547 | * Helper function for tag logging |
532 | */ | 548 | */ |
@@ -632,7 +648,7 @@ static void mtip_timeout_function(unsigned long int data) | |||
632 | if (cmdto_cnt) { | 648 | if (cmdto_cnt) { |
633 | print_tags(port->dd, "timed out", tagaccum, cmdto_cnt); | 649 | print_tags(port->dd, "timed out", tagaccum, cmdto_cnt); |
634 | if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) { | 650 | if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) { |
635 | mtip_restart_port(port); | 651 | mtip_device_reset(port->dd); |
636 | wake_up_interruptible(&port->svc_wait); | 652 | wake_up_interruptible(&port->svc_wait); |
637 | } | 653 | } |
638 | clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags); | 654 | clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags); |
@@ -1283,11 +1299,11 @@ static int mtip_exec_internal_command(struct mtip_port *port, | |||
1283 | int rv = 0, ready2go = 1; | 1299 | int rv = 0, ready2go = 1; |
1284 | struct mtip_cmd *int_cmd = &port->commands[MTIP_TAG_INTERNAL]; | 1300 | struct mtip_cmd *int_cmd = &port->commands[MTIP_TAG_INTERNAL]; |
1285 | unsigned long to; | 1301 | unsigned long to; |
1302 | struct driver_data *dd = port->dd; | ||
1286 | 1303 | ||
1287 | /* Make sure the buffer is 8 byte aligned. This is asic specific. */ | 1304 | /* Make sure the buffer is 8 byte aligned. This is asic specific. */ |
1288 | if (buffer & 0x00000007) { | 1305 | if (buffer & 0x00000007) { |
1289 | dev_err(&port->dd->pdev->dev, | 1306 | dev_err(&dd->pdev->dev, "SG buffer is not 8 byte aligned\n"); |
1290 | "SG buffer is not 8 byte aligned\n"); | ||
1291 | return -EFAULT; | 1307 | return -EFAULT; |
1292 | } | 1308 | } |
1293 | 1309 | ||
@@ -1300,23 +1316,21 @@ static int mtip_exec_internal_command(struct mtip_port *port, | |||
1300 | mdelay(100); | 1316 | mdelay(100); |
1301 | } while (time_before(jiffies, to)); | 1317 | } while (time_before(jiffies, to)); |
1302 | if (!ready2go) { | 1318 | if (!ready2go) { |
1303 | dev_warn(&port->dd->pdev->dev, | 1319 | dev_warn(&dd->pdev->dev, |
1304 | "Internal cmd active. new cmd [%02X]\n", fis->command); | 1320 | "Internal cmd active. new cmd [%02X]\n", fis->command); |
1305 | return -EBUSY; | 1321 | return -EBUSY; |
1306 | } | 1322 | } |
1307 | set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags); | 1323 | set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags); |
1308 | port->ic_pause_timer = 0; | 1324 | port->ic_pause_timer = 0; |
1309 | 1325 | ||
1310 | if (fis->command == ATA_CMD_SEC_ERASE_UNIT) | 1326 | clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags); |
1311 | clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags); | 1327 | clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags); |
1312 | else if (fis->command == ATA_CMD_DOWNLOAD_MICRO) | ||
1313 | clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags); | ||
1314 | 1328 | ||
1315 | if (atomic == GFP_KERNEL) { | 1329 | if (atomic == GFP_KERNEL) { |
1316 | if (fis->command != ATA_CMD_STANDBYNOW1) { | 1330 | if (fis->command != ATA_CMD_STANDBYNOW1) { |
1317 | /* wait for io to complete if non atomic */ | 1331 | /* wait for io to complete if non atomic */ |
1318 | if (mtip_quiesce_io(port, 5000) < 0) { | 1332 | if (mtip_quiesce_io(port, 5000) < 0) { |
1319 | dev_warn(&port->dd->pdev->dev, | 1333 | dev_warn(&dd->pdev->dev, |
1320 | "Failed to quiesce IO\n"); | 1334 | "Failed to quiesce IO\n"); |
1321 | release_slot(port, MTIP_TAG_INTERNAL); | 1335 | release_slot(port, MTIP_TAG_INTERNAL); |
1322 | clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags); | 1336 | clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags); |
@@ -1361,58 +1375,84 @@ static int mtip_exec_internal_command(struct mtip_port *port, | |||
1361 | /* Issue the command to the hardware */ | 1375 | /* Issue the command to the hardware */ |
1362 | mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL); | 1376 | mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL); |
1363 | 1377 | ||
1364 | /* Poll if atomic, wait_for_completion otherwise */ | ||
1365 | if (atomic == GFP_KERNEL) { | 1378 | if (atomic == GFP_KERNEL) { |
1366 | /* Wait for the command to complete or timeout. */ | 1379 | /* Wait for the command to complete or timeout. */ |
1367 | if (wait_for_completion_timeout( | 1380 | if (wait_for_completion_interruptible_timeout( |
1368 | &wait, | 1381 | &wait, |
1369 | msecs_to_jiffies(timeout)) == 0) { | 1382 | msecs_to_jiffies(timeout)) <= 0) { |
1370 | dev_err(&port->dd->pdev->dev, | 1383 | if (rv == -ERESTARTSYS) { /* interrupted */ |
1371 | "Internal command did not complete [%d] " | 1384 | dev_err(&dd->pdev->dev, |
1372 | "within timeout of %lu ms\n", | 1385 | "Internal command [%02X] was interrupted after %lu ms\n", |
1373 | atomic, timeout); | 1386 | fis->command, timeout); |
1374 | if (mtip_check_surprise_removal(port->dd->pdev) || | 1387 | rv = -EINTR; |
1388 | goto exec_ic_exit; | ||
1389 | } else if (rv == 0) /* timeout */ | ||
1390 | dev_err(&dd->pdev->dev, | ||
1391 | "Internal command did not complete [%02X] within timeout of %lu ms\n", | ||
1392 | fis->command, timeout); | ||
1393 | else | ||
1394 | dev_err(&dd->pdev->dev, | ||
1395 | "Internal command [%02X] wait returned code [%d] after %lu ms - unhandled\n", | ||
1396 | fis->command, rv, timeout); | ||
1397 | |||
1398 | if (mtip_check_surprise_removal(dd->pdev) || | ||
1375 | test_bit(MTIP_DDF_REMOVE_PENDING_BIT, | 1399 | test_bit(MTIP_DDF_REMOVE_PENDING_BIT, |
1376 | &port->dd->dd_flag)) { | 1400 | &dd->dd_flag)) { |
1401 | dev_err(&dd->pdev->dev, | ||
1402 | "Internal command [%02X] wait returned due to SR\n", | ||
1403 | fis->command); | ||
1377 | rv = -ENXIO; | 1404 | rv = -ENXIO; |
1378 | goto exec_ic_exit; | 1405 | goto exec_ic_exit; |
1379 | } | 1406 | } |
1407 | mtip_device_reset(dd); /* recover from timeout issue */ | ||
1380 | rv = -EAGAIN; | 1408 | rv = -EAGAIN; |
1409 | goto exec_ic_exit; | ||
1381 | } | 1410 | } |
1382 | } else { | 1411 | } else { |
1412 | u32 hba_stat, port_stat; | ||
1413 | |||
1383 | /* Spin for <timeout> checking if command still outstanding */ | 1414 | /* Spin for <timeout> checking if command still outstanding */ |
1384 | timeout = jiffies + msecs_to_jiffies(timeout); | 1415 | timeout = jiffies + msecs_to_jiffies(timeout); |
1385 | while ((readl(port->cmd_issue[MTIP_TAG_INTERNAL]) | 1416 | while ((readl(port->cmd_issue[MTIP_TAG_INTERNAL]) |
1386 | & (1 << MTIP_TAG_INTERNAL)) | 1417 | & (1 << MTIP_TAG_INTERNAL)) |
1387 | && time_before(jiffies, timeout)) { | 1418 | && time_before(jiffies, timeout)) { |
1388 | if (mtip_check_surprise_removal(port->dd->pdev)) { | 1419 | if (mtip_check_surprise_removal(dd->pdev)) { |
1389 | rv = -ENXIO; | 1420 | rv = -ENXIO; |
1390 | goto exec_ic_exit; | 1421 | goto exec_ic_exit; |
1391 | } | 1422 | } |
1392 | if ((fis->command != ATA_CMD_STANDBYNOW1) && | 1423 | if ((fis->command != ATA_CMD_STANDBYNOW1) && |
1393 | test_bit(MTIP_DDF_REMOVE_PENDING_BIT, | 1424 | test_bit(MTIP_DDF_REMOVE_PENDING_BIT, |
1394 | &port->dd->dd_flag)) { | 1425 | &dd->dd_flag)) { |
1395 | rv = -ENXIO; | 1426 | rv = -ENXIO; |
1396 | goto exec_ic_exit; | 1427 | goto exec_ic_exit; |
1397 | } | 1428 | } |
1398 | if (readl(port->mmio + PORT_IRQ_STAT) & PORT_IRQ_ERR) { | 1429 | port_stat = readl(port->mmio + PORT_IRQ_STAT); |
1399 | atomic_inc(&int_cmd->active); /* error */ | 1430 | if (!port_stat) |
1400 | break; | 1431 | continue; |
1432 | |||
1433 | if (port_stat & PORT_IRQ_ERR) { | ||
1434 | dev_err(&dd->pdev->dev, | ||
1435 | "Internal command [%02X] failed\n", | ||
1436 | fis->command); | ||
1437 | mtip_device_reset(dd); | ||
1438 | rv = -EIO; | ||
1439 | goto exec_ic_exit; | ||
1440 | } else { | ||
1441 | writel(port_stat, port->mmio + PORT_IRQ_STAT); | ||
1442 | hba_stat = readl(dd->mmio + HOST_IRQ_STAT); | ||
1443 | if (hba_stat) | ||
1444 | writel(hba_stat, | ||
1445 | dd->mmio + HOST_IRQ_STAT); | ||
1401 | } | 1446 | } |
1447 | break; | ||
1402 | } | 1448 | } |
1403 | } | 1449 | } |
1404 | 1450 | ||
1405 | if (atomic_read(&int_cmd->active) > 1) { | ||
1406 | dev_err(&port->dd->pdev->dev, | ||
1407 | "Internal command [%02X] failed\n", fis->command); | ||
1408 | rv = -EIO; | ||
1409 | } | ||
1410 | if (readl(port->cmd_issue[MTIP_TAG_INTERNAL]) | 1451 | if (readl(port->cmd_issue[MTIP_TAG_INTERNAL]) |
1411 | & (1 << MTIP_TAG_INTERNAL)) { | 1452 | & (1 << MTIP_TAG_INTERNAL)) { |
1412 | rv = -ENXIO; | 1453 | rv = -ENXIO; |
1413 | if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT, | 1454 | if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) { |
1414 | &port->dd->dd_flag)) { | 1455 | mtip_device_reset(dd); |
1415 | mtip_restart_port(port); | ||
1416 | rv = -EAGAIN; | 1456 | rv = -EAGAIN; |
1417 | } | 1457 | } |
1418 | } | 1458 | } |
@@ -1724,7 +1764,8 @@ static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id, | |||
1724 | * -EINVAL Invalid parameters passed in, trim not supported | 1764 | * -EINVAL Invalid parameters passed in, trim not supported |
1725 | * -EIO Error submitting trim request to hw | 1765 | * -EIO Error submitting trim request to hw |
1726 | */ | 1766 | */ |
1727 | static int mtip_send_trim(struct driver_data *dd, unsigned int lba, unsigned int len) | 1767 | static int mtip_send_trim(struct driver_data *dd, unsigned int lba, |
1768 | unsigned int len) | ||
1728 | { | 1769 | { |
1729 | int i, rv = 0; | 1770 | int i, rv = 0; |
1730 | u64 tlba, tlen, sect_left; | 1771 | u64 tlba, tlen, sect_left; |
@@ -1811,45 +1852,6 @@ static bool mtip_hw_get_capacity(struct driver_data *dd, sector_t *sectors) | |||
1811 | } | 1852 | } |
1812 | 1853 | ||
1813 | /* | 1854 | /* |
1814 | * Reset the HBA. | ||
1815 | * | ||
1816 | * Resets the HBA by setting the HBA Reset bit in the Global | ||
1817 | * HBA Control register. After setting the HBA Reset bit the | ||
1818 | * function waits for 1 second before reading the HBA Reset | ||
1819 | * bit to make sure it has cleared. If HBA Reset is not clear | ||
1820 | * an error is returned. Cannot be used in non-blockable | ||
1821 | * context. | ||
1822 | * | ||
1823 | * @dd Pointer to the driver data structure. | ||
1824 | * | ||
1825 | * return value | ||
1826 | * 0 The reset was successful. | ||
1827 | * -1 The HBA Reset bit did not clear. | ||
1828 | */ | ||
1829 | static int mtip_hba_reset(struct driver_data *dd) | ||
1830 | { | ||
1831 | mtip_deinit_port(dd->port); | ||
1832 | |||
1833 | /* Set the reset bit */ | ||
1834 | writel(HOST_RESET, dd->mmio + HOST_CTL); | ||
1835 | |||
1836 | /* Flush */ | ||
1837 | readl(dd->mmio + HOST_CTL); | ||
1838 | |||
1839 | /* Wait for reset to clear */ | ||
1840 | ssleep(1); | ||
1841 | |||
1842 | /* Check the bit has cleared */ | ||
1843 | if (readl(dd->mmio + HOST_CTL) & HOST_RESET) { | ||
1844 | dev_err(&dd->pdev->dev, | ||
1845 | "Reset bit did not clear.\n"); | ||
1846 | return -1; | ||
1847 | } | ||
1848 | |||
1849 | return 0; | ||
1850 | } | ||
1851 | |||
1852 | /* | ||
1853 | * Display the identify command data. | 1855 | * Display the identify command data. |
1854 | * | 1856 | * |
1855 | * @port Pointer to the port data structure. | 1857 | * @port Pointer to the port data structure. |
@@ -2710,6 +2712,100 @@ static ssize_t mtip_hw_show_status(struct device *dev, | |||
2710 | 2712 | ||
2711 | static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL); | 2713 | static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL); |
2712 | 2714 | ||
2715 | /* debugsfs entries */ | ||
2716 | |||
2717 | static ssize_t show_device_status(struct device_driver *drv, char *buf) | ||
2718 | { | ||
2719 | int size = 0; | ||
2720 | struct driver_data *dd, *tmp; | ||
2721 | unsigned long flags; | ||
2722 | char id_buf[42]; | ||
2723 | u16 status = 0; | ||
2724 | |||
2725 | spin_lock_irqsave(&dev_lock, flags); | ||
2726 | size += sprintf(&buf[size], "Devices Present:\n"); | ||
2727 | list_for_each_entry_safe(dd, tmp, &online_list, online_list) { | ||
2728 | if (dd->pdev) { | ||
2729 | if (dd->port && | ||
2730 | dd->port->identify && | ||
2731 | dd->port->identify_valid) { | ||
2732 | strlcpy(id_buf, | ||
2733 | (char *) (dd->port->identify + 10), 21); | ||
2734 | status = *(dd->port->identify + 141); | ||
2735 | } else { | ||
2736 | memset(id_buf, 0, 42); | ||
2737 | status = 0; | ||
2738 | } | ||
2739 | |||
2740 | if (dd->port && | ||
2741 | test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) { | ||
2742 | size += sprintf(&buf[size], | ||
2743 | " device %s %s (ftl rebuild %d %%)\n", | ||
2744 | dev_name(&dd->pdev->dev), | ||
2745 | id_buf, | ||
2746 | status); | ||
2747 | } else { | ||
2748 | size += sprintf(&buf[size], | ||
2749 | " device %s %s\n", | ||
2750 | dev_name(&dd->pdev->dev), | ||
2751 | id_buf); | ||
2752 | } | ||
2753 | } | ||
2754 | } | ||
2755 | |||
2756 | size += sprintf(&buf[size], "Devices Being Removed:\n"); | ||
2757 | list_for_each_entry_safe(dd, tmp, &removing_list, remove_list) { | ||
2758 | if (dd->pdev) { | ||
2759 | if (dd->port && | ||
2760 | dd->port->identify && | ||
2761 | dd->port->identify_valid) { | ||
2762 | strlcpy(id_buf, | ||
2763 | (char *) (dd->port->identify+10), 21); | ||
2764 | status = *(dd->port->identify + 141); | ||
2765 | } else { | ||
2766 | memset(id_buf, 0, 42); | ||
2767 | status = 0; | ||
2768 | } | ||
2769 | |||
2770 | if (dd->port && | ||
2771 | test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) { | ||
2772 | size += sprintf(&buf[size], | ||
2773 | " device %s %s (ftl rebuild %d %%)\n", | ||
2774 | dev_name(&dd->pdev->dev), | ||
2775 | id_buf, | ||
2776 | status); | ||
2777 | } else { | ||
2778 | size += sprintf(&buf[size], | ||
2779 | " device %s %s\n", | ||
2780 | dev_name(&dd->pdev->dev), | ||
2781 | id_buf); | ||
2782 | } | ||
2783 | } | ||
2784 | } | ||
2785 | spin_unlock_irqrestore(&dev_lock, flags); | ||
2786 | |||
2787 | return size; | ||
2788 | } | ||
2789 | |||
2790 | static ssize_t mtip_hw_read_device_status(struct file *f, char __user *ubuf, | ||
2791 | size_t len, loff_t *offset) | ||
2792 | { | ||
2793 | int size = *offset; | ||
2794 | char buf[MTIP_DFS_MAX_BUF_SIZE]; | ||
2795 | |||
2796 | if (!len || *offset) | ||
2797 | return 0; | ||
2798 | |||
2799 | size += show_device_status(NULL, buf); | ||
2800 | |||
2801 | *offset = size <= len ? size : len; | ||
2802 | size = copy_to_user(ubuf, buf, *offset); | ||
2803 | if (size) | ||
2804 | return -EFAULT; | ||
2805 | |||
2806 | return *offset; | ||
2807 | } | ||
2808 | |||
2713 | static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf, | 2809 | static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf, |
2714 | size_t len, loff_t *offset) | 2810 | size_t len, loff_t *offset) |
2715 | { | 2811 | { |
@@ -2804,6 +2900,13 @@ static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf, | |||
2804 | return *offset; | 2900 | return *offset; |
2805 | } | 2901 | } |
2806 | 2902 | ||
2903 | static const struct file_operations mtip_device_status_fops = { | ||
2904 | .owner = THIS_MODULE, | ||
2905 | .open = simple_open, | ||
2906 | .read = mtip_hw_read_device_status, | ||
2907 | .llseek = no_llseek, | ||
2908 | }; | ||
2909 | |||
2807 | static const struct file_operations mtip_regs_fops = { | 2910 | static const struct file_operations mtip_regs_fops = { |
2808 | .owner = THIS_MODULE, | 2911 | .owner = THIS_MODULE, |
2809 | .open = simple_open, | 2912 | .open = simple_open, |
@@ -4161,6 +4264,7 @@ static int mtip_pci_probe(struct pci_dev *pdev, | |||
4161 | const struct cpumask *node_mask; | 4264 | const struct cpumask *node_mask; |
4162 | int cpu, i = 0, j = 0; | 4265 | int cpu, i = 0, j = 0; |
4163 | int my_node = NUMA_NO_NODE; | 4266 | int my_node = NUMA_NO_NODE; |
4267 | unsigned long flags; | ||
4164 | 4268 | ||
4165 | /* Allocate memory for this devices private data. */ | 4269 | /* Allocate memory for this devices private data. */ |
4166 | my_node = pcibus_to_node(pdev->bus); | 4270 | my_node = pcibus_to_node(pdev->bus); |
@@ -4218,6 +4322,9 @@ static int mtip_pci_probe(struct pci_dev *pdev, | |||
4218 | dd->pdev = pdev; | 4322 | dd->pdev = pdev; |
4219 | dd->numa_node = my_node; | 4323 | dd->numa_node = my_node; |
4220 | 4324 | ||
4325 | INIT_LIST_HEAD(&dd->online_list); | ||
4326 | INIT_LIST_HEAD(&dd->remove_list); | ||
4327 | |||
4221 | memset(dd->workq_name, 0, 32); | 4328 | memset(dd->workq_name, 0, 32); |
4222 | snprintf(dd->workq_name, 31, "mtipq%d", dd->instance); | 4329 | snprintf(dd->workq_name, 31, "mtipq%d", dd->instance); |
4223 | 4330 | ||
@@ -4305,6 +4412,14 @@ static int mtip_pci_probe(struct pci_dev *pdev, | |||
4305 | instance++; | 4412 | instance++; |
4306 | if (rv != MTIP_FTL_REBUILD_MAGIC) | 4413 | if (rv != MTIP_FTL_REBUILD_MAGIC) |
4307 | set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag); | 4414 | set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag); |
4415 | else | ||
4416 | rv = 0; /* device in rebuild state, return 0 from probe */ | ||
4417 | |||
4418 | /* Add to online list even if in ftl rebuild */ | ||
4419 | spin_lock_irqsave(&dev_lock, flags); | ||
4420 | list_add(&dd->online_list, &online_list); | ||
4421 | spin_unlock_irqrestore(&dev_lock, flags); | ||
4422 | |||
4308 | goto done; | 4423 | goto done; |
4309 | 4424 | ||
4310 | block_initialize_err: | 4425 | block_initialize_err: |
@@ -4338,9 +4453,15 @@ static void mtip_pci_remove(struct pci_dev *pdev) | |||
4338 | { | 4453 | { |
4339 | struct driver_data *dd = pci_get_drvdata(pdev); | 4454 | struct driver_data *dd = pci_get_drvdata(pdev); |
4340 | int counter = 0; | 4455 | int counter = 0; |
4456 | unsigned long flags; | ||
4341 | 4457 | ||
4342 | set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag); | 4458 | set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag); |
4343 | 4459 | ||
4460 | spin_lock_irqsave(&dev_lock, flags); | ||
4461 | list_del_init(&dd->online_list); | ||
4462 | list_add(&dd->remove_list, &removing_list); | ||
4463 | spin_unlock_irqrestore(&dev_lock, flags); | ||
4464 | |||
4344 | if (mtip_check_surprise_removal(pdev)) { | 4465 | if (mtip_check_surprise_removal(pdev)) { |
4345 | while (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) { | 4466 | while (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) { |
4346 | counter++; | 4467 | counter++; |
@@ -4366,6 +4487,10 @@ static void mtip_pci_remove(struct pci_dev *pdev) | |||
4366 | 4487 | ||
4367 | pci_disable_msi(pdev); | 4488 | pci_disable_msi(pdev); |
4368 | 4489 | ||
4490 | spin_lock_irqsave(&dev_lock, flags); | ||
4491 | list_del_init(&dd->remove_list); | ||
4492 | spin_unlock_irqrestore(&dev_lock, flags); | ||
4493 | |||
4369 | kfree(dd); | 4494 | kfree(dd); |
4370 | pcim_iounmap_regions(pdev, 1 << MTIP_ABAR); | 4495 | pcim_iounmap_regions(pdev, 1 << MTIP_ABAR); |
4371 | } | 4496 | } |
@@ -4513,6 +4638,11 @@ static int __init mtip_init(void) | |||
4513 | 4638 | ||
4514 | pr_info(MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n"); | 4639 | pr_info(MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n"); |
4515 | 4640 | ||
4641 | spin_lock_init(&dev_lock); | ||
4642 | |||
4643 | INIT_LIST_HEAD(&online_list); | ||
4644 | INIT_LIST_HEAD(&removing_list); | ||
4645 | |||
4516 | /* Allocate a major block device number to use with this driver. */ | 4646 | /* Allocate a major block device number to use with this driver. */ |
4517 | error = register_blkdev(0, MTIP_DRV_NAME); | 4647 | error = register_blkdev(0, MTIP_DRV_NAME); |
4518 | if (error <= 0) { | 4648 | if (error <= 0) { |
@@ -4522,11 +4652,18 @@ static int __init mtip_init(void) | |||
4522 | } | 4652 | } |
4523 | mtip_major = error; | 4653 | mtip_major = error; |
4524 | 4654 | ||
4525 | if (!dfs_parent) { | 4655 | dfs_parent = debugfs_create_dir("rssd", NULL); |
4526 | dfs_parent = debugfs_create_dir("rssd", NULL); | 4656 | if (IS_ERR_OR_NULL(dfs_parent)) { |
4527 | if (IS_ERR_OR_NULL(dfs_parent)) { | 4657 | pr_warn("Error creating debugfs parent\n"); |
4528 | pr_warn("Error creating debugfs parent\n"); | 4658 | dfs_parent = NULL; |
4529 | dfs_parent = NULL; | 4659 | } |
4660 | if (dfs_parent) { | ||
4661 | dfs_device_status = debugfs_create_file("device_status", | ||
4662 | S_IRUGO, dfs_parent, NULL, | ||
4663 | &mtip_device_status_fops); | ||
4664 | if (IS_ERR_OR_NULL(dfs_device_status)) { | ||
4665 | pr_err("Error creating device_status node\n"); | ||
4666 | dfs_device_status = NULL; | ||
4530 | } | 4667 | } |
4531 | } | 4668 | } |
4532 | 4669 | ||
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 3bffff5f670c..8e8334c9dd0f 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -129,9 +129,9 @@ enum { | |||
129 | MTIP_PF_EH_ACTIVE_BIT = 1, /* error handling */ | 129 | MTIP_PF_EH_ACTIVE_BIT = 1, /* error handling */ |
130 | MTIP_PF_SE_ACTIVE_BIT = 2, /* secure erase */ | 130 | MTIP_PF_SE_ACTIVE_BIT = 2, /* secure erase */ |
131 | MTIP_PF_DM_ACTIVE_BIT = 3, /* download microcde */ | 131 | MTIP_PF_DM_ACTIVE_BIT = 3, /* download microcde */ |
132 | MTIP_PF_PAUSE_IO = ((1 << MTIP_PF_IC_ACTIVE_BIT) | \ | 132 | MTIP_PF_PAUSE_IO = ((1 << MTIP_PF_IC_ACTIVE_BIT) | |
133 | (1 << MTIP_PF_EH_ACTIVE_BIT) | \ | 133 | (1 << MTIP_PF_EH_ACTIVE_BIT) | |
134 | (1 << MTIP_PF_SE_ACTIVE_BIT) | \ | 134 | (1 << MTIP_PF_SE_ACTIVE_BIT) | |
135 | (1 << MTIP_PF_DM_ACTIVE_BIT)), | 135 | (1 << MTIP_PF_DM_ACTIVE_BIT)), |
136 | 136 | ||
137 | MTIP_PF_SVC_THD_ACTIVE_BIT = 4, | 137 | MTIP_PF_SVC_THD_ACTIVE_BIT = 4, |
@@ -144,9 +144,9 @@ enum { | |||
144 | MTIP_DDF_REMOVE_PENDING_BIT = 1, | 144 | MTIP_DDF_REMOVE_PENDING_BIT = 1, |
145 | MTIP_DDF_OVER_TEMP_BIT = 2, | 145 | MTIP_DDF_OVER_TEMP_BIT = 2, |
146 | MTIP_DDF_WRITE_PROTECT_BIT = 3, | 146 | MTIP_DDF_WRITE_PROTECT_BIT = 3, |
147 | MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \ | 147 | MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | |
148 | (1 << MTIP_DDF_SEC_LOCK_BIT) | \ | 148 | (1 << MTIP_DDF_SEC_LOCK_BIT) | |
149 | (1 << MTIP_DDF_OVER_TEMP_BIT) | \ | 149 | (1 << MTIP_DDF_OVER_TEMP_BIT) | |
150 | (1 << MTIP_DDF_WRITE_PROTECT_BIT)), | 150 | (1 << MTIP_DDF_WRITE_PROTECT_BIT)), |
151 | 151 | ||
152 | MTIP_DDF_CLEANUP_BIT = 5, | 152 | MTIP_DDF_CLEANUP_BIT = 5, |
@@ -180,7 +180,7 @@ struct mtip_work { | |||
180 | 180 | ||
181 | #define MTIP_TRIM_TIMEOUT_MS 240000 | 181 | #define MTIP_TRIM_TIMEOUT_MS 240000 |
182 | #define MTIP_MAX_TRIM_ENTRIES 8 | 182 | #define MTIP_MAX_TRIM_ENTRIES 8 |
183 | #define MTIP_MAX_TRIM_ENTRY_LEN 0xfff8 | 183 | #define MTIP_MAX_TRIM_ENTRY_LEN 0xfff8 |
184 | 184 | ||
185 | struct mtip_trim_entry { | 185 | struct mtip_trim_entry { |
186 | u32 lba; /* starting lba of region */ | 186 | u32 lba; /* starting lba of region */ |
@@ -501,6 +501,10 @@ struct driver_data { | |||
501 | atomic_t irq_workers_active; | 501 | atomic_t irq_workers_active; |
502 | 502 | ||
503 | int isr_binding; | 503 | int isr_binding; |
504 | |||
505 | struct list_head online_list; /* linkage for online list */ | ||
506 | |||
507 | struct list_head remove_list; /* linkage for removing list */ | ||
504 | }; | 508 | }; |
505 | 509 | ||
506 | #endif | 510 | #endif |
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index f556f8a8b3f9..b7b7a88d9f68 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c | |||
@@ -1742,9 +1742,10 @@ static int rbd_img_request_submit(struct rbd_img_request *img_request) | |||
1742 | struct rbd_device *rbd_dev = img_request->rbd_dev; | 1742 | struct rbd_device *rbd_dev = img_request->rbd_dev; |
1743 | struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; | 1743 | struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; |
1744 | struct rbd_obj_request *obj_request; | 1744 | struct rbd_obj_request *obj_request; |
1745 | struct rbd_obj_request *next_obj_request; | ||
1745 | 1746 | ||
1746 | dout("%s: img %p\n", __func__, img_request); | 1747 | dout("%s: img %p\n", __func__, img_request); |
1747 | for_each_obj_request(img_request, obj_request) { | 1748 | for_each_obj_request_safe(img_request, obj_request, next_obj_request) { |
1748 | int ret; | 1749 | int ret; |
1749 | 1750 | ||
1750 | obj_request->callback = rbd_img_obj_callback; | 1751 | obj_request->callback = rbd_img_obj_callback; |
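The rbd change switches to the _safe flavour of the iterator because submitting an object request can complete and unlink it before the loop advances; for_each_obj_request_safe() is a thin wrapper over list_for_each_entry_safe(). A generic sketch of why the safe form matters, with struct item and finish_item() as made-up names:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct item {
        struct list_head node;
        int value;
    };

    /* Finishing an item unlinks and frees it, so the plain iterator
     * would dereference freed memory when stepping to the next entry. */
    static void finish_item(struct item *it)
    {
        list_del(&it->node);
        kfree(it);
    }

    static void finish_all(struct list_head *head)
    {
        struct item *it, *next;

        list_for_each_entry_safe(it, next, head, node) {
            /* 'next' was sampled before finish_item() unlinked 'it' */
            finish_item(it);
        }
    }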
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index e3f9a99b8522..d784650d14f0 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c | |||
@@ -373,26 +373,14 @@ static int hpet_mmap(struct file *file, struct vm_area_struct *vma) | |||
373 | struct hpet_dev *devp; | 373 | struct hpet_dev *devp; |
374 | unsigned long addr; | 374 | unsigned long addr; |
375 | 375 | ||
376 | if (((vma->vm_end - vma->vm_start) != PAGE_SIZE) || vma->vm_pgoff) | ||
377 | return -EINVAL; | ||
378 | |||
379 | devp = file->private_data; | 376 | devp = file->private_data; |
380 | addr = devp->hd_hpets->hp_hpet_phys; | 377 | addr = devp->hd_hpets->hp_hpet_phys; |
381 | 378 | ||
382 | if (addr & (PAGE_SIZE - 1)) | 379 | if (addr & (PAGE_SIZE - 1)) |
383 | return -ENOSYS; | 380 | return -ENOSYS; |
384 | 381 | ||
385 | vma->vm_flags |= VM_IO; | ||
386 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | 382 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); |
387 | 383 | return vm_iomap_memory(vma, addr, PAGE_SIZE); | |
388 | if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT, | ||
389 | PAGE_SIZE, vma->vm_page_prot)) { | ||
390 | printk(KERN_ERR "%s: io_remap_pfn_range failed\n", | ||
391 | __func__); | ||
392 | return -EAGAIN; | ||
393 | } | ||
394 | |||
395 | return 0; | ||
396 | #else | 384 | #else |
397 | return -ENOSYS; | 385 | return -ENOSYS; |
398 | #endif | 386 | #endif |
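The hpet change replaces the open-coded size/offset checks and io_remap_pfn_range() call with vm_iomap_memory(), which performs the same validation internally before remapping. A hedged sketch of a minimal mmap handler built on it; my_mmap and my_phys_base are placeholders, not hpet code:

    #include <linux/fs.h>
    #include <linux/mm.h>

    static phys_addr_t my_phys_base;    /* filled in at probe time */

    static int my_mmap(struct file *file, struct vm_area_struct *vma)
    {
        /* Map one page of device registers uncached into user space.
         * vm_iomap_memory() checks vma->vm_pgoff and the requested
         * length against the region, then remaps it for us. */
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        return vm_iomap_memory(vma, my_phys_base, PAGE_SIZE);
    }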
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 69ae5972713c..a0f7724852eb 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c | |||
@@ -380,6 +380,15 @@ void hwrng_unregister(struct hwrng *rng) | |||
380 | } | 380 | } |
381 | EXPORT_SYMBOL_GPL(hwrng_unregister); | 381 | EXPORT_SYMBOL_GPL(hwrng_unregister); |
382 | 382 | ||
383 | static void __exit hwrng_exit(void) | ||
384 | { | ||
385 | mutex_lock(&rng_mutex); | ||
386 | BUG_ON(current_rng); | ||
387 | kfree(rng_buffer); | ||
388 | mutex_unlock(&rng_mutex); | ||
389 | } | ||
390 | |||
391 | module_exit(hwrng_exit); | ||
383 | 392 | ||
384 | MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver"); | 393 | MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver"); |
385 | MODULE_LICENSE("GPL"); | 394 | MODULE_LICENSE("GPL"); |
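The hw_random addition gives the module an exit path that frees its lazily allocated buffer under the same mutex that guards it. Roughly, assuming a similar static buffer (all names here are illustrative):

    #include <linux/module.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>

    static DEFINE_MUTEX(buf_mutex);
    static u8 *lazy_buf;        /* allocated on first use elsewhere */

    static void __exit my_exit(void)
    {
        /* Take the same mutex the allocation path uses so the buffer
         * cannot be freed while a late caller still references it. */
        mutex_lock(&buf_mutex);
        kfree(lazy_buf);
        lazy_buf = NULL;
        mutex_unlock(&buf_mutex);
    }
    module_exit(my_exit);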
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index e905d5f53051..ce5f3fc25d6d 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c | |||
@@ -149,7 +149,8 @@ struct ports_device { | |||
149 | spinlock_t ports_lock; | 149 | spinlock_t ports_lock; |
150 | 150 | ||
151 | /* To protect the vq operations for the control channel */ | 151 | /* To protect the vq operations for the control channel */ |
152 | spinlock_t cvq_lock; | 152 | spinlock_t c_ivq_lock; |
153 | spinlock_t c_ovq_lock; | ||
153 | 154 | ||
154 | /* The current config space is stored here */ | 155 | /* The current config space is stored here */ |
155 | struct virtio_console_config config; | 156 | struct virtio_console_config config; |
@@ -569,11 +570,14 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id, | |||
569 | vq = portdev->c_ovq; | 570 | vq = portdev->c_ovq; |
570 | 571 | ||
571 | sg_init_one(sg, &cpkt, sizeof(cpkt)); | 572 | sg_init_one(sg, &cpkt, sizeof(cpkt)); |
573 | |||
574 | spin_lock(&portdev->c_ovq_lock); | ||
572 | if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) == 0) { | 575 | if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) == 0) { |
573 | virtqueue_kick(vq); | 576 | virtqueue_kick(vq); |
574 | while (!virtqueue_get_buf(vq, &len)) | 577 | while (!virtqueue_get_buf(vq, &len)) |
575 | cpu_relax(); | 578 | cpu_relax(); |
576 | } | 579 | } |
580 | spin_unlock(&portdev->c_ovq_lock); | ||
577 | return 0; | 581 | return 0; |
578 | } | 582 | } |
579 | 583 | ||
@@ -1436,7 +1440,7 @@ static int add_port(struct ports_device *portdev, u32 id) | |||
1436 | * rproc_serial does not want the console port, only | 1440 | * rproc_serial does not want the console port, only |
1437 | * the generic port implementation. | 1441 | * the generic port implementation. |
1438 | */ | 1442 | */ |
1439 | port->host_connected = port->guest_connected = true; | 1443 | port->host_connected = true; |
1440 | else if (!use_multiport(port->portdev)) { | 1444 | else if (!use_multiport(port->portdev)) { |
1441 | /* | 1445 | /* |
1442 | * If we're not using multiport support, | 1446 | * If we're not using multiport support, |
@@ -1709,23 +1713,23 @@ static void control_work_handler(struct work_struct *work) | |||
1709 | portdev = container_of(work, struct ports_device, control_work); | 1713 | portdev = container_of(work, struct ports_device, control_work); |
1710 | vq = portdev->c_ivq; | 1714 | vq = portdev->c_ivq; |
1711 | 1715 | ||
1712 | spin_lock(&portdev->cvq_lock); | 1716 | spin_lock(&portdev->c_ivq_lock); |
1713 | while ((buf = virtqueue_get_buf(vq, &len))) { | 1717 | while ((buf = virtqueue_get_buf(vq, &len))) { |
1714 | spin_unlock(&portdev->cvq_lock); | 1718 | spin_unlock(&portdev->c_ivq_lock); |
1715 | 1719 | ||
1716 | buf->len = len; | 1720 | buf->len = len; |
1717 | buf->offset = 0; | 1721 | buf->offset = 0; |
1718 | 1722 | ||
1719 | handle_control_message(portdev, buf); | 1723 | handle_control_message(portdev, buf); |
1720 | 1724 | ||
1721 | spin_lock(&portdev->cvq_lock); | 1725 | spin_lock(&portdev->c_ivq_lock); |
1722 | if (add_inbuf(portdev->c_ivq, buf) < 0) { | 1726 | if (add_inbuf(portdev->c_ivq, buf) < 0) { |
1723 | dev_warn(&portdev->vdev->dev, | 1727 | dev_warn(&portdev->vdev->dev, |
1724 | "Error adding buffer to queue\n"); | 1728 | "Error adding buffer to queue\n"); |
1725 | free_buf(buf, false); | 1729 | free_buf(buf, false); |
1726 | } | 1730 | } |
1727 | } | 1731 | } |
1728 | spin_unlock(&portdev->cvq_lock); | 1732 | spin_unlock(&portdev->c_ivq_lock); |
1729 | } | 1733 | } |
1730 | 1734 | ||
1731 | static void out_intr(struct virtqueue *vq) | 1735 | static void out_intr(struct virtqueue *vq) |
@@ -1752,13 +1756,23 @@ static void in_intr(struct virtqueue *vq) | |||
1752 | port->inbuf = get_inbuf(port); | 1756 | port->inbuf = get_inbuf(port); |
1753 | 1757 | ||
1754 | /* | 1758 | /* |
1755 | * Don't queue up data when port is closed. This condition | 1759 | * Normally the port should not accept data when the port is |
1760 | * closed. For generic serial ports, the host won't (shouldn't) | ||
1761 | * send data till the guest is connected. But this condition | ||
1756 | * can be reached when a console port is not yet connected (no | 1762 | * can be reached when a console port is not yet connected (no |
1757 | * tty is spawned) and the host sends out data to console | 1763 | * tty is spawned) and the other side sends out data over the |
1758 | ports. For generic serial ports, the host won't | 1764 | vring, or when a remote device starts sending data before |
1759 | * (shouldn't) send data till the guest is connected. | 1765 | * the ports are opened. |
1766 | * | ||
1767 | * A generic serial port will discard data if not connected, | ||
1768 | * while console ports and rproc-serial ports accept data at | ||
1769 | * any time. rproc-serial is initiated with guest_connected to | ||
1770 | * false because port_fops_open expects this. Console ports are | ||
1771 | * hooked up with an HVC console and are initialized with | ||
1772 | * guest_connected to true. | ||
1760 | */ | 1773 | */ |
1761 | if (!port->guest_connected) | 1774 | |
1775 | if (!port->guest_connected && !is_rproc_serial(port->portdev->vdev)) | ||
1762 | discard_port_data(port); | 1776 | discard_port_data(port); |
1763 | 1777 | ||
1764 | spin_unlock_irqrestore(&port->inbuf_lock, flags); | 1778 | spin_unlock_irqrestore(&port->inbuf_lock, flags); |
@@ -1986,10 +2000,12 @@ static int virtcons_probe(struct virtio_device *vdev) | |||
1986 | if (multiport) { | 2000 | if (multiport) { |
1987 | unsigned int nr_added_bufs; | 2001 | unsigned int nr_added_bufs; |
1988 | 2002 | ||
1989 | spin_lock_init(&portdev->cvq_lock); | 2003 | spin_lock_init(&portdev->c_ivq_lock); |
2004 | spin_lock_init(&portdev->c_ovq_lock); | ||
1990 | INIT_WORK(&portdev->control_work, &control_work_handler); | 2005 | INIT_WORK(&portdev->control_work, &control_work_handler); |
1991 | 2006 | ||
1992 | nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->cvq_lock); | 2007 | nr_added_bufs = fill_queue(portdev->c_ivq, |
2008 | &portdev->c_ivq_lock); | ||
1993 | if (!nr_added_bufs) { | 2009 | if (!nr_added_bufs) { |
1994 | dev_err(&vdev->dev, | 2010 | dev_err(&vdev->dev, |
1995 | "Error allocating buffers for control queue\n"); | 2011 | "Error allocating buffers for control queue\n"); |
@@ -2140,7 +2156,7 @@ static int virtcons_restore(struct virtio_device *vdev) | |||
2140 | return ret; | 2156 | return ret; |
2141 | 2157 | ||
2142 | if (use_multiport(portdev)) | 2158 | if (use_multiport(portdev)) |
2143 | fill_queue(portdev->c_ivq, &portdev->cvq_lock); | 2159 | fill_queue(portdev->c_ivq, &portdev->c_ivq_lock); |
2144 | 2160 | ||
2145 | list_for_each_entry(port, &portdev->ports, list) { | 2161 | list_for_each_entry(port, &portdev->ports, list) { |
2146 | port->in_vq = portdev->in_vqs[port->id]; | 2162 | port->in_vq = portdev->in_vqs[port->id]; |
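The virtio_console change splits the single control-queue lock into c_ivq_lock and c_ovq_lock, so senders on the outbound control queue no longer contend with the inbound work handler. A sketch of the send side, using the virtqueue calls exactly as this driver does at this point in time; send_ctrl_pkt and its parameters are placeholders, and atomic context is assumed as in the driver:

    #include <linux/gfp.h>
    #include <linux/scatterlist.h>
    #include <linux/spinlock.h>
    #include <linux/virtio.h>

    /* Only callers of this helper contend on the outbound control lock;
     * the inbound control queue keeps its own, separate lock. */
    static void send_ctrl_pkt(struct virtqueue *vq, spinlock_t *c_ovq_lock,
                              void *pkt, size_t len)
    {
        struct scatterlist sg[1];
        unsigned int rlen;

        sg_init_one(sg, pkt, len);

        spin_lock(c_ovq_lock);
        if (virtqueue_add_buf(vq, sg, 1, 0, pkt, GFP_ATOMIC) == 0) {
            virtqueue_kick(vq);
            /* Poll for completion, as the driver does for control msgs. */
            while (!virtqueue_get_buf(vq, &rlen))
                cpu_relax();
        }
        spin_unlock(c_ovq_lock);
    }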
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c index 1e2de7305362..f873dcefe0de 100644 --- a/drivers/clk/tegra/clk-tegra20.c +++ b/drivers/clk/tegra/clk-tegra20.c | |||
@@ -703,7 +703,7 @@ static void tegra20_pll_init(void) | |||
703 | clks[pll_a_out0] = clk; | 703 | clks[pll_a_out0] = clk; |
704 | 704 | ||
705 | /* PLLE */ | 705 | /* PLLE */ |
706 | clk = tegra_clk_register_plle("pll_e", "pll_ref", clk_base, NULL, | 706 | clk = tegra_clk_register_plle("pll_e", "pll_ref", clk_base, pmc_base, |
707 | 0, 100000000, &pll_e_params, | 707 | 0, 100000000, &pll_e_params, |
708 | 0, pll_e_freq_table, NULL); | 708 | 0, pll_e_freq_table, NULL); |
709 | clk_register_clkdev(clk, "pll_e", NULL); | 709 | clk_register_clkdev(clk, "pll_e", NULL); |
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c index 4e5b7fb8927c..37d23a0f8c56 100644 --- a/drivers/cpufreq/cpufreq-cpu0.c +++ b/drivers/cpufreq/cpufreq-cpu0.c | |||
@@ -178,10 +178,16 @@ static struct cpufreq_driver cpu0_cpufreq_driver = { | |||
178 | 178 | ||
179 | static int cpu0_cpufreq_probe(struct platform_device *pdev) | 179 | static int cpu0_cpufreq_probe(struct platform_device *pdev) |
180 | { | 180 | { |
181 | struct device_node *np; | 181 | struct device_node *np, *parent; |
182 | int ret; | 182 | int ret; |
183 | 183 | ||
184 | for_each_child_of_node(of_find_node_by_path("/cpus"), np) { | 184 | parent = of_find_node_by_path("/cpus"); |
185 | if (!parent) { | ||
186 | pr_err("failed to find OF /cpus\n"); | ||
187 | return -ENOENT; | ||
188 | } | ||
189 | |||
190 | for_each_child_of_node(parent, np) { | ||
185 | if (of_get_property(np, "operating-points", NULL)) | 191 | if (of_get_property(np, "operating-points", NULL)) |
186 | break; | 192 | break; |
187 | } | 193 | } |
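The cpufreq-cpu0 fix checks the result of of_find_node_by_path() before walking its children, since for_each_child_of_node() cannot cope with a NULL parent. A small sketch of the guarded lookup, with of_node_put() added for refcount balance; find_cpu0_with_opps is an invented name:

    #include <linux/errno.h>
    #include <linux/of.h>

    static int find_cpu0_with_opps(struct device_node **out)
    {
        struct device_node *parent, *np;

        parent = of_find_node_by_path("/cpus");
        if (!parent)
            return -ENOENT;     /* no /cpus node: bail out cleanly */

        for_each_child_of_node(parent, np) {
            if (of_get_property(np, "operating-points", NULL)) {
                *out = np;      /* caller now holds a reference to np */
                of_node_put(parent);
                return 0;
            }
        }

        of_node_put(parent);
        return -ENODEV;
    }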
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h index 46bde01eee62..cc4bd2f6838a 100644 --- a/drivers/cpufreq/cpufreq_governor.h +++ b/drivers/cpufreq/cpufreq_governor.h | |||
@@ -14,8 +14,8 @@ | |||
14 | * published by the Free Software Foundation. | 14 | * published by the Free Software Foundation. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #ifndef _CPUFREQ_GOVERNER_H | 17 | #ifndef _CPUFREQ_GOVERNOR_H |
18 | #define _CPUFREQ_GOVERNER_H | 18 | #define _CPUFREQ_GOVERNOR_H |
19 | 19 | ||
20 | #include <linux/cpufreq.h> | 20 | #include <linux/cpufreq.h> |
21 | #include <linux/kobject.h> | 21 | #include <linux/kobject.h> |
@@ -175,4 +175,4 @@ bool need_load_eval(struct cpu_dbs_common_info *cdbs, | |||
175 | unsigned int sampling_rate); | 175 | unsigned int sampling_rate); |
176 | int cpufreq_governor_dbs(struct dbs_data *dbs_data, | 176 | int cpufreq_governor_dbs(struct dbs_data *dbs_data, |
177 | struct cpufreq_policy *policy, unsigned int event); | 177 | struct cpufreq_policy *policy, unsigned int event); |
178 | #endif /* _CPUFREQ_GOVERNER_H */ | 178 | #endif /* _CPUFREQ_GOVERNOR_H */ |
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index ad72922919ed..6133ef5cf671 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
@@ -502,7 +502,6 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu) | |||
502 | 502 | ||
503 | sample_time = cpu->pstate_policy->sample_rate_ms; | 503 | sample_time = cpu->pstate_policy->sample_rate_ms; |
504 | delay = msecs_to_jiffies(sample_time); | 504 | delay = msecs_to_jiffies(sample_time); |
505 | delay -= jiffies % delay; | ||
506 | mod_timer_pinned(&cpu->timer, jiffies + delay); | 505 | mod_timer_pinned(&cpu->timer, jiffies + delay); |
507 | } | 506 | } |
508 | 507 | ||
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c index 8bc5fef07e7a..22c9063e0120 100644 --- a/drivers/crypto/ux500/cryp/cryp_core.c +++ b/drivers/crypto/ux500/cryp/cryp_core.c | |||
@@ -1750,7 +1750,7 @@ static struct platform_driver cryp_driver = { | |||
1750 | .shutdown = ux500_cryp_shutdown, | 1750 | .shutdown = ux500_cryp_shutdown, |
1751 | .driver = { | 1751 | .driver = { |
1752 | .owner = THIS_MODULE, | 1752 | .owner = THIS_MODULE, |
1753 | .name = "cryp1" | 1753 | .name = "cryp1", |
1754 | .pm = &ux500_cryp_pm, | 1754 | .pm = &ux500_cryp_pm, |
1755 | } | 1755 | } |
1756 | }; | 1756 | }; |
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 80b69971cf28..aeaea32bcfda 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig | |||
@@ -83,6 +83,7 @@ config INTEL_IOP_ADMA | |||
83 | 83 | ||
84 | config DW_DMAC | 84 | config DW_DMAC |
85 | tristate "Synopsys DesignWare AHB DMA support" | 85 | tristate "Synopsys DesignWare AHB DMA support" |
86 | depends on GENERIC_HARDIRQS | ||
86 | select DMA_ENGINE | 87 | select DMA_ENGINE |
87 | default y if CPU_AT32AP7000 | 88 | default y if CPU_AT32AP7000 |
88 | help | 89 | help |
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 6e13f262139a..88cfc61329d2 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c | |||
@@ -310,8 +310,6 @@ static void atc_complete_all(struct at_dma_chan *atchan) | |||
310 | 310 | ||
311 | dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n"); | 311 | dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n"); |
312 | 312 | ||
313 | BUG_ON(atc_chan_is_enabled(atchan)); | ||
314 | |||
315 | /* | 313 | /* |
316 | * Submit queued descriptors ASAP, i.e. before we go through | 314 | * Submit queued descriptors ASAP, i.e. before we go through |
317 | * the completed ones. | 315 | * the completed ones. |
@@ -368,6 +366,9 @@ static void atc_advance_work(struct at_dma_chan *atchan) | |||
368 | { | 366 | { |
369 | dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n"); | 367 | dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n"); |
370 | 368 | ||
369 | if (atc_chan_is_enabled(atchan)) | ||
370 | return; | ||
371 | |||
371 | if (list_empty(&atchan->active_list) || | 372 | if (list_empty(&atchan->active_list) || |
372 | list_is_singular(&atchan->active_list)) { | 373 | list_is_singular(&atchan->active_list)) { |
373 | atc_complete_all(atchan); | 374 | atc_complete_all(atchan); |
@@ -1078,9 +1079,7 @@ static void atc_issue_pending(struct dma_chan *chan) | |||
1078 | return; | 1079 | return; |
1079 | 1080 | ||
1080 | spin_lock_irqsave(&atchan->lock, flags); | 1081 | spin_lock_irqsave(&atchan->lock, flags); |
1081 | if (!atc_chan_is_enabled(atchan)) { | 1082 | atc_advance_work(atchan); |
1082 | atc_advance_work(atchan); | ||
1083 | } | ||
1084 | spin_unlock_irqrestore(&atchan->lock, flags); | 1083 | spin_unlock_irqrestore(&atchan->lock, flags); |
1085 | } | 1084 | } |
1086 | 1085 | ||
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index c4b4fd2acc42..08b43bf37158 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c | |||
@@ -276,12 +276,20 @@ static void omap_dma_issue_pending(struct dma_chan *chan) | |||
276 | 276 | ||
277 | spin_lock_irqsave(&c->vc.lock, flags); | 277 | spin_lock_irqsave(&c->vc.lock, flags); |
278 | if (vchan_issue_pending(&c->vc) && !c->desc) { | 278 | if (vchan_issue_pending(&c->vc) && !c->desc) { |
279 | struct omap_dmadev *d = to_omap_dma_dev(chan->device); | 279 | /* |
280 | spin_lock(&d->lock); | 280 | * c->cyclic is used only by audio and in this case the DMA needs |
281 | if (list_empty(&c->node)) | 281 | * to be started without delay. |
282 | list_add_tail(&c->node, &d->pending); | 282 | */ |
283 | spin_unlock(&d->lock); | 283 | if (!c->cyclic) { |
284 | tasklet_schedule(&d->task); | 284 | struct omap_dmadev *d = to_omap_dma_dev(chan->device); |
285 | spin_lock(&d->lock); | ||
286 | if (list_empty(&c->node)) | ||
287 | list_add_tail(&c->node, &d->pending); | ||
288 | spin_unlock(&d->lock); | ||
289 | tasklet_schedule(&d->task); | ||
290 | } else { | ||
291 | omap_dma_start_desc(c); | ||
292 | } | ||
285 | } | 293 | } |
286 | spin_unlock_irqrestore(&c->vc.lock, flags); | 294 | spin_unlock_irqrestore(&c->vc.lock, flags); |
287 | } | 295 | } |
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 718153122759..5dbc5946c4c3 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
@@ -2882,7 +2882,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
2882 | { | 2882 | { |
2883 | struct dma_pl330_platdata *pdat; | 2883 | struct dma_pl330_platdata *pdat; |
2884 | struct dma_pl330_dmac *pdmac; | 2884 | struct dma_pl330_dmac *pdmac; |
2885 | struct dma_pl330_chan *pch; | 2885 | struct dma_pl330_chan *pch, *_p; |
2886 | struct pl330_info *pi; | 2886 | struct pl330_info *pi; |
2887 | struct dma_device *pd; | 2887 | struct dma_device *pd; |
2888 | struct resource *res; | 2888 | struct resource *res; |
@@ -2984,7 +2984,16 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
2984 | ret = dma_async_device_register(pd); | 2984 | ret = dma_async_device_register(pd); |
2985 | if (ret) { | 2985 | if (ret) { |
2986 | dev_err(&adev->dev, "unable to register DMAC\n"); | 2986 | dev_err(&adev->dev, "unable to register DMAC\n"); |
2987 | goto probe_err2; | 2987 | goto probe_err3; |
2988 | } | ||
2989 | |||
2990 | if (adev->dev.of_node) { | ||
2991 | ret = of_dma_controller_register(adev->dev.of_node, | ||
2992 | of_dma_pl330_xlate, pdmac); | ||
2993 | if (ret) { | ||
2994 | dev_err(&adev->dev, | ||
2995 | "unable to register DMA to the generic DT DMA helpers\n"); | ||
2996 | } | ||
2988 | } | 2997 | } |
2989 | 2998 | ||
2990 | dev_info(&adev->dev, | 2999 | dev_info(&adev->dev, |
@@ -2995,16 +3004,21 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
2995 | pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan, | 3004 | pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan, |
2996 | pi->pcfg.num_peri, pi->pcfg.num_events); | 3005 | pi->pcfg.num_peri, pi->pcfg.num_events); |
2997 | 3006 | ||
2998 | ret = of_dma_controller_register(adev->dev.of_node, | ||
2999 | of_dma_pl330_xlate, pdmac); | ||
3000 | if (ret) { | ||
3001 | dev_err(&adev->dev, | ||
3002 | "unable to register DMA to the generic DT DMA helpers\n"); | ||
3003 | goto probe_err2; | ||
3004 | } | ||
3005 | |||
3006 | return 0; | 3007 | return 0; |
3008 | probe_err3: | ||
3009 | amba_set_drvdata(adev, NULL); | ||
3007 | 3010 | ||
3011 | /* Idle the DMAC */ | ||
3012 | list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels, | ||
3013 | chan.device_node) { | ||
3014 | |||
3015 | /* Remove the channel */ | ||
3016 | list_del(&pch->chan.device_node); | ||
3017 | |||
3018 | /* Flush the channel */ | ||
3019 | pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0); | ||
3020 | pl330_free_chan_resources(&pch->chan); | ||
3021 | } | ||
3008 | probe_err2: | 3022 | probe_err2: |
3009 | pl330_del(pi); | 3023 | pl330_del(pi); |
3010 | probe_err1: | 3024 | probe_err1: |
@@ -3023,8 +3037,10 @@ static int pl330_remove(struct amba_device *adev) | |||
3023 | if (!pdmac) | 3037 | if (!pdmac) |
3024 | return 0; | 3038 | return 0; |
3025 | 3039 | ||
3026 | of_dma_controller_free(adev->dev.of_node); | 3040 | if (adev->dev.of_node) |
3041 | of_dma_controller_free(adev->dev.of_node); | ||
3027 | 3042 | ||
3043 | dma_async_device_unregister(&pdmac->ddma); | ||
3028 | amba_set_drvdata(adev, NULL); | 3044 | amba_set_drvdata(adev, NULL); |
3029 | 3045 | ||
3030 | /* Idle the DMAC */ | 3046 | /* Idle the DMAC */ |
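The pl330 probe fix adds a probe_err3 label so a failure in dma_async_device_register() unwinds the channel list built by the earlier steps. The general shape of that reverse-order unwind is sketched below; every name is a placeholder, and note that in the actual driver a DT-helper registration failure is only warned about, not treated as fatal:

    /* All names below are placeholders; the point is the label ordering. */
    struct my_device;

    int setup_channels(struct my_device *dev);
    void free_channels(struct my_device *dev);
    int register_dma_device(struct my_device *dev);
    void unregister_dma_device(struct my_device *dev);
    int register_of_helpers(struct my_device *dev);

    static int my_probe(struct my_device *dev)
    {
        int ret;

        ret = setup_channels(dev);
        if (ret)
            return ret;

        ret = register_dma_device(dev);
        if (ret)
            goto err_free_channels;

        ret = register_of_helpers(dev);
        if (ret)
            goto err_unregister_dma;

        return 0;

    err_unregister_dma:
        unregister_dma_device(dev);
    err_free_channels:
        /* Each label undoes only what succeeded before the failing step. */
        free_channels(dev);
        return ret;
    }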
diff --git a/drivers/eisa/pci_eisa.c b/drivers/eisa/pci_eisa.c index cdae207028a7..6c3fca97d346 100644 --- a/drivers/eisa/pci_eisa.c +++ b/drivers/eisa/pci_eisa.c | |||
@@ -19,10 +19,10 @@ | |||
19 | /* There is only *one* pci_eisa device per machine, right ? */ | 19 | /* There is only *one* pci_eisa device per machine, right ? */ |
20 | static struct eisa_root_device pci_eisa_root; | 20 | static struct eisa_root_device pci_eisa_root; |
21 | 21 | ||
22 | static int __init pci_eisa_init(struct pci_dev *pdev, | 22 | static int __init pci_eisa_init(struct pci_dev *pdev) |
23 | const struct pci_device_id *ent) | ||
24 | { | 23 | { |
25 | int rc; | 24 | int rc, i; |
25 | struct resource *res, *bus_res = NULL; | ||
26 | 26 | ||
27 | if ((rc = pci_enable_device (pdev))) { | 27 | if ((rc = pci_enable_device (pdev))) { |
28 | printk (KERN_ERR "pci_eisa : Could not enable device %s\n", | 28 | printk (KERN_ERR "pci_eisa : Could not enable device %s\n", |
@@ -30,9 +30,30 @@ static int __init pci_eisa_init(struct pci_dev *pdev, | |||
30 | return rc; | 30 | return rc; |
31 | } | 31 | } |
32 | 32 | ||
33 | /* | ||
34 | * The Intel 82375 PCI-EISA bridge is a subtractive-decode PCI | ||
35 | * device, so the resources available on EISA are the same as those | ||
36 | * available on the 82375 bus. This works the same as a PCI-PCI | ||
37 | * bridge in subtractive-decode mode (see pci_read_bridge_bases()). | ||
38 | * We assume other PCI-EISA bridges are similar. | ||
39 | * | ||
40 | * eisa_root_register() can only deal with a single io port resource, | ||
41 | * so we use the first valid io port resource. | ||
42 | */ | ||
43 | pci_bus_for_each_resource(pdev->bus, res, i) | ||
44 | if (res && (res->flags & IORESOURCE_IO)) { | ||
45 | bus_res = res; | ||
46 | break; | ||
47 | } | ||
48 | |||
49 | if (!bus_res) { | ||
50 | dev_err(&pdev->dev, "No resources available\n"); | ||
51 | return -1; | ||
52 | } | ||
53 | |||
33 | pci_eisa_root.dev = &pdev->dev; | 54 | pci_eisa_root.dev = &pdev->dev; |
34 | pci_eisa_root.res = pdev->bus->resource[0]; | 55 | pci_eisa_root.res = bus_res; |
35 | pci_eisa_root.bus_base_addr = pdev->bus->resource[0]->start; | 56 | pci_eisa_root.bus_base_addr = bus_res->start; |
36 | pci_eisa_root.slots = EISA_MAX_SLOTS; | 57 | pci_eisa_root.slots = EISA_MAX_SLOTS; |
37 | pci_eisa_root.dma_mask = pdev->dma_mask; | 58 | pci_eisa_root.dma_mask = pdev->dma_mask; |
38 | dev_set_drvdata(pci_eisa_root.dev, &pci_eisa_root); | 59 | dev_set_drvdata(pci_eisa_root.dev, &pci_eisa_root); |
@@ -45,22 +66,26 @@ static int __init pci_eisa_init(struct pci_dev *pdev, | |||
45 | return 0; | 66 | return 0; |
46 | } | 67 | } |
47 | 68 | ||
48 | static struct pci_device_id pci_eisa_pci_tbl[] = { | 69 | /* |
49 | { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, | 70 | * We have to call pci_eisa_init_early() before pnpacpi_init()/isapnp_init(). |
50 | PCI_CLASS_BRIDGE_EISA << 8, 0xffff00, 0 }, | 71 | * Otherwise pnp resources will get enabled early and could prevent eisa |
51 | { 0, } | 72 | * from being initialized. |
52 | }; | 73 | * Also need to make sure pci_eisa_init_early() is called after |
74 | * x86/pci_subsys_init(). | ||
75 | * So need to use subsys_initcall_sync with it. | ||
76 | */ | ||
77 | static int __init pci_eisa_init_early(void) | ||
78 | { | ||
79 | struct pci_dev *dev = NULL; | ||
80 | int ret; | ||
53 | 81 | ||
54 | static struct pci_driver __refdata pci_eisa_driver = { | 82 | for_each_pci_dev(dev) |
55 | .name = "pci_eisa", | 83 | if ((dev->class >> 8) == PCI_CLASS_BRIDGE_EISA) { |
56 | .id_table = pci_eisa_pci_tbl, | 84 | ret = pci_eisa_init(dev); |
57 | .probe = pci_eisa_init, | 85 | if (ret) |
58 | }; | 86 | return ret; |
87 | } | ||
59 | 88 | ||
60 | static int __init pci_eisa_init_module (void) | 89 | return 0; |
61 | { | ||
62 | return pci_register_driver (&pci_eisa_driver); | ||
63 | } | 90 | } |
64 | 91 | subsys_initcall_sync(pci_eisa_init_early); | |
65 | device_initcall(pci_eisa_init_module); | ||
66 | MODULE_DEVICE_TABLE(pci, pci_eisa_pci_tbl); | ||
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c index 47ae712c9504..221ad1bf94de 100644 --- a/drivers/firmware/efi/efi-pstore.c +++ b/drivers/firmware/efi/efi-pstore.c | |||
@@ -1,6 +1,7 @@ | |||
1 | #include <linux/efi.h> | 1 | #include <linux/efi.h> |
2 | #include <linux/module.h> | 2 | #include <linux/module.h> |
3 | #include <linux/pstore.h> | 3 | #include <linux/pstore.h> |
4 | #include <linux/ucs2_string.h> | ||
4 | 5 | ||
5 | #define DUMP_NAME_LEN 52 | 6 | #define DUMP_NAME_LEN 52 |
6 | 7 | ||
@@ -140,15 +141,15 @@ static int efi_pstore_erase_func(struct efivar_entry *entry, void *data) | |||
140 | efi_guid_t vendor = LINUX_EFI_CRASH_GUID; | 141 | efi_guid_t vendor = LINUX_EFI_CRASH_GUID; |
141 | efi_char16_t efi_name_old[DUMP_NAME_LEN]; | 142 | efi_char16_t efi_name_old[DUMP_NAME_LEN]; |
142 | efi_char16_t *efi_name = ed->name; | 143 | efi_char16_t *efi_name = ed->name; |
143 | unsigned long utf16_len = utf16_strlen(ed->name); | 144 | unsigned long ucs2_len = ucs2_strlen(ed->name); |
144 | char name_old[DUMP_NAME_LEN]; | 145 | char name_old[DUMP_NAME_LEN]; |
145 | int i; | 146 | int i; |
146 | 147 | ||
147 | if (efi_guidcmp(entry->var.VendorGuid, vendor)) | 148 | if (efi_guidcmp(entry->var.VendorGuid, vendor)) |
148 | return 0; | 149 | return 0; |
149 | 150 | ||
150 | if (utf16_strncmp(entry->var.VariableName, | 151 | if (ucs2_strncmp(entry->var.VariableName, |
151 | efi_name, (size_t)utf16_len)) { | 152 | efi_name, (size_t)ucs2_len)) { |
152 | /* | 153 | /* |
153 | * Check if an old format, which doesn't support | 154 | * Check if an old format, which doesn't support |
154 | * holding multiple logs, remains. | 155 | * holding multiple logs, remains. |
@@ -159,8 +160,8 @@ static int efi_pstore_erase_func(struct efivar_entry *entry, void *data) | |||
159 | for (i = 0; i < DUMP_NAME_LEN; i++) | 160 | for (i = 0; i < DUMP_NAME_LEN; i++) |
160 | efi_name_old[i] = name_old[i]; | 161 | efi_name_old[i] = name_old[i]; |
161 | 162 | ||
162 | if (utf16_strncmp(entry->var.VariableName, efi_name_old, | 163 | if (ucs2_strncmp(entry->var.VariableName, efi_name_old, |
163 | utf16_strlen(efi_name_old))) | 164 | ucs2_strlen(efi_name_old))) |
164 | return 0; | 165 | return 0; |
165 | } | 166 | } |
166 | 167 | ||
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 32bdf4f8e432..5145fa344ad5 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c | |||
@@ -72,7 +72,7 @@ static int generic_ops_register(void) | |||
72 | generic_ops.get_variable = efi.get_variable; | 72 | generic_ops.get_variable = efi.get_variable; |
73 | generic_ops.set_variable = efi.set_variable; | 73 | generic_ops.set_variable = efi.set_variable; |
74 | generic_ops.get_next_variable = efi.get_next_variable; | 74 | generic_ops.get_next_variable = efi.get_next_variable; |
75 | generic_ops.query_variable_info = efi.query_variable_info; | 75 | generic_ops.query_variable_store = efi_query_variable_store; |
76 | 76 | ||
77 | return efivars_register(&generic_efivars, &generic_ops, efi_kobj); | 77 | return efivars_register(&generic_efivars, &generic_ops, efi_kobj); |
78 | } | 78 | } |
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c index 70635b3b59d3..f8f5e5d9e020 100644 --- a/drivers/firmware/efi/efivars.c +++ b/drivers/firmware/efi/efivars.c | |||
@@ -67,6 +67,7 @@ | |||
67 | 67 | ||
68 | #include <linux/efi.h> | 68 | #include <linux/efi.h> |
69 | #include <linux/module.h> | 69 | #include <linux/module.h> |
70 | #include <linux/ucs2_string.h> | ||
70 | 71 | ||
71 | #define EFIVARS_VERSION "0.08" | 72 | #define EFIVARS_VERSION "0.08" |
72 | #define EFIVARS_DATE "2004-May-17" | 73 | #define EFIVARS_DATE "2004-May-17" |
@@ -407,7 +408,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var) | |||
407 | efi_char16_t *variable_name; | 408 | efi_char16_t *variable_name; |
408 | 409 | ||
409 | variable_name = new_var->var.VariableName; | 410 | variable_name = new_var->var.VariableName; |
410 | variable_name_size = utf16_strlen(variable_name) * sizeof(efi_char16_t); | 411 | variable_name_size = ucs2_strlen(variable_name) * sizeof(efi_char16_t); |
411 | 412 | ||
412 | /* | 413 | /* |
413 | * Length of the variable bytes in ASCII, plus the '-' separator, | 414 | * Length of the variable bytes in ASCII, plus the '-' separator, |
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c index dd1c20a426fa..1d80c1ca39c5 100644 --- a/drivers/firmware/efi/vars.c +++ b/drivers/firmware/efi/vars.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/device.h> | 32 | #include <linux/device.h> |
33 | #include <linux/slab.h> | 33 | #include <linux/slab.h> |
34 | #include <linux/ctype.h> | 34 | #include <linux/ctype.h> |
35 | #include <linux/ucs2_string.h> | ||
35 | 36 | ||
36 | /* Private pointer to registered efivars */ | 37 | /* Private pointer to registered efivars */ |
37 | static struct efivars *__efivars; | 38 | static struct efivars *__efivars; |
@@ -91,7 +92,7 @@ validate_load_option(struct efi_variable *var, int match, u8 *buffer, | |||
91 | u16 filepathlength; | 92 | u16 filepathlength; |
92 | int i, desclength = 0, namelen; | 93 | int i, desclength = 0, namelen; |
93 | 94 | ||
94 | namelen = utf16_strnlen(var->VariableName, sizeof(var->VariableName)); | 95 | namelen = ucs2_strnlen(var->VariableName, sizeof(var->VariableName)); |
95 | 96 | ||
96 | /* Either "Boot" or "Driver" followed by four digits of hex */ | 97 | /* Either "Boot" or "Driver" followed by four digits of hex */ |
97 | for (i = match; i < match+4; i++) { | 98 | for (i = match; i < match+4; i++) { |
@@ -114,7 +115,7 @@ validate_load_option(struct efi_variable *var, int match, u8 *buffer, | |||
114 | * There's no stored length for the description, so it has to be | 115 | * There's no stored length for the description, so it has to be |
115 | * found by hand | 116 | * found by hand |
116 | */ | 117 | */ |
117 | desclength = utf16_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2; | 118 | desclength = ucs2_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2; |
118 | 119 | ||
119 | /* Each boot entry must have a descriptor */ | 120 | /* Each boot entry must have a descriptor */ |
120 | if (!desclength) | 121 | if (!desclength) |
@@ -228,24 +229,12 @@ EXPORT_SYMBOL_GPL(efivar_validate); | |||
228 | static efi_status_t | 229 | static efi_status_t |
229 | check_var_size(u32 attributes, unsigned long size) | 230 | check_var_size(u32 attributes, unsigned long size) |
230 | { | 231 | { |
231 | u64 storage_size, remaining_size, max_size; | ||
232 | efi_status_t status; | ||
233 | const struct efivar_operations *fops = __efivars->ops; | 232 | const struct efivar_operations *fops = __efivars->ops; |
234 | 233 | ||
235 | if (!fops->query_variable_info) | 234 | if (!fops->query_variable_store) |
236 | return EFI_UNSUPPORTED; | 235 | return EFI_UNSUPPORTED; |
237 | 236 | ||
238 | status = fops->query_variable_info(attributes, &storage_size, | 237 | return fops->query_variable_store(attributes, size); |
239 | &remaining_size, &max_size); | ||
240 | |||
241 | if (status != EFI_SUCCESS) | ||
242 | return status; | ||
243 | |||
244 | if (!storage_size || size > remaining_size || size > max_size || | ||
245 | (remaining_size - size) < (storage_size / 2)) | ||
246 | return EFI_OUT_OF_RESOURCES; | ||
247 | |||
248 | return status; | ||
249 | } | 238 | } |
250 | 239 | ||
251 | static int efi_status_to_err(efi_status_t status) | 240 | static int efi_status_to_err(efi_status_t status) |
@@ -288,9 +277,9 @@ static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor, | |||
288 | unsigned long strsize1, strsize2; | 277 | unsigned long strsize1, strsize2; |
289 | bool found = false; | 278 | bool found = false; |
290 | 279 | ||
291 | strsize1 = utf16_strsize(variable_name, 1024); | 280 | strsize1 = ucs2_strsize(variable_name, 1024); |
292 | list_for_each_entry_safe(entry, n, head, list) { | 281 | list_for_each_entry_safe(entry, n, head, list) { |
293 | strsize2 = utf16_strsize(entry->var.VariableName, 1024); | 282 | strsize2 = ucs2_strsize(entry->var.VariableName, 1024); |
294 | if (strsize1 == strsize2 && | 283 | if (strsize1 == strsize2 && |
295 | !memcmp(variable_name, &(entry->var.VariableName), | 284 | !memcmp(variable_name, &(entry->var.VariableName), |
296 | strsize2) && | 285 | strsize2) && |
@@ -594,7 +583,7 @@ int efivar_entry_set(struct efivar_entry *entry, u32 attributes, | |||
594 | return -EEXIST; | 583 | return -EEXIST; |
595 | } | 584 | } |
596 | 585 | ||
597 | status = check_var_size(attributes, size + utf16_strsize(name, 1024)); | 586 | status = check_var_size(attributes, size + ucs2_strsize(name, 1024)); |
598 | if (status == EFI_SUCCESS || status == EFI_UNSUPPORTED) | 587 | if (status == EFI_SUCCESS || status == EFI_UNSUPPORTED) |
599 | status = ops->set_variable(name, &vendor, | 588 | status = ops->set_variable(name, &vendor, |
600 | attributes, size, data); | 589 | attributes, size, data); |
@@ -630,7 +619,7 @@ int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes, | |||
630 | unsigned long flags; | 619 | unsigned long flags; |
631 | efi_status_t status; | 620 | efi_status_t status; |
632 | 621 | ||
633 | if (!ops->query_variable_info) | 622 | if (!ops->query_variable_store) |
634 | return -ENOSYS; | 623 | return -ENOSYS; |
635 | 624 | ||
636 | if (!block && spin_trylock_irqsave(&__efivars->lock, flags)) | 625 | if (!block && spin_trylock_irqsave(&__efivars->lock, flags)) |
@@ -638,7 +627,7 @@ int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes, | |||
638 | else | 627 | else |
639 | spin_lock_irqsave(&__efivars->lock, flags); | 628 | spin_lock_irqsave(&__efivars->lock, flags); |
640 | 629 | ||
641 | status = check_var_size(attributes, size + utf16_strsize(name, 1024)); | 630 | status = check_var_size(attributes, size + ucs2_strsize(name, 1024)); |
642 | if (status != EFI_SUCCESS) { | 631 | if (status != EFI_SUCCESS) { |
643 | spin_unlock_irqrestore(&__efivars->lock, flags); | 632 | spin_unlock_irqrestore(&__efivars->lock, flags); |
644 | return -ENOSPC; | 633 | return -ENOSPC; |
@@ -679,8 +668,8 @@ struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid, | |||
679 | WARN_ON(!spin_is_locked(&__efivars->lock)); | 668 | WARN_ON(!spin_is_locked(&__efivars->lock)); |
680 | 669 | ||
681 | list_for_each_entry_safe(entry, n, head, list) { | 670 | list_for_each_entry_safe(entry, n, head, list) { |
682 | strsize1 = utf16_strsize(name, 1024); | 671 | strsize1 = ucs2_strsize(name, 1024); |
683 | strsize2 = utf16_strsize(entry->var.VariableName, 1024); | 672 | strsize2 = ucs2_strsize(entry->var.VariableName, 1024); |
684 | if (strsize1 == strsize2 && | 673 | if (strsize1 == strsize2 && |
685 | !memcmp(name, &(entry->var.VariableName), strsize1) && | 674 | !memcmp(name, &(entry->var.VariableName), strsize1) && |
686 | !efi_guidcmp(guid, entry->var.VendorGuid)) { | 675 | !efi_guidcmp(guid, entry->var.VendorGuid)) { |
@@ -818,7 +807,7 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes, | |||
818 | /* | 807 | /* |
819 | * Ensure that the available space hasn't shrunk below the safe level | 808 | * Ensure that the available space hasn't shrunk below the safe level |
820 | */ | 809 | */ |
821 | status = check_var_size(attributes, *size + utf16_strsize(name, 1024)); | 810 | status = check_var_size(attributes, *size + ucs2_strsize(name, 1024)); |
822 | if (status != EFI_SUCCESS) { | 811 | if (status != EFI_SUCCESS) { |
823 | if (status != EFI_UNSUPPORTED) { | 812 | if (status != EFI_UNSUPPORTED) { |
824 | err = efi_status_to_err(status); | 813 | err = efi_status_to_err(status); |
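The efi changes rename the local utf16_* string helpers to the shared ucs2_* ones from <linux/ucs2_string.h>; the calling pattern stays the same. A small sketch of the name-comparison idiom the code relies on, bounding with ucs2_strsize() before memcmp(); same_var_name is an invented helper and 1024 mirrors the driver's own arbitrary bound:

    #include <linux/efi.h>
    #include <linux/string.h>
    #include <linux/ucs2_string.h>

    /* Compare two UCS-2 (efi_char16_t) variable names: bound the size
     * first, then compare byte-for-byte up to that size. */
    static bool same_var_name(efi_char16_t *a, efi_char16_t *b)
    {
        unsigned long sa = ucs2_strsize(a, 1024);
        unsigned long sb = ucs2_strsize(b, 1024);

        return sa == sb && !memcmp(a, b, sa);
    }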
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c index 757b2d92d5b0..acba0b9f4406 100644 --- a/drivers/firmware/google/gsmi.c +++ b/drivers/firmware/google/gsmi.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/reboot.h> | 28 | #include <linux/reboot.h> |
29 | #include <linux/efi.h> | 29 | #include <linux/efi.h> |
30 | #include <linux/module.h> | 30 | #include <linux/module.h> |
31 | #include <linux/ucs2_string.h> | ||
31 | 32 | ||
32 | #define GSMI_SHUTDOWN_CLEAN 0 /* Clean Shutdown */ | 33 | #define GSMI_SHUTDOWN_CLEAN 0 /* Clean Shutdown */ |
33 | /* TODO(mikew@google.com): Tie in HARDLOCKUP_DETECTOR with NMIWDT */ | 34 | /* TODO(mikew@google.com): Tie in HARDLOCKUP_DETECTOR with NMIWDT */ |
@@ -300,7 +301,7 @@ static efi_status_t gsmi_get_variable(efi_char16_t *name, | |||
300 | }; | 301 | }; |
301 | efi_status_t ret = EFI_SUCCESS; | 302 | efi_status_t ret = EFI_SUCCESS; |
302 | unsigned long flags; | 303 | unsigned long flags; |
303 | size_t name_len = utf16_strnlen(name, GSMI_BUF_SIZE / 2); | 304 | size_t name_len = ucs2_strnlen(name, GSMI_BUF_SIZE / 2); |
304 | int rc; | 305 | int rc; |
305 | 306 | ||
306 | if (name_len >= GSMI_BUF_SIZE / 2) | 307 | if (name_len >= GSMI_BUF_SIZE / 2) |
@@ -369,7 +370,7 @@ static efi_status_t gsmi_get_next_variable(unsigned long *name_size, | |||
369 | return EFI_BAD_BUFFER_SIZE; | 370 | return EFI_BAD_BUFFER_SIZE; |
370 | 371 | ||
371 | /* Let's make sure the thing is at least null-terminated */ | 372 | /* Let's make sure the thing is at least null-terminated */ |
372 | if (utf16_strnlen(name, GSMI_BUF_SIZE / 2) == GSMI_BUF_SIZE / 2) | 373 | if (ucs2_strnlen(name, GSMI_BUF_SIZE / 2) == GSMI_BUF_SIZE / 2) |
373 | return EFI_INVALID_PARAMETER; | 374 | return EFI_INVALID_PARAMETER; |
374 | 375 | ||
375 | spin_lock_irqsave(&gsmi_dev.lock, flags); | 376 | spin_lock_irqsave(&gsmi_dev.lock, flags); |
@@ -397,7 +398,7 @@ static efi_status_t gsmi_get_next_variable(unsigned long *name_size, | |||
397 | 398 | ||
398 | /* Copy the name back */ | 399 | /* Copy the name back */ |
399 | memcpy(name, gsmi_dev.name_buf->start, GSMI_BUF_SIZE); | 400 | memcpy(name, gsmi_dev.name_buf->start, GSMI_BUF_SIZE); |
400 | *name_size = utf16_strnlen(name, GSMI_BUF_SIZE / 2) * 2; | 401 | *name_size = ucs2_strnlen(name, GSMI_BUF_SIZE / 2) * 2; |
401 | 402 | ||
402 | /* copy guid to return buffer */ | 403 | /* copy guid to return buffer */ |
403 | memcpy(vendor, &param.guid, sizeof(param.guid)); | 404 | memcpy(vendor, &param.guid, sizeof(param.guid)); |
@@ -423,7 +424,7 @@ static efi_status_t gsmi_set_variable(efi_char16_t *name, | |||
423 | EFI_VARIABLE_BOOTSERVICE_ACCESS | | 424 | EFI_VARIABLE_BOOTSERVICE_ACCESS | |
424 | EFI_VARIABLE_RUNTIME_ACCESS, | 425 | EFI_VARIABLE_RUNTIME_ACCESS, |
425 | }; | 426 | }; |
426 | size_t name_len = utf16_strnlen(name, GSMI_BUF_SIZE / 2); | 427 | size_t name_len = ucs2_strnlen(name, GSMI_BUF_SIZE / 2); |
427 | efi_status_t ret = EFI_SUCCESS; | 428 | efi_status_t ret = EFI_SUCCESS; |
428 | int rc; | 429 | int rc; |
429 | unsigned long flags; | 430 | unsigned long flags; |
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c index f9dbd503fc40..de3c317bd3e2 100644 --- a/drivers/gpio/gpio-ich.c +++ b/drivers/gpio/gpio-ich.c | |||
@@ -214,7 +214,7 @@ static int ichx_gpio_request(struct gpio_chip *chip, unsigned nr) | |||
214 | * If it can't be trusted, assume that the pin can be used as a GPIO. | 214 | * If it can't be trusted, assume that the pin can be used as a GPIO. |
215 | */ | 215 | */ |
216 | if (ichx_priv.desc->use_sel_ignore[nr / 32] & (1 << (nr & 0x1f))) | 216 | if (ichx_priv.desc->use_sel_ignore[nr / 32] & (1 << (nr & 0x1f))) |
217 | return 1; | 217 | return 0; |
218 | 218 | ||
219 | return ichx_read_bit(GPIO_USE_SEL, nr) ? 0 : -ENODEV; | 219 | return ichx_read_bit(GPIO_USE_SEL, nr) ? 0 : -ENODEV; |
220 | } | 220 | } |
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index 24059462c87f..9391cf16e990 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c | |||
@@ -575,7 +575,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, | |||
575 | chip->gpio_chip.ngpio, | 575 | chip->gpio_chip.ngpio, |
576 | irq_base, | 576 | irq_base, |
577 | &pca953x_irq_simple_ops, | 577 | &pca953x_irq_simple_ops, |
578 | NULL); | 578 | chip); |
579 | if (!chip->domain) | 579 | if (!chip->domain) |
580 | return -ENODEV; | 580 | return -ENODEV; |
581 | 581 | ||
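The pca953x fix passes the chip as the irq domain's host_data instead of NULL, so the domain callbacks can reach driver state. A rough sketch of that wiring; my_chip, my_irq_map and my_add_domain are placeholders:

    #include <linux/irqdomain.h>
    #include <linux/of.h>

    struct my_chip {
        int ngpio;
    };

    static int my_irq_map(struct irq_domain *d, unsigned int virq,
                          irq_hw_number_t hwirq)
    {
        struct my_chip *chip = d->host_data;  /* would be NULL without the fix */

        (void)chip;                           /* set up per-irq state here */
        return 0;
    }

    static const struct irq_domain_ops my_irq_ops = {
        .map   = my_irq_map,
        .xlate = irq_domain_xlate_twocell,
    };

    static struct irq_domain *my_add_domain(struct device_node *np,
                                            struct my_chip *chip, int irq_base)
    {
        /* The last argument becomes d->host_data in the callbacks above. */
        return irq_domain_add_simple(np, chip->ngpio, irq_base,
                                     &my_irq_ops, chip);
    }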
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c index 9cc108d2b770..8325f580c0f1 100644 --- a/drivers/gpio/gpio-pxa.c +++ b/drivers/gpio/gpio-pxa.c | |||
@@ -642,7 +642,12 @@ static struct platform_driver pxa_gpio_driver = { | |||
642 | .of_match_table = of_match_ptr(pxa_gpio_dt_ids), | 642 | .of_match_table = of_match_ptr(pxa_gpio_dt_ids), |
643 | }, | 643 | }, |
644 | }; | 644 | }; |
645 | module_platform_driver(pxa_gpio_driver); | 645 | |
646 | static int __init pxa_gpio_init(void) | ||
647 | { | ||
648 | return platform_driver_register(&pxa_gpio_driver); | ||
649 | } | ||
650 | postcore_initcall(pxa_gpio_init); | ||
646 | 651 | ||
647 | #ifdef CONFIG_PM | 652 | #ifdef CONFIG_PM |
648 | static int pxa_gpio_suspend(void) | 653 | static int pxa_gpio_suspend(void) |
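module_platform_driver() expands to a device-level initcall; the pxa change registers the driver by hand from a postcore_initcall() instead, so the GPIO controller binds before drivers that need its pins during their own init. Sketched with placeholder names:

    #include <linux/init.h>
    #include <linux/platform_device.h>

    static int my_probe(struct platform_device *pdev)
    {
        return 0;
    }

    static struct platform_driver my_gpio_driver = {
        .probe  = my_probe,
        .driver = {
            .name = "my-early-gpio",
        },
    };

    /* Registering by hand lets us pick the initcall level: postcore
     * runs well before the device_initcall() that the usual macro uses. */
    static int __init my_gpio_init(void)
    {
        return platform_driver_register(&my_gpio_driver);
    }
    postcore_initcall(my_gpio_init);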
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c index 770476a9da87..3ce5bc38ac31 100644 --- a/drivers/gpio/gpio-stmpe.c +++ b/drivers/gpio/gpio-stmpe.c | |||
@@ -307,11 +307,15 @@ static const struct irq_domain_ops stmpe_gpio_irq_simple_ops = { | |||
307 | .xlate = irq_domain_xlate_twocell, | 307 | .xlate = irq_domain_xlate_twocell, |
308 | }; | 308 | }; |
309 | 309 | ||
310 | static int stmpe_gpio_irq_init(struct stmpe_gpio *stmpe_gpio) | 310 | static int stmpe_gpio_irq_init(struct stmpe_gpio *stmpe_gpio, |
311 | struct device_node *np) | ||
311 | { | 312 | { |
312 | int base = stmpe_gpio->irq_base; | 313 | int base = 0; |
313 | 314 | ||
314 | stmpe_gpio->domain = irq_domain_add_simple(NULL, | 315 | if (!np) |
316 | base = stmpe_gpio->irq_base; | ||
317 | |||
318 | stmpe_gpio->domain = irq_domain_add_simple(np, | ||
315 | stmpe_gpio->chip.ngpio, base, | 319 | stmpe_gpio->chip.ngpio, base, |
316 | &stmpe_gpio_irq_simple_ops, stmpe_gpio); | 320 | &stmpe_gpio_irq_simple_ops, stmpe_gpio); |
317 | if (!stmpe_gpio->domain) { | 321 | if (!stmpe_gpio->domain) { |
@@ -346,6 +350,9 @@ static int stmpe_gpio_probe(struct platform_device *pdev) | |||
346 | stmpe_gpio->chip = template_chip; | 350 | stmpe_gpio->chip = template_chip; |
347 | stmpe_gpio->chip.ngpio = stmpe->num_gpios; | 351 | stmpe_gpio->chip.ngpio = stmpe->num_gpios; |
348 | stmpe_gpio->chip.dev = &pdev->dev; | 352 | stmpe_gpio->chip.dev = &pdev->dev; |
353 | #ifdef CONFIG_OF | ||
354 | stmpe_gpio->chip.of_node = np; | ||
355 | #endif | ||
349 | stmpe_gpio->chip.base = pdata ? pdata->gpio_base : -1; | 356 | stmpe_gpio->chip.base = pdata ? pdata->gpio_base : -1; |
350 | 357 | ||
351 | if (pdata) | 358 | if (pdata) |
@@ -366,7 +373,7 @@ static int stmpe_gpio_probe(struct platform_device *pdev) | |||
366 | goto out_free; | 373 | goto out_free; |
367 | 374 | ||
368 | if (irq >= 0) { | 375 | if (irq >= 0) { |
369 | ret = stmpe_gpio_irq_init(stmpe_gpio); | 376 | ret = stmpe_gpio_irq_init(stmpe_gpio, np); |
370 | if (ret) | 377 | if (ret) |
371 | goto out_disable; | 378 | goto out_disable; |
372 | 379 | ||
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 792c3e3795ca..dd64a06dc5b4 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c | |||
@@ -2326,7 +2326,6 @@ int drm_mode_addfb(struct drm_device *dev, | |||
2326 | fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r); | 2326 | fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r); |
2327 | if (IS_ERR(fb)) { | 2327 | if (IS_ERR(fb)) { |
2328 | DRM_DEBUG_KMS("could not create framebuffer\n"); | 2328 | DRM_DEBUG_KMS("could not create framebuffer\n"); |
2329 | drm_modeset_unlock_all(dev); | ||
2330 | return PTR_ERR(fb); | 2329 | return PTR_ERR(fb); |
2331 | } | 2330 | } |
2332 | 2331 | ||
@@ -2506,7 +2505,6 @@ int drm_mode_addfb2(struct drm_device *dev, | |||
2506 | fb = dev->mode_config.funcs->fb_create(dev, file_priv, r); | 2505 | fb = dev->mode_config.funcs->fb_create(dev, file_priv, r); |
2507 | if (IS_ERR(fb)) { | 2506 | if (IS_ERR(fb)) { |
2508 | DRM_DEBUG_KMS("could not create framebuffer\n"); | 2507 | DRM_DEBUG_KMS("could not create framebuffer\n"); |
2509 | drm_modeset_unlock_all(dev); | ||
2510 | return PTR_ERR(fb); | 2508 | return PTR_ERR(fb); |
2511 | } | 2509 | } |
2512 | 2510 | ||
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 59d6b9bf204b..892ff9f95975 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
@@ -1544,10 +1544,10 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) | |||
1544 | if (!fb_helper->fb) | 1544 | if (!fb_helper->fb) |
1545 | return 0; | 1545 | return 0; |
1546 | 1546 | ||
1547 | drm_modeset_lock_all(dev); | 1547 | mutex_lock(&fb_helper->dev->mode_config.mutex); |
1548 | if (!drm_fb_helper_is_bound(fb_helper)) { | 1548 | if (!drm_fb_helper_is_bound(fb_helper)) { |
1549 | fb_helper->delayed_hotplug = true; | 1549 | fb_helper->delayed_hotplug = true; |
1550 | drm_modeset_unlock_all(dev); | 1550 | mutex_unlock(&fb_helper->dev->mode_config.mutex); |
1551 | return 0; | 1551 | return 0; |
1552 | } | 1552 | } |
1553 | DRM_DEBUG_KMS("\n"); | 1553 | DRM_DEBUG_KMS("\n"); |
@@ -1558,9 +1558,11 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) | |||
1558 | 1558 | ||
1559 | count = drm_fb_helper_probe_connector_modes(fb_helper, max_width, | 1559 | count = drm_fb_helper_probe_connector_modes(fb_helper, max_width, |
1560 | max_height); | 1560 | max_height); |
1561 | mutex_unlock(&fb_helper->dev->mode_config.mutex); | ||
1562 | |||
1563 | drm_modeset_lock_all(dev); | ||
1561 | drm_setup_crtcs(fb_helper); | 1564 | drm_setup_crtcs(fb_helper); |
1562 | drm_modeset_unlock_all(dev); | 1565 | drm_modeset_unlock_all(dev); |
1563 | |||
1564 | drm_fb_helper_set_par(fb_helper->fbdev); | 1566 | drm_fb_helper_set_par(fb_helper->fbdev); |
1565 | 1567 | ||
1566 | return 0; | 1568 | return 0; |
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 13fdcd10a605..429e07d0b0f1 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c | |||
@@ -123,6 +123,7 @@ int drm_open(struct inode *inode, struct file *filp) | |||
123 | int retcode = 0; | 123 | int retcode = 0; |
124 | int need_setup = 0; | 124 | int need_setup = 0; |
125 | struct address_space *old_mapping; | 125 | struct address_space *old_mapping; |
126 | struct address_space *old_imapping; | ||
126 | 127 | ||
127 | minor = idr_find(&drm_minors_idr, minor_id); | 128 | minor = idr_find(&drm_minors_idr, minor_id); |
128 | if (!minor) | 129 | if (!minor) |
@@ -137,6 +138,7 @@ int drm_open(struct inode *inode, struct file *filp) | |||
137 | if (!dev->open_count++) | 138 | if (!dev->open_count++) |
138 | need_setup = 1; | 139 | need_setup = 1; |
139 | mutex_lock(&dev->struct_mutex); | 140 | mutex_lock(&dev->struct_mutex); |
141 | old_imapping = inode->i_mapping; | ||
140 | old_mapping = dev->dev_mapping; | 142 | old_mapping = dev->dev_mapping; |
141 | if (old_mapping == NULL) | 143 | if (old_mapping == NULL) |
142 | dev->dev_mapping = &inode->i_data; | 144 | dev->dev_mapping = &inode->i_data; |
@@ -159,8 +161,8 @@ int drm_open(struct inode *inode, struct file *filp) | |||
159 | 161 | ||
160 | err_undo: | 162 | err_undo: |
161 | mutex_lock(&dev->struct_mutex); | 163 | mutex_lock(&dev->struct_mutex); |
162 | filp->f_mapping = old_mapping; | 164 | filp->f_mapping = old_imapping; |
163 | inode->i_mapping = old_mapping; | 165 | inode->i_mapping = old_imapping; |
164 | iput(container_of(dev->dev_mapping, struct inode, i_data)); | 166 | iput(container_of(dev->dev_mapping, struct inode, i_data)); |
165 | dev->dev_mapping = old_mapping; | 167 | dev->dev_mapping = old_mapping; |
166 | mutex_unlock(&dev->struct_mutex); | 168 | mutex_unlock(&dev->struct_mutex); |
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 3b11ab0fbc96..9a48e1a2d417 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
@@ -57,7 +57,7 @@ eb_create(struct drm_i915_gem_execbuffer2 *args) | |||
57 | if (eb == NULL) { | 57 | if (eb == NULL) { |
58 | int size = args->buffer_count; | 58 | int size = args->buffer_count; |
59 | int count = PAGE_SIZE / sizeof(struct hlist_head) / 2; | 59 | int count = PAGE_SIZE / sizeof(struct hlist_head) / 2; |
60 | BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head))); | 60 | BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head)); |
61 | while (count > 2*size) | 61 | while (count > 2*size) |
62 | count >>= 1; | 62 | count >>= 1; |
63 | eb = kzalloc(count*sizeof(struct hlist_head) + | 63 | eb = kzalloc(count*sizeof(struct hlist_head) + |
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 32a3693905ec..1ce45a0a2d3e 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -45,6 +45,9 @@ | |||
45 | 45 | ||
46 | struct intel_crt { | 46 | struct intel_crt { |
47 | struct intel_encoder base; | 47 | struct intel_encoder base; |
48 | /* DPMS state is stored in the connector, which we need in the | ||
49 | * encoder's enable/disable callbacks */ | ||
50 | struct intel_connector *connector; | ||
48 | bool force_hotplug_required; | 51 | bool force_hotplug_required; |
49 | u32 adpa_reg; | 52 | u32 adpa_reg; |
50 | }; | 53 | }; |
@@ -81,29 +84,6 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder, | |||
81 | return true; | 84 | return true; |
82 | } | 85 | } |
83 | 86 | ||
84 | static void intel_disable_crt(struct intel_encoder *encoder) | ||
85 | { | ||
86 | struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; | ||
87 | struct intel_crt *crt = intel_encoder_to_crt(encoder); | ||
88 | u32 temp; | ||
89 | |||
90 | temp = I915_READ(crt->adpa_reg); | ||
91 | temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE; | ||
92 | temp &= ~ADPA_DAC_ENABLE; | ||
93 | I915_WRITE(crt->adpa_reg, temp); | ||
94 | } | ||
95 | |||
96 | static void intel_enable_crt(struct intel_encoder *encoder) | ||
97 | { | ||
98 | struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; | ||
99 | struct intel_crt *crt = intel_encoder_to_crt(encoder); | ||
100 | u32 temp; | ||
101 | |||
102 | temp = I915_READ(crt->adpa_reg); | ||
103 | temp |= ADPA_DAC_ENABLE; | ||
104 | I915_WRITE(crt->adpa_reg, temp); | ||
105 | } | ||
106 | |||
107 | /* Note: The caller is required to filter out dpms modes not supported by the | 87 | /* Note: The caller is required to filter out dpms modes not supported by the |
108 | * platform. */ | 88 | * platform. */ |
109 | static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) | 89 | static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) |
@@ -135,6 +115,19 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) | |||
135 | I915_WRITE(crt->adpa_reg, temp); | 115 | I915_WRITE(crt->adpa_reg, temp); |
136 | } | 116 | } |
137 | 117 | ||
118 | static void intel_disable_crt(struct intel_encoder *encoder) | ||
119 | { | ||
120 | intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF); | ||
121 | } | ||
122 | |||
123 | static void intel_enable_crt(struct intel_encoder *encoder) | ||
124 | { | ||
125 | struct intel_crt *crt = intel_encoder_to_crt(encoder); | ||
126 | |||
127 | intel_crt_set_dpms(encoder, crt->connector->base.dpms); | ||
128 | } | ||
129 | |||
130 | |||
138 | static void intel_crt_dpms(struct drm_connector *connector, int mode) | 131 | static void intel_crt_dpms(struct drm_connector *connector, int mode) |
139 | { | 132 | { |
140 | struct drm_device *dev = connector->dev; | 133 | struct drm_device *dev = connector->dev; |
@@ -746,6 +739,7 @@ void intel_crt_init(struct drm_device *dev) | |||
746 | } | 739 | } |
747 | 740 | ||
748 | connector = &intel_connector->base; | 741 | connector = &intel_connector->base; |
742 | crt->connector = intel_connector; | ||
749 | drm_connector_init(dev, &intel_connector->base, | 743 | drm_connector_init(dev, &intel_connector->base, |
750 | &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); | 744 | &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); |
751 | 745 | ||
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index d7d4afe01341..8fc93f90a7cd 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -2559,12 +2559,15 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder) | |||
2559 | { | 2559 | { |
2560 | struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); | 2560 | struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); |
2561 | struct intel_dp *intel_dp = &intel_dig_port->dp; | 2561 | struct intel_dp *intel_dp = &intel_dig_port->dp; |
2562 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | ||
2562 | 2563 | ||
2563 | i2c_del_adapter(&intel_dp->adapter); | 2564 | i2c_del_adapter(&intel_dp->adapter); |
2564 | drm_encoder_cleanup(encoder); | 2565 | drm_encoder_cleanup(encoder); |
2565 | if (is_edp(intel_dp)) { | 2566 | if (is_edp(intel_dp)) { |
2566 | cancel_delayed_work_sync(&intel_dp->panel_vdd_work); | 2567 | cancel_delayed_work_sync(&intel_dp->panel_vdd_work); |
2568 | mutex_lock(&dev->mode_config.mutex); | ||
2567 | ironlake_panel_vdd_off_sync(intel_dp); | 2569 | ironlake_panel_vdd_off_sync(intel_dp); |
2570 | mutex_unlock(&dev->mode_config.mutex); | ||
2568 | } | 2571 | } |
2569 | kfree(intel_dig_port); | 2572 | kfree(intel_dig_port); |
2570 | } | 2573 | } |
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index fe22bb780e1d..78d8e919509f 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c | |||
@@ -751,8 +751,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, | |||
751 | int i; | 751 | int i; |
752 | unsigned char misc = 0; | 752 | unsigned char misc = 0; |
753 | unsigned char ext_vga[6]; | 753 | unsigned char ext_vga[6]; |
754 | unsigned char ext_vga_index24; | ||
755 | unsigned char dac_index90 = 0; | ||
756 | u8 bppshift; | 754 | u8 bppshift; |
757 | 755 | ||
758 | static unsigned char dacvalue[] = { | 756 | static unsigned char dacvalue[] = { |
@@ -803,7 +801,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, | |||
803 | option2 = 0x0000b000; | 801 | option2 = 0x0000b000; |
804 | break; | 802 | break; |
805 | case G200_ER: | 803 | case G200_ER: |
806 | dac_index90 = 0; | ||
807 | break; | 804 | break; |
808 | } | 805 | } |
809 | 806 | ||
@@ -852,10 +849,8 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, | |||
852 | WREG_DAC(i, dacvalue[i]); | 849 | WREG_DAC(i, dacvalue[i]); |
853 | } | 850 | } |
854 | 851 | ||
855 | if (mdev->type == G200_ER) { | 852 | if (mdev->type == G200_ER) |
856 | WREG_DAC(0x90, dac_index90); | 853 | WREG_DAC(0x90, 0); |
857 | } | ||
858 | |||
859 | 854 | ||
860 | if (option) | 855 | if (option) |
861 | pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option); | 856 | pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option); |
@@ -952,8 +947,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, | |||
952 | if (mdev->type == G200_WB) | 947 | if (mdev->type == G200_WB) |
953 | ext_vga[1] |= 0x88; | 948 | ext_vga[1] |= 0x88; |
954 | 949 | ||
955 | ext_vga_index24 = 0x05; | ||
956 | |||
957 | /* Set pixel clocks */ | 950 | /* Set pixel clocks */ |
958 | misc = 0x2d; | 951 | misc = 0x2d; |
959 | WREG8(MGA_MISC_OUT, misc); | 952 | WREG8(MGA_MISC_OUT, misc); |
@@ -965,7 +958,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, | |||
965 | } | 958 | } |
966 | 959 | ||
967 | if (mdev->type == G200_ER) | 960 | if (mdev->type == G200_ER) |
968 | WREG_ECRT(24, ext_vga_index24); | 961 | WREG_ECRT(0x24, 0x5); |
969 | 962 | ||
970 | if (mdev->type == G200_EV) { | 963 | if (mdev->type == G200_EV) { |
971 | WREG_ECRT(6, 0); | 964 | WREG_ECRT(6, 0); |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c index e816f06637a7..0e2c1a4f1659 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c | |||
@@ -248,6 +248,22 @@ nouveau_bios_shadow_pci(struct nouveau_bios *bios) | |||
248 | } | 248 | } |
249 | } | 249 | } |
250 | 250 | ||
251 | static void | ||
252 | nouveau_bios_shadow_platform(struct nouveau_bios *bios) | ||
253 | { | ||
254 | struct pci_dev *pdev = nv_device(bios)->pdev; | ||
255 | size_t size; | ||
256 | |||
257 | void __iomem *rom = pci_platform_rom(pdev, &size); | ||
258 | if (rom && size) { | ||
259 | bios->data = kmalloc(size, GFP_KERNEL); | ||
260 | if (bios->data) { | ||
261 | memcpy_fromio(bios->data, rom, size); | ||
262 | bios->size = size; | ||
263 | } | ||
264 | } | ||
265 | } | ||
266 | |||
251 | static int | 267 | static int |
252 | nouveau_bios_score(struct nouveau_bios *bios, const bool writeable) | 268 | nouveau_bios_score(struct nouveau_bios *bios, const bool writeable) |
253 | { | 269 | { |
@@ -288,6 +304,7 @@ nouveau_bios_shadow(struct nouveau_bios *bios) | |||
288 | { "PROM", nouveau_bios_shadow_prom, false, 0, 0, NULL }, | 304 | { "PROM", nouveau_bios_shadow_prom, false, 0, 0, NULL }, |
289 | { "ACPI", nouveau_bios_shadow_acpi, true, 0, 0, NULL }, | 305 | { "ACPI", nouveau_bios_shadow_acpi, true, 0, 0, NULL }, |
290 | { "PCIROM", nouveau_bios_shadow_pci, true, 0, 0, NULL }, | 306 | { "PCIROM", nouveau_bios_shadow_pci, true, 0, 0, NULL }, |
307 | { "PLATFORM", nouveau_bios_shadow_platform, true, 0, 0, NULL }, | ||
291 | {} | 308 | {} |
292 | }; | 309 | }; |
293 | struct methods *mthd, *best; | 310 | struct methods *mthd, *best; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index 3b6dc883e150..5eb3e0da7c6e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c | |||
@@ -391,7 +391,7 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS) | |||
391 | struct nouveau_drm *drm = nouveau_drm(dev); | 391 | struct nouveau_drm *drm = nouveau_drm(dev); |
392 | struct nouveau_device *device = nv_device(drm->device); | 392 | struct nouveau_device *device = nv_device(drm->device); |
393 | struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); | 393 | struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); |
394 | struct nouveau_abi16_chan *chan, *temp; | 394 | struct nouveau_abi16_chan *chan = NULL, *temp; |
395 | struct nouveau_abi16_ntfy *ntfy; | 395 | struct nouveau_abi16_ntfy *ntfy; |
396 | struct nouveau_object *object; | 396 | struct nouveau_object *object; |
397 | struct nv_dma_class args = {}; | 397 | struct nv_dma_class args = {}; |
@@ -404,10 +404,11 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS) | |||
404 | if (unlikely(nv_device(abi16->device)->card_type >= NV_C0)) | 404 | if (unlikely(nv_device(abi16->device)->card_type >= NV_C0)) |
405 | return nouveau_abi16_put(abi16, -EINVAL); | 405 | return nouveau_abi16_put(abi16, -EINVAL); |
406 | 406 | ||
407 | list_for_each_entry_safe(chan, temp, &abi16->channels, head) { | 407 | list_for_each_entry(temp, &abi16->channels, head) { |
408 | if (chan->chan->handle == (NVDRM_CHAN | info->channel)) | 408 | if (temp->chan->handle == (NVDRM_CHAN | info->channel)) { |
409 | chan = temp; | ||
409 | break; | 410 | break; |
410 | chan = NULL; | 411 | } |
411 | } | 412 | } |
412 | 413 | ||
413 | if (!chan) | 414 | if (!chan) |
@@ -459,17 +460,18 @@ nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS) | |||
459 | { | 460 | { |
460 | struct drm_nouveau_gpuobj_free *fini = data; | 461 | struct drm_nouveau_gpuobj_free *fini = data; |
461 | struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); | 462 | struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); |
462 | struct nouveau_abi16_chan *chan, *temp; | 463 | struct nouveau_abi16_chan *chan = NULL, *temp; |
463 | struct nouveau_abi16_ntfy *ntfy; | 464 | struct nouveau_abi16_ntfy *ntfy; |
464 | int ret; | 465 | int ret; |
465 | 466 | ||
466 | if (unlikely(!abi16)) | 467 | if (unlikely(!abi16)) |
467 | return -ENOMEM; | 468 | return -ENOMEM; |
468 | 469 | ||
469 | list_for_each_entry_safe(chan, temp, &abi16->channels, head) { | 470 | list_for_each_entry(temp, &abi16->channels, head) { |
470 | if (chan->chan->handle == (NVDRM_CHAN | fini->channel)) | 471 | if (temp->chan->handle == (NVDRM_CHAN | fini->channel)) { |
472 | chan = temp; | ||
471 | break; | 473 | break; |
472 | chan = NULL; | 474 | } |
473 | } | 475 | } |
474 | 476 | ||
475 | if (!chan) | 477 | if (!chan) |
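The two abi16 hunks above replace a search loop whose cursor variable doubled as the result (leaving it pointing at the list-head container when nothing matched) with the usual idiom of a separate cursor plus a NULL-initialized result pointer. A small runnable illustration of that idiom on a plain singly linked list (hypothetical types, not the nouveau ones):

    #include <stdio.h>
    #include <stddef.h>

    struct chan {
            unsigned int handle;
            struct chan *next;
    };

    /* Separate cursor and result: the result stays NULL unless a match is found. */
    static struct chan *find_chan(struct chan *head, unsigned int handle)
    {
            struct chan *found = NULL;
            struct chan *cur;

            for (cur = head; cur; cur = cur->next) {
                    if (cur->handle == handle) {
                            found = cur;
                            break;
                    }
            }
            return found;   /* NULL means "not found", never a dangling cursor */
    }

    int main(void)
    {
            struct chan c = { .handle = 2, .next = NULL };
            struct chan b = { .handle = 1, .next = &c };

            printf("lookup 2: %p\n", (void *)find_chan(&b, 2));
            printf("lookup 9: %p\n", (void *)find_chan(&b, 9));
            return 0;
    }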
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index d1099365bfc1..c95decf543e9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c | |||
@@ -72,11 +72,25 @@ module_param_named(modeset, nouveau_modeset, int, 0400); | |||
72 | static struct drm_driver driver; | 72 | static struct drm_driver driver; |
73 | 73 | ||
74 | static int | 74 | static int |
75 | nouveau_drm_vblank_handler(struct nouveau_eventh *event, int head) | ||
76 | { | ||
77 | struct nouveau_drm *drm = | ||
78 | container_of(event, struct nouveau_drm, vblank[head]); | ||
79 | drm_handle_vblank(drm->dev, head); | ||
80 | return NVKM_EVENT_KEEP; | ||
81 | } | ||
82 | |||
83 | static int | ||
75 | nouveau_drm_vblank_enable(struct drm_device *dev, int head) | 84 | nouveau_drm_vblank_enable(struct drm_device *dev, int head) |
76 | { | 85 | { |
77 | struct nouveau_drm *drm = nouveau_drm(dev); | 86 | struct nouveau_drm *drm = nouveau_drm(dev); |
78 | struct nouveau_disp *pdisp = nouveau_disp(drm->device); | 87 | struct nouveau_disp *pdisp = nouveau_disp(drm->device); |
79 | nouveau_event_get(pdisp->vblank, head, &drm->vblank); | 88 | |
89 | if (WARN_ON_ONCE(head > ARRAY_SIZE(drm->vblank))) | ||
90 | return -EIO; | ||
91 | WARN_ON_ONCE(drm->vblank[head].func); | ||
92 | drm->vblank[head].func = nouveau_drm_vblank_handler; | ||
93 | nouveau_event_get(pdisp->vblank, head, &drm->vblank[head]); | ||
80 | return 0; | 94 | return 0; |
81 | } | 95 | } |
82 | 96 | ||
@@ -85,16 +99,11 @@ nouveau_drm_vblank_disable(struct drm_device *dev, int head) | |||
85 | { | 99 | { |
86 | struct nouveau_drm *drm = nouveau_drm(dev); | 100 | struct nouveau_drm *drm = nouveau_drm(dev); |
87 | struct nouveau_disp *pdisp = nouveau_disp(drm->device); | 101 | struct nouveau_disp *pdisp = nouveau_disp(drm->device); |
88 | nouveau_event_put(pdisp->vblank, head, &drm->vblank); | 102 | if (drm->vblank[head].func) |
89 | } | 103 | nouveau_event_put(pdisp->vblank, head, &drm->vblank[head]); |
90 | 104 | else | |
91 | static int | 105 | WARN_ON_ONCE(1); |
92 | nouveau_drm_vblank_handler(struct nouveau_eventh *event, int head) | 106 | drm->vblank[head].func = NULL; |
93 | { | ||
94 | struct nouveau_drm *drm = | ||
95 | container_of(event, struct nouveau_drm, vblank); | ||
96 | drm_handle_vblank(drm->dev, head); | ||
97 | return NVKM_EVENT_KEEP; | ||
98 | } | 107 | } |
99 | 108 | ||
100 | static u64 | 109 | static u64 |
@@ -292,7 +301,6 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) | |||
292 | 301 | ||
293 | dev->dev_private = drm; | 302 | dev->dev_private = drm; |
294 | drm->dev = dev; | 303 | drm->dev = dev; |
295 | drm->vblank.func = nouveau_drm_vblank_handler; | ||
296 | 304 | ||
297 | INIT_LIST_HEAD(&drm->clients); | 305 | INIT_LIST_HEAD(&drm->clients); |
298 | spin_lock_init(&drm->tile.lock); | 306 | spin_lock_init(&drm->tile.lock); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h index b25df374c901..9c39bafbef2c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.h +++ b/drivers/gpu/drm/nouveau/nouveau_drm.h | |||
@@ -113,7 +113,7 @@ struct nouveau_drm { | |||
113 | struct nvbios vbios; | 113 | struct nvbios vbios; |
114 | struct nouveau_display *display; | 114 | struct nouveau_display *display; |
115 | struct backlight_device *backlight; | 115 | struct backlight_device *backlight; |
116 | struct nouveau_eventh vblank; | 116 | struct nouveau_eventh vblank[4]; |
117 | 117 | ||
118 | /* power management */ | 118 | /* power management */ |
119 | struct nouveau_pm *pm; | 119 | struct nouveau_pm *pm; |
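Turning drm->vblank into a per-head array lets each head carry its own handler, and the handler recovers the owning nouveau_drm with container_of(event, struct nouveau_drm, vblank[head]). (Note the new guard uses '>' rather than '>=', so an index equal to the array size would still slip past the WARN.) The runnable sketch below shows the same recovery on generic types; it steps back to the start of the array explicitly instead of indexing inside offsetof(), which keeps it standard C.

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct event { int armed; };

    struct card {
            const char *name;
            struct event vblank[4];   /* one event slot per CRTC head */
    };

    /* ev points at card->vblank[head]; ev - head is &card->vblank[0],
     * which container_of() maps back to the enclosing structure. */
    static struct card *card_from_event(struct event *ev, int head)
    {
            return container_of(ev - head, struct card, vblank);
    }

    int main(void)
    {
            struct card c = { .name = "card0" };
            struct card *back = card_from_event(&c.vblank[2], 2);

            printf("recovered %s, pointers match: %d\n", back->name, back == &c);
            return 0;
    }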
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 7f0e6c3f37d1..1ddc03e51bf4 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c | |||
@@ -479,7 +479,7 @@ nv50_display_flip_wait(void *data) | |||
479 | { | 479 | { |
480 | struct nv50_display_flip *flip = data; | 480 | struct nv50_display_flip *flip = data; |
481 | if (nouveau_bo_rd32(flip->disp->sync, flip->chan->addr / 4) == | 481 | if (nouveau_bo_rd32(flip->disp->sync, flip->chan->addr / 4) == |
482 | flip->chan->data); | 482 | flip->chan->data) |
483 | return true; | 483 | return true; |
484 | usleep_range(1, 2); | 484 | usleep_range(1, 2); |
485 | return false; | 485 | return false; |
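The nv50_display fix above is a single stray semicolon: "if (cond);" ends the statement, so the following "return true" ran on every poll regardless of the comparison. A tiny program showing how the empty statement changes the control flow (warnings such as -Wempty-body in modern GCC/Clang will usually flag it):

    #include <stdio.h>

    static int buggy(int v)
    {
            if (v == 42);        /* the semicolon ends the if: the next line always runs */
                    return 1;
            return 0;            /* unreachable */
    }

    static int fixed(int v)
    {
            if (v == 42)
                    return 1;
            return 0;
    }

    int main(void)
    {
            printf("buggy(0) = %d, fixed(0) = %d\n", buggy(0), fixed(0));
            return 0;
    }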
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index b8015913d382..fa3c56fba294 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c | |||
@@ -99,6 +99,29 @@ static bool radeon_read_bios(struct radeon_device *rdev) | |||
99 | return true; | 99 | return true; |
100 | } | 100 | } |
101 | 101 | ||
102 | static bool radeon_read_platform_bios(struct radeon_device *rdev) | ||
103 | { | ||
104 | uint8_t __iomem *bios; | ||
105 | size_t size; | ||
106 | |||
107 | rdev->bios = NULL; | ||
108 | |||
109 | bios = pci_platform_rom(rdev->pdev, &size); | ||
110 | if (!bios) { | ||
111 | return false; | ||
112 | } | ||
113 | |||
114 | if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { | ||
115 | return false; | ||
116 | } | ||
117 | rdev->bios = kmemdup(bios, size, GFP_KERNEL); | ||
118 | if (rdev->bios == NULL) { | ||
119 | return false; | ||
120 | } | ||
121 | |||
122 | return true; | ||
123 | } | ||
124 | |||
102 | #ifdef CONFIG_ACPI | 125 | #ifdef CONFIG_ACPI |
103 | /* ATRM is used to get the BIOS on the discrete cards in | 126 | /* ATRM is used to get the BIOS on the discrete cards in |
104 | * dual-gpu systems. | 127 | * dual-gpu systems. |
@@ -620,6 +643,9 @@ bool radeon_get_bios(struct radeon_device *rdev) | |||
620 | if (r == false) { | 643 | if (r == false) { |
621 | r = radeon_read_disabled_bios(rdev); | 644 | r = radeon_read_disabled_bios(rdev); |
622 | } | 645 | } |
646 | if (r == false) { | ||
647 | r = radeon_read_platform_bios(rdev); | ||
648 | } | ||
623 | if (r == false || rdev->bios == NULL) { | 649 | if (r == false || rdev->bios == NULL) { |
624 | DRM_ERROR("Unable to locate a BIOS ROM\n"); | 650 | DRM_ERROR("Unable to locate a BIOS ROM\n"); |
625 | rdev->bios = NULL; | 651 | rdev->bios = NULL; |
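Both the nouveau and radeon hunks in this series add a last-chance fallback that copies whatever ROM image the platform firmware left mapped (pci_platform_rom()), and radeon additionally refuses images that do not start with the 0x55 0xAA PCI option-ROM signature. The signature test itself is trivial; a standalone version for reference (plain userspace C, hypothetical buffer handling):

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    /* PCI expansion ROM images start with the bytes 0x55, 0xAA. */
    static int rom_image_looks_valid(const uint8_t *rom, size_t size)
    {
            if (!rom || size < 2)
                    return 0;
            return rom[0] == 0x55 && rom[1] == 0xaa;
    }

    int main(void)
    {
            const uint8_t good[] = { 0x55, 0xaa, 0x10, 0x00 };
            const uint8_t bad[]  = { 0xff, 0xff };

            printf("good: %d, bad: %d\n",
                   rom_image_looks_valid(good, sizeof(good)),
                   rom_image_looks_valid(bad, sizeof(bad)));
            return 0;
    }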
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c index fe5cdbcf2636..b44d548c56f8 100644 --- a/drivers/gpu/drm/udl/udl_connector.c +++ b/drivers/gpu/drm/udl/udl_connector.c | |||
@@ -61,6 +61,10 @@ static int udl_get_modes(struct drm_connector *connector) | |||
61 | int ret; | 61 | int ret; |
62 | 62 | ||
63 | edid = (struct edid *)udl_get_edid(udl); | 63 | edid = (struct edid *)udl_get_edid(udl); |
64 | if (!edid) { | ||
65 | drm_mode_connector_update_edid_property(connector, NULL); | ||
66 | return 0; | ||
67 | } | ||
64 | 68 | ||
65 | /* | 69 | /* |
66 | * We only read the main block, but if the monitor reports extension | 70 | * We only read the main block, but if the monitor reports extension |
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 512b01c04ea7..aa341d135867 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c | |||
@@ -2077,7 +2077,6 @@ static const struct hid_device_id hid_ignore_list[] = { | |||
2077 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) }, | 2077 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) }, |
2078 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) }, | 2078 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) }, |
2079 | { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) }, | 2079 | { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) }, |
2080 | { HID_USB_DEVICE(USB_VENDOR_ID_MASTERKIT, USB_DEVICE_ID_MASTERKIT_MA901RADIO) }, | ||
2081 | { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) }, | 2080 | { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) }, |
2082 | { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) }, | 2081 | { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) }, |
2083 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) }, | 2082 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) }, |
@@ -2244,6 +2243,18 @@ bool hid_ignore(struct hid_device *hdev) | |||
2244 | hdev->product <= USB_DEVICE_ID_VELLEMAN_K8061_LAST)) | 2243 | hdev->product <= USB_DEVICE_ID_VELLEMAN_K8061_LAST)) |
2245 | return true; | 2244 | return true; |
2246 | break; | 2245 | break; |
2246 | case USB_VENDOR_ID_ATMEL_V_USB: | ||
2247 | /* Masterkit MA901 usb radio based on Atmel tiny85 chip and | ||
2248 | * it has the same USB ID as many Atmel V-USB devices. This | ||
2249 | * usb radio is handled by radio-ma901.c driver so we want | ||
2250 | * ignore the hid. Check the name, bus, product and ignore | ||
2251 | * if we have MA901 usb radio. | ||
2252 | */ | ||
2253 | if (hdev->product == USB_DEVICE_ID_ATMEL_V_USB && | ||
2254 | hdev->bus == BUS_USB && | ||
2255 | strncmp(hdev->name, "www.masterkit.ru MA901", 22) == 0) | ||
2256 | return true; | ||
2257 | break; | ||
2247 | } | 2258 | } |
2248 | 2259 | ||
2249 | if (hdev->type == HID_TYPE_USBMOUSE && | 2260 | if (hdev->type == HID_TYPE_USBMOUSE && |
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index c4388776f4e4..5309fd5eb0eb 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
@@ -158,6 +158,8 @@ | |||
158 | #define USB_VENDOR_ID_ATMEL 0x03eb | 158 | #define USB_VENDOR_ID_ATMEL 0x03eb |
159 | #define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c | 159 | #define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c |
160 | #define USB_DEVICE_ID_ATMEL_MXT_DIGITIZER 0x2118 | 160 | #define USB_DEVICE_ID_ATMEL_MXT_DIGITIZER 0x2118 |
161 | #define USB_VENDOR_ID_ATMEL_V_USB 0x16c0 | ||
162 | #define USB_DEVICE_ID_ATMEL_V_USB 0x05df | ||
161 | 163 | ||
162 | #define USB_VENDOR_ID_AUREAL 0x0755 | 164 | #define USB_VENDOR_ID_AUREAL 0x0755 |
163 | #define USB_DEVICE_ID_AUREAL_W01RN 0x2626 | 165 | #define USB_DEVICE_ID_AUREAL_W01RN 0x2626 |
@@ -557,9 +559,6 @@ | |||
557 | #define USB_VENDOR_ID_MADCATZ 0x0738 | 559 | #define USB_VENDOR_ID_MADCATZ 0x0738 |
558 | #define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540 | 560 | #define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540 |
559 | 561 | ||
560 | #define USB_VENDOR_ID_MASTERKIT 0x16c0 | ||
561 | #define USB_DEVICE_ID_MASTERKIT_MA901RADIO 0x05df | ||
562 | |||
563 | #define USB_VENDOR_ID_MCC 0x09db | 562 | #define USB_VENDOR_ID_MCC 0x09db |
564 | #define USB_DEVICE_ID_MCC_PMD1024LS 0x0076 | 563 | #define USB_DEVICE_ID_MCC_PMD1024LS 0x0076 |
565 | #define USB_DEVICE_ID_MCC_PMD1208LS 0x007a | 564 | #define USB_DEVICE_ID_MCC_PMD1208LS 0x007a |
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index f7f113ba083e..a8ce44296cfd 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c | |||
@@ -462,6 +462,21 @@ static int magicmouse_input_mapping(struct hid_device *hdev, | |||
462 | return 0; | 462 | return 0; |
463 | } | 463 | } |
464 | 464 | ||
465 | static void magicmouse_input_configured(struct hid_device *hdev, | ||
466 | struct hid_input *hi) | ||
467 | |||
468 | { | ||
469 | struct magicmouse_sc *msc = hid_get_drvdata(hdev); | ||
470 | |||
471 | int ret = magicmouse_setup_input(msc->input, hdev); | ||
472 | if (ret) { | ||
473 | hid_err(hdev, "magicmouse setup input failed (%d)\n", ret); | ||
474 | /* clean msc->input to notify probe() of the failure */ | ||
475 | msc->input = NULL; | ||
476 | } | ||
477 | } | ||
478 | |||
479 | |||
465 | static int magicmouse_probe(struct hid_device *hdev, | 480 | static int magicmouse_probe(struct hid_device *hdev, |
466 | const struct hid_device_id *id) | 481 | const struct hid_device_id *id) |
467 | { | 482 | { |
@@ -493,15 +508,10 @@ static int magicmouse_probe(struct hid_device *hdev, | |||
493 | goto err_free; | 508 | goto err_free; |
494 | } | 509 | } |
495 | 510 | ||
496 | /* We do this after hid-input is done parsing reports so that | 511 | if (!msc->input) { |
497 | * hid-input uses the most natural button and axis IDs. | 512 | hid_err(hdev, "magicmouse input not registered\n"); |
498 | */ | 513 | ret = -ENOMEM; |
499 | if (msc->input) { | 514 | goto err_stop_hw; |
500 | ret = magicmouse_setup_input(msc->input, hdev); | ||
501 | if (ret) { | ||
502 | hid_err(hdev, "magicmouse setup input failed (%d)\n", ret); | ||
503 | goto err_stop_hw; | ||
504 | } | ||
505 | } | 515 | } |
506 | 516 | ||
507 | if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE) | 517 | if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE) |
@@ -568,6 +578,7 @@ static struct hid_driver magicmouse_driver = { | |||
568 | .remove = magicmouse_remove, | 578 | .remove = magicmouse_remove, |
569 | .raw_event = magicmouse_raw_event, | 579 | .raw_event = magicmouse_raw_event, |
570 | .input_mapping = magicmouse_input_mapping, | 580 | .input_mapping = magicmouse_input_mapping, |
581 | .input_configured = magicmouse_input_configured, | ||
571 | }; | 582 | }; |
572 | module_hid_driver(magicmouse_driver); | 583 | module_hid_driver(magicmouse_driver); |
573 | 584 | ||
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c index db713c0dfba4..461a0d739d75 100644 --- a/drivers/hwspinlock/hwspinlock_core.c +++ b/drivers/hwspinlock/hwspinlock_core.c | |||
@@ -416,6 +416,8 @@ static int __hwspin_lock_request(struct hwspinlock *hwlock) | |||
416 | ret = pm_runtime_get_sync(dev); | 416 | ret = pm_runtime_get_sync(dev); |
417 | if (ret < 0) { | 417 | if (ret < 0) { |
418 | dev_err(dev, "%s: can't power on device\n", __func__); | 418 | dev_err(dev, "%s: can't power on device\n", __func__); |
419 | pm_runtime_put_noidle(dev); | ||
420 | module_put(dev->driver->owner); | ||
419 | return ret; | 421 | return ret; |
420 | } | 422 | } |
421 | 423 | ||
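The hwspinlock fix works because pm_runtime_get_sync() increments the device's runtime-PM usage count even when it returns an error, and the function has already taken a module reference by this point; bailing out without pm_runtime_put_noidle() and module_put() would leak both. A kernel-style sketch of the balanced acquire/unwind pattern (the function and its context are hypothetical; the pm_runtime and module calls are the real API, and this is only a sketch, not buildable outside a kernel tree):

    #include <linux/device.h>
    #include <linux/module.h>
    #include <linux/pm_runtime.h>

    /* Sketch: take a module reference and power up the device, unwinding
     * both on failure.  pm_runtime_get_sync() bumps the usage count even
     * when it fails, so the error path uses pm_runtime_put_noidle(). */
    static int example_request(struct device *dev)
    {
            int ret;

            if (!try_module_get(dev->driver->owner))
                    return -EINVAL;

            ret = pm_runtime_get_sync(dev);
            if (ret < 0) {
                    pm_runtime_put_noidle(dev);        /* undo the usage-count bump */
                    module_put(dev->driver->owner);    /* undo try_module_get() */
                    return ret;
            }

            return 0;
    }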
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 0ceb6e1b0f65..e3085c487ace 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c | |||
@@ -182,7 +182,6 @@ static int dw_i2c_probe(struct platform_device *pdev) | |||
182 | adap->algo = &i2c_dw_algo; | 182 | adap->algo = &i2c_dw_algo; |
183 | adap->dev.parent = &pdev->dev; | 183 | adap->dev.parent = &pdev->dev; |
184 | adap->dev.of_node = pdev->dev.of_node; | 184 | adap->dev.of_node = pdev->dev.of_node; |
185 | ACPI_HANDLE_SET(&adap->dev, ACPI_HANDLE(&pdev->dev)); | ||
186 | 185 | ||
187 | r = i2c_add_numbered_adapter(adap); | 186 | r = i2c_add_numbered_adapter(adap); |
188 | if (r) { | 187 | if (r) { |
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 5d6675013864..1a38dd7dfe4e 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c | |||
@@ -465,6 +465,7 @@ static const struct x86_cpu_id intel_idle_ids[] = { | |||
465 | ICPU(0x3c, idle_cpu_hsw), | 465 | ICPU(0x3c, idle_cpu_hsw), |
466 | ICPU(0x3f, idle_cpu_hsw), | 466 | ICPU(0x3f, idle_cpu_hsw), |
467 | ICPU(0x45, idle_cpu_hsw), | 467 | ICPU(0x45, idle_cpu_hsw), |
468 | ICPU(0x46, idle_cpu_hsw), | ||
468 | {} | 469 | {} |
469 | }; | 470 | }; |
470 | MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids); | 471 | MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids); |
diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c index 08a6c6d39e56..911205d3d5a0 100644 --- a/drivers/infiniband/hw/qib/qib_sd7220.c +++ b/drivers/infiniband/hw/qib/qib_sd7220.c | |||
@@ -44,7 +44,7 @@ | |||
44 | #include "qib.h" | 44 | #include "qib.h" |
45 | #include "qib_7220.h" | 45 | #include "qib_7220.h" |
46 | 46 | ||
47 | #define SD7220_FW_NAME "intel/sd7220.fw" | 47 | #define SD7220_FW_NAME "qlogic/sd7220.fw" |
48 | MODULE_FIRMWARE(SD7220_FW_NAME); | 48 | MODULE_FIRMWARE(SD7220_FW_NAME); |
49 | 49 | ||
50 | /* | 50 | /* |
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index 1daa97913b7d..0bfd8cf25200 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c | |||
@@ -359,7 +359,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom) | |||
359 | case 0x802: /* Intuos4 General Pen */ | 359 | case 0x802: /* Intuos4 General Pen */ |
360 | case 0x804: /* Intuos4 Marker Pen */ | 360 | case 0x804: /* Intuos4 Marker Pen */ |
361 | case 0x40802: /* Intuos4 Classic Pen */ | 361 | case 0x40802: /* Intuos4 Classic Pen */ |
362 | case 0x18803: /* DTH2242 Grip Pen */ | 362 | case 0x18802: /* DTH2242 Grip Pen */ |
363 | case 0x022: | 363 | case 0x022: |
364 | wacom->tool[idx] = BTN_TOOL_PEN; | 364 | wacom->tool[idx] = BTN_TOOL_PEN; |
365 | break; | 365 | break; |
@@ -1912,7 +1912,7 @@ static const struct wacom_features wacom_features_0xBB = | |||
1912 | { "Wacom Intuos4 12x19", WACOM_PKGLEN_INTUOS, 97536, 60960, 2047, | 1912 | { "Wacom Intuos4 12x19", WACOM_PKGLEN_INTUOS, 97536, 60960, 2047, |
1913 | 63, INTUOS4L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; | 1913 | 63, INTUOS4L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; |
1914 | static const struct wacom_features wacom_features_0xBC = | 1914 | static const struct wacom_features wacom_features_0xBC = |
1915 | { "Wacom Intuos4 WL", WACOM_PKGLEN_INTUOS, 40840, 25400, 2047, | 1915 | { "Wacom Intuos4 WL", WACOM_PKGLEN_INTUOS, 40640, 25400, 2047, |
1916 | 63, INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; | 1916 | 63, INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; |
1917 | static const struct wacom_features wacom_features_0x26 = | 1917 | static const struct wacom_features wacom_features_0x26 = |
1918 | { "Wacom Intuos5 touch S", WACOM_PKGLEN_INTUOS, 31496, 19685, 2047, | 1918 | { "Wacom Intuos5 touch S", WACOM_PKGLEN_INTUOS, 31496, 19685, 2047, |
@@ -2144,7 +2144,7 @@ const struct usb_device_id wacom_ids[] = { | |||
2144 | { USB_DEVICE_WACOM(0x44) }, | 2144 | { USB_DEVICE_WACOM(0x44) }, |
2145 | { USB_DEVICE_WACOM(0x45) }, | 2145 | { USB_DEVICE_WACOM(0x45) }, |
2146 | { USB_DEVICE_WACOM(0x59) }, | 2146 | { USB_DEVICE_WACOM(0x59) }, |
2147 | { USB_DEVICE_WACOM(0x5D) }, | 2147 | { USB_DEVICE_DETAILED(0x5D, USB_CLASS_HID, 0, 0) }, |
2148 | { USB_DEVICE_WACOM(0xB0) }, | 2148 | { USB_DEVICE_WACOM(0xB0) }, |
2149 | { USB_DEVICE_WACOM(0xB1) }, | 2149 | { USB_DEVICE_WACOM(0xB1) }, |
2150 | { USB_DEVICE_WACOM(0xB2) }, | 2150 | { USB_DEVICE_WACOM(0xB2) }, |
@@ -2209,7 +2209,7 @@ const struct usb_device_id wacom_ids[] = { | |||
2209 | { USB_DEVICE_WACOM(0x47) }, | 2209 | { USB_DEVICE_WACOM(0x47) }, |
2210 | { USB_DEVICE_WACOM(0xF4) }, | 2210 | { USB_DEVICE_WACOM(0xF4) }, |
2211 | { USB_DEVICE_WACOM(0xF8) }, | 2211 | { USB_DEVICE_WACOM(0xF8) }, |
2212 | { USB_DEVICE_WACOM(0xF6) }, | 2212 | { USB_DEVICE_DETAILED(0xF6, USB_CLASS_HID, 0, 0) }, |
2213 | { USB_DEVICE_WACOM(0xFA) }, | 2213 | { USB_DEVICE_WACOM(0xFA) }, |
2214 | { USB_DEVICE_LENOVO(0x6004) }, | 2214 | { USB_DEVICE_LENOVO(0x6004) }, |
2215 | { } | 2215 | { } |
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index a32e0d5aa45f..fc6aebf1e4b2 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c | |||
@@ -236,7 +236,8 @@ static int gic_retrigger(struct irq_data *d) | |||
236 | if (gic_arch_extn.irq_retrigger) | 236 | if (gic_arch_extn.irq_retrigger) |
237 | return gic_arch_extn.irq_retrigger(d); | 237 | return gic_arch_extn.irq_retrigger(d); |
238 | 238 | ||
239 | return -ENXIO; | 239 | /* the genirq layer expects 0 if we can't retrigger in hardware */ |
240 | return 0; | ||
240 | } | 241 | } |
241 | 242 | ||
242 | #ifdef CONFIG_SMP | 243 | #ifdef CONFIG_SMP |
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 66120bd46d15..10744091e6ca 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c | |||
@@ -6,6 +6,7 @@ | |||
6 | 6 | ||
7 | #include "dm.h" | 7 | #include "dm.h" |
8 | #include "dm-bio-prison.h" | 8 | #include "dm-bio-prison.h" |
9 | #include "dm-bio-record.h" | ||
9 | #include "dm-cache-metadata.h" | 10 | #include "dm-cache-metadata.h" |
10 | 11 | ||
11 | #include <linux/dm-io.h> | 12 | #include <linux/dm-io.h> |
@@ -201,10 +202,15 @@ struct per_bio_data { | |||
201 | unsigned req_nr:2; | 202 | unsigned req_nr:2; |
202 | struct dm_deferred_entry *all_io_entry; | 203 | struct dm_deferred_entry *all_io_entry; |
203 | 204 | ||
204 | /* writethrough fields */ | 205 | /* |
206 | * writethrough fields. These MUST remain at the end of this | ||
207 | * structure and the 'cache' member must be the first as it | ||
208 | * is used to determine the offsetof the writethrough fields. | ||
209 | */ | ||
205 | struct cache *cache; | 210 | struct cache *cache; |
206 | dm_cblock_t cblock; | 211 | dm_cblock_t cblock; |
207 | bio_end_io_t *saved_bi_end_io; | 212 | bio_end_io_t *saved_bi_end_io; |
213 | struct dm_bio_details bio_details; | ||
208 | }; | 214 | }; |
209 | 215 | ||
210 | struct dm_cache_migration { | 216 | struct dm_cache_migration { |
@@ -513,16 +519,28 @@ static void save_stats(struct cache *cache) | |||
513 | /*---------------------------------------------------------------- | 519 | /*---------------------------------------------------------------- |
514 | * Per bio data | 520 | * Per bio data |
515 | *--------------------------------------------------------------*/ | 521 | *--------------------------------------------------------------*/ |
516 | static struct per_bio_data *get_per_bio_data(struct bio *bio) | 522 | |
523 | /* | ||
524 | * If using writeback, leave out struct per_bio_data's writethrough fields. | ||
525 | */ | ||
526 | #define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache)) | ||
527 | #define PB_DATA_SIZE_WT (sizeof(struct per_bio_data)) | ||
528 | |||
529 | static size_t get_per_bio_data_size(struct cache *cache) | ||
530 | { | ||
531 | return cache->features.write_through ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB; | ||
532 | } | ||
533 | |||
534 | static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size) | ||
517 | { | 535 | { |
518 | struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data)); | 536 | struct per_bio_data *pb = dm_per_bio_data(bio, data_size); |
519 | BUG_ON(!pb); | 537 | BUG_ON(!pb); |
520 | return pb; | 538 | return pb; |
521 | } | 539 | } |
522 | 540 | ||
523 | static struct per_bio_data *init_per_bio_data(struct bio *bio) | 541 | static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size) |
524 | { | 542 | { |
525 | struct per_bio_data *pb = get_per_bio_data(bio); | 543 | struct per_bio_data *pb = get_per_bio_data(bio, data_size); |
526 | 544 | ||
527 | pb->tick = false; | 545 | pb->tick = false; |
528 | pb->req_nr = dm_bio_get_target_bio_nr(bio); | 546 | pb->req_nr = dm_bio_get_target_bio_nr(bio); |
@@ -556,7 +574,8 @@ static void remap_to_cache(struct cache *cache, struct bio *bio, | |||
556 | static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) | 574 | static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) |
557 | { | 575 | { |
558 | unsigned long flags; | 576 | unsigned long flags; |
559 | struct per_bio_data *pb = get_per_bio_data(bio); | 577 | size_t pb_data_size = get_per_bio_data_size(cache); |
578 | struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); | ||
560 | 579 | ||
561 | spin_lock_irqsave(&cache->lock, flags); | 580 | spin_lock_irqsave(&cache->lock, flags); |
562 | if (cache->need_tick_bio && | 581 | if (cache->need_tick_bio && |
@@ -635,7 +654,7 @@ static void defer_writethrough_bio(struct cache *cache, struct bio *bio) | |||
635 | 654 | ||
636 | static void writethrough_endio(struct bio *bio, int err) | 655 | static void writethrough_endio(struct bio *bio, int err) |
637 | { | 656 | { |
638 | struct per_bio_data *pb = get_per_bio_data(bio); | 657 | struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT); |
639 | bio->bi_end_io = pb->saved_bi_end_io; | 658 | bio->bi_end_io = pb->saved_bi_end_io; |
640 | 659 | ||
641 | if (err) { | 660 | if (err) { |
@@ -643,6 +662,7 @@ static void writethrough_endio(struct bio *bio, int err) | |||
643 | return; | 662 | return; |
644 | } | 663 | } |
645 | 664 | ||
665 | dm_bio_restore(&pb->bio_details, bio); | ||
646 | remap_to_cache(pb->cache, bio, pb->cblock); | 666 | remap_to_cache(pb->cache, bio, pb->cblock); |
647 | 667 | ||
648 | /* | 668 | /* |
@@ -662,11 +682,12 @@ static void writethrough_endio(struct bio *bio, int err) | |||
662 | static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio, | 682 | static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio, |
663 | dm_oblock_t oblock, dm_cblock_t cblock) | 683 | dm_oblock_t oblock, dm_cblock_t cblock) |
664 | { | 684 | { |
665 | struct per_bio_data *pb = get_per_bio_data(bio); | 685 | struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT); |
666 | 686 | ||
667 | pb->cache = cache; | 687 | pb->cache = cache; |
668 | pb->cblock = cblock; | 688 | pb->cblock = cblock; |
669 | pb->saved_bi_end_io = bio->bi_end_io; | 689 | pb->saved_bi_end_io = bio->bi_end_io; |
690 | dm_bio_record(&pb->bio_details, bio); | ||
670 | bio->bi_end_io = writethrough_endio; | 691 | bio->bi_end_io = writethrough_endio; |
671 | 692 | ||
672 | remap_to_origin_clear_discard(pb->cache, bio, oblock); | 693 | remap_to_origin_clear_discard(pb->cache, bio, oblock); |
@@ -1035,7 +1056,8 @@ static void defer_bio(struct cache *cache, struct bio *bio) | |||
1035 | 1056 | ||
1036 | static void process_flush_bio(struct cache *cache, struct bio *bio) | 1057 | static void process_flush_bio(struct cache *cache, struct bio *bio) |
1037 | { | 1058 | { |
1038 | struct per_bio_data *pb = get_per_bio_data(bio); | 1059 | size_t pb_data_size = get_per_bio_data_size(cache); |
1060 | struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); | ||
1039 | 1061 | ||
1040 | BUG_ON(bio->bi_size); | 1062 | BUG_ON(bio->bi_size); |
1041 | if (!pb->req_nr) | 1063 | if (!pb->req_nr) |
@@ -1107,7 +1129,8 @@ static void process_bio(struct cache *cache, struct prealloc *structs, | |||
1107 | dm_oblock_t block = get_bio_block(cache, bio); | 1129 | dm_oblock_t block = get_bio_block(cache, bio); |
1108 | struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell; | 1130 | struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell; |
1109 | struct policy_result lookup_result; | 1131 | struct policy_result lookup_result; |
1110 | struct per_bio_data *pb = get_per_bio_data(bio); | 1132 | size_t pb_data_size = get_per_bio_data_size(cache); |
1133 | struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); | ||
1111 | bool discarded_block = is_discarded_oblock(cache, block); | 1134 | bool discarded_block = is_discarded_oblock(cache, block); |
1112 | bool can_migrate = discarded_block || spare_migration_bandwidth(cache); | 1135 | bool can_migrate = discarded_block || spare_migration_bandwidth(cache); |
1113 | 1136 | ||
@@ -1881,7 +1904,6 @@ static int cache_create(struct cache_args *ca, struct cache **result) | |||
1881 | 1904 | ||
1882 | cache->ti = ca->ti; | 1905 | cache->ti = ca->ti; |
1883 | ti->private = cache; | 1906 | ti->private = cache; |
1884 | ti->per_bio_data_size = sizeof(struct per_bio_data); | ||
1885 | ti->num_flush_bios = 2; | 1907 | ti->num_flush_bios = 2; |
1886 | ti->flush_supported = true; | 1908 | ti->flush_supported = true; |
1887 | 1909 | ||
@@ -1890,6 +1912,7 @@ static int cache_create(struct cache_args *ca, struct cache **result) | |||
1890 | ti->discard_zeroes_data_unsupported = true; | 1912 | ti->discard_zeroes_data_unsupported = true; |
1891 | 1913 | ||
1892 | memcpy(&cache->features, &ca->features, sizeof(cache->features)); | 1914 | memcpy(&cache->features, &ca->features, sizeof(cache->features)); |
1915 | ti->per_bio_data_size = get_per_bio_data_size(cache); | ||
1893 | 1916 | ||
1894 | cache->callbacks.congested_fn = cache_is_congested; | 1917 | cache->callbacks.congested_fn = cache_is_congested; |
1895 | dm_table_add_target_callbacks(ti->table, &cache->callbacks); | 1918 | dm_table_add_target_callbacks(ti->table, &cache->callbacks); |
@@ -2092,6 +2115,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio) | |||
2092 | 2115 | ||
2093 | int r; | 2116 | int r; |
2094 | dm_oblock_t block = get_bio_block(cache, bio); | 2117 | dm_oblock_t block = get_bio_block(cache, bio); |
2118 | size_t pb_data_size = get_per_bio_data_size(cache); | ||
2095 | bool can_migrate = false; | 2119 | bool can_migrate = false; |
2096 | bool discarded_block; | 2120 | bool discarded_block; |
2097 | struct dm_bio_prison_cell *cell; | 2121 | struct dm_bio_prison_cell *cell; |
@@ -2108,7 +2132,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio) | |||
2108 | return DM_MAPIO_REMAPPED; | 2132 | return DM_MAPIO_REMAPPED; |
2109 | } | 2133 | } |
2110 | 2134 | ||
2111 | pb = init_per_bio_data(bio); | 2135 | pb = init_per_bio_data(bio, pb_data_size); |
2112 | 2136 | ||
2113 | if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) { | 2137 | if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) { |
2114 | defer_bio(cache, bio); | 2138 | defer_bio(cache, bio); |
@@ -2193,7 +2217,8 @@ static int cache_end_io(struct dm_target *ti, struct bio *bio, int error) | |||
2193 | { | 2217 | { |
2194 | struct cache *cache = ti->private; | 2218 | struct cache *cache = ti->private; |
2195 | unsigned long flags; | 2219 | unsigned long flags; |
2196 | struct per_bio_data *pb = get_per_bio_data(bio); | 2220 | size_t pb_data_size = get_per_bio_data_size(cache); |
2221 | struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); | ||
2197 | 2222 | ||
2198 | if (pb->tick) { | 2223 | if (pb->tick) { |
2199 | policy_tick(cache->policy); | 2224 | policy_tick(cache->policy); |
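The dm-cache rework sizes the per-bio data by cache mode: writeback targets only need the fields up to (but not including) 'cache', so PB_DATA_SIZE_WB is offsetof(struct per_bio_data, cache) while writethrough keeps the full sizeof() — which is exactly why the new comment pins the writethrough fields to the end of the structure with 'cache' first among them. The sizing trick is plain C; a runnable illustration with a stand-in structure:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdbool.h>

    struct per_bio_data {
            bool tick;
            unsigned req_nr;
            /* writethrough-only fields must stay last, 'cache' first among them */
            void *cache;
            unsigned cblock;
    };

    #define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache)) /* common fields only */
    #define PB_DATA_SIZE_WT (sizeof(struct per_bio_data))          /* everything */

    static size_t per_bio_data_size(bool write_through)
    {
            return write_through ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
    }

    int main(void)
    {
            printf("writeback: %zu bytes, writethrough: %zu bytes\n",
                   per_bio_data_size(false), per_bio_data_size(true));
            return 0;
    }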
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 7e469260fe5e..9a0bdad9ad8f 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -611,6 +611,7 @@ static void dec_pending(struct dm_io *io, int error) | |||
611 | queue_io(md, bio); | 611 | queue_io(md, bio); |
612 | } else { | 612 | } else { |
613 | /* done with normal IO or empty flush */ | 613 | /* done with normal IO or empty flush */ |
614 | trace_block_bio_complete(md->queue, bio, io_error); | ||
614 | bio_endio(bio, io_error); | 615 | bio_endio(bio, io_error); |
615 | } | 616 | } |
616 | } | 617 | } |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 24909eb13fec..f4e87bfc7567 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -184,6 +184,8 @@ static void return_io(struct bio *return_bi) | |||
184 | return_bi = bi->bi_next; | 184 | return_bi = bi->bi_next; |
185 | bi->bi_next = NULL; | 185 | bi->bi_next = NULL; |
186 | bi->bi_size = 0; | 186 | bi->bi_size = 0; |
187 | trace_block_bio_complete(bdev_get_queue(bi->bi_bdev), | ||
188 | bi, 0); | ||
187 | bio_endio(bi, 0); | 189 | bio_endio(bi, 0); |
188 | bi = return_bi; | 190 | bi = return_bi; |
189 | } | 191 | } |
@@ -3914,6 +3916,8 @@ static void raid5_align_endio(struct bio *bi, int error) | |||
3914 | rdev_dec_pending(rdev, conf->mddev); | 3916 | rdev_dec_pending(rdev, conf->mddev); |
3915 | 3917 | ||
3916 | if (!error && uptodate) { | 3918 | if (!error && uptodate) { |
3919 | trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev), | ||
3920 | raid_bi, 0); | ||
3917 | bio_endio(raid_bi, 0); | 3921 | bio_endio(raid_bi, 0); |
3918 | if (atomic_dec_and_test(&conf->active_aligned_reads)) | 3922 | if (atomic_dec_and_test(&conf->active_aligned_reads)) |
3919 | wake_up(&conf->wait_for_stripe); | 3923 | wake_up(&conf->wait_for_stripe); |
@@ -4382,6 +4386,8 @@ static void make_request(struct mddev *mddev, struct bio * bi) | |||
4382 | if ( rw == WRITE ) | 4386 | if ( rw == WRITE ) |
4383 | md_write_end(mddev); | 4387 | md_write_end(mddev); |
4384 | 4388 | ||
4389 | trace_block_bio_complete(bdev_get_queue(bi->bi_bdev), | ||
4390 | bi, 0); | ||
4385 | bio_endio(bi, 0); | 4391 | bio_endio(bi, 0); |
4386 | } | 4392 | } |
4387 | } | 4393 | } |
@@ -4758,8 +4764,11 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) | |||
4758 | handled++; | 4764 | handled++; |
4759 | } | 4765 | } |
4760 | remaining = raid5_dec_bi_active_stripes(raid_bio); | 4766 | remaining = raid5_dec_bi_active_stripes(raid_bio); |
4761 | if (remaining == 0) | 4767 | if (remaining == 0) { |
4768 | trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev), | ||
4769 | raid_bio, 0); | ||
4762 | bio_endio(raid_bio, 0); | 4770 | bio_endio(raid_bio, 0); |
4771 | } | ||
4763 | if (atomic_dec_and_test(&conf->active_aligned_reads)) | 4772 | if (atomic_dec_and_test(&conf->active_aligned_reads)) |
4764 | wake_up(&conf->wait_for_stripe); | 4773 | wake_up(&conf->wait_for_stripe); |
4765 | return handled; | 4774 | return handled; |
diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c index f19cd7367040..4faaf8053f26 100644 --- a/drivers/media/dvb-frontends/mb86a20s.c +++ b/drivers/media/dvb-frontends/mb86a20s.c | |||
@@ -610,7 +610,7 @@ static void mb86a20s_layer_bitrate(struct dvb_frontend *fe, u32 layer, | |||
610 | __func__, 'A' + layer, segment * isdbt_rate[m][f][i]/1000, | 610 | __func__, 'A' + layer, segment * isdbt_rate[m][f][i]/1000, |
611 | rate, rate); | 611 | rate, rate); |
612 | 612 | ||
613 | state->estimated_rate[i] = rate; | 613 | state->estimated_rate[layer] = rate; |
614 | } | 614 | } |
615 | 615 | ||
616 | 616 | ||
diff --git a/drivers/media/pci/cx25821/cx25821-video.c b/drivers/media/pci/cx25821/cx25821-video.c index d4de021dc844..31ce7698acb9 100644 --- a/drivers/media/pci/cx25821/cx25821-video.c +++ b/drivers/media/pci/cx25821/cx25821-video.c | |||
@@ -461,7 +461,7 @@ int cx25821_video_register(struct cx25821_dev *dev) | |||
461 | 461 | ||
462 | spin_lock_init(&dev->slock); | 462 | spin_lock_init(&dev->slock); |
463 | 463 | ||
464 | for (i = 0; i < MAX_VID_CHANNEL_NUM - 1; ++i) { | 464 | for (i = 0; i < VID_CHANNEL_NUM; ++i) { |
465 | cx25821_init_controls(dev, i); | 465 | cx25821_init_controls(dev, i); |
466 | 466 | ||
467 | cx25821_risc_stopper(dev->pci, &dev->channels[i].vidq.stopper, | 467 | cx25821_risc_stopper(dev->pci, &dev->channels[i].vidq.stopper, |
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig index 05d7b6333461..a0639e779973 100644 --- a/drivers/media/platform/Kconfig +++ b/drivers/media/platform/Kconfig | |||
@@ -204,7 +204,7 @@ config VIDEO_SAMSUNG_EXYNOS_GSC | |||
204 | 204 | ||
205 | config VIDEO_SH_VEU | 205 | config VIDEO_SH_VEU |
206 | tristate "SuperH VEU mem2mem video processing driver" | 206 | tristate "SuperH VEU mem2mem video processing driver" |
207 | depends on VIDEO_DEV && VIDEO_V4L2 | 207 | depends on VIDEO_DEV && VIDEO_V4L2 && GENERIC_HARDIRQS |
208 | select VIDEOBUF2_DMA_CONTIG | 208 | select VIDEOBUF2_DMA_CONTIG |
209 | select V4L2_MEM2MEM_DEV | 209 | select V4L2_MEM2MEM_DEV |
210 | help | 210 | help |
diff --git a/drivers/media/radio/radio-ma901.c b/drivers/media/radio/radio-ma901.c index c61f590029ad..348dafc0318a 100644 --- a/drivers/media/radio/radio-ma901.c +++ b/drivers/media/radio/radio-ma901.c | |||
@@ -347,9 +347,20 @@ static void usb_ma901radio_release(struct v4l2_device *v4l2_dev) | |||
347 | static int usb_ma901radio_probe(struct usb_interface *intf, | 347 | static int usb_ma901radio_probe(struct usb_interface *intf, |
348 | const struct usb_device_id *id) | 348 | const struct usb_device_id *id) |
349 | { | 349 | { |
350 | struct usb_device *dev = interface_to_usbdev(intf); | ||
350 | struct ma901radio_device *radio; | 351 | struct ma901radio_device *radio; |
351 | int retval = 0; | 352 | int retval = 0; |
352 | 353 | ||
354 | /* Masterkit MA901 usb radio has the same USB ID as many others | ||
355 | * Atmel V-USB devices. Let's make additional checks to be sure | ||
356 | * that this is our device. | ||
357 | */ | ||
358 | |||
359 | if (dev->product && dev->manufacturer && | ||
360 | (strncmp(dev->product, "MA901", 5) != 0 | ||
361 | || strncmp(dev->manufacturer, "www.masterkit.ru", 16) != 0)) | ||
362 | return -ENODEV; | ||
363 | |||
353 | radio = kzalloc(sizeof(struct ma901radio_device), GFP_KERNEL); | 364 | radio = kzalloc(sizeof(struct ma901radio_device), GFP_KERNEL); |
354 | if (!radio) { | 365 | if (!radio) { |
355 | dev_err(&intf->dev, "kzalloc for ma901radio_device failed\n"); | 366 | dev_err(&intf->dev, "kzalloc for ma901radio_device failed\n"); |
diff --git a/drivers/misc/vmw_vmci/Kconfig b/drivers/misc/vmw_vmci/Kconfig index 39c2ecadb273..ea98f7e9ccd1 100644 --- a/drivers/misc/vmw_vmci/Kconfig +++ b/drivers/misc/vmw_vmci/Kconfig | |||
@@ -4,7 +4,7 @@ | |||
4 | 4 | ||
5 | config VMWARE_VMCI | 5 | config VMWARE_VMCI |
6 | tristate "VMware VMCI Driver" | 6 | tristate "VMware VMCI Driver" |
7 | depends on X86 && PCI | 7 | depends on X86 && PCI && NET |
8 | help | 8 | help |
9 | This is VMware's Virtual Machine Communication Interface. It enables | 9 | This is VMware's Virtual Machine Communication Interface. It enables |
10 | high-speed communication between host and guest in a virtual | 10 | high-speed communication between host and guest in a virtual |
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index 92ab30ab00dc..dc571ebc1aa0 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c | |||
@@ -1123,33 +1123,6 @@ static unsigned long mtdchar_get_unmapped_area(struct file *file, | |||
1123 | } | 1123 | } |
1124 | #endif | 1124 | #endif |
1125 | 1125 | ||
1126 | static inline unsigned long get_vm_size(struct vm_area_struct *vma) | ||
1127 | { | ||
1128 | return vma->vm_end - vma->vm_start; | ||
1129 | } | ||
1130 | |||
1131 | static inline resource_size_t get_vm_offset(struct vm_area_struct *vma) | ||
1132 | { | ||
1133 | return (resource_size_t) vma->vm_pgoff << PAGE_SHIFT; | ||
1134 | } | ||
1135 | |||
1136 | /* | ||
1137 | * Set a new vm offset. | ||
1138 | * | ||
1139 | * Verify that the incoming offset really works as a page offset, | ||
1140 | * and that the offset and size fit in a resource_size_t. | ||
1141 | */ | ||
1142 | static inline int set_vm_offset(struct vm_area_struct *vma, resource_size_t off) | ||
1143 | { | ||
1144 | pgoff_t pgoff = off >> PAGE_SHIFT; | ||
1145 | if (off != (resource_size_t) pgoff << PAGE_SHIFT) | ||
1146 | return -EINVAL; | ||
1147 | if (off + get_vm_size(vma) - 1 < off) | ||
1148 | return -EINVAL; | ||
1149 | vma->vm_pgoff = pgoff; | ||
1150 | return 0; | ||
1151 | } | ||
1152 | |||
1153 | /* | 1126 | /* |
1154 | * set up a mapping for shared memory segments | 1127 | * set up a mapping for shared memory segments |
1155 | */ | 1128 | */ |
@@ -1159,45 +1132,17 @@ static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma) | |||
1159 | struct mtd_file_info *mfi = file->private_data; | 1132 | struct mtd_file_info *mfi = file->private_data; |
1160 | struct mtd_info *mtd = mfi->mtd; | 1133 | struct mtd_info *mtd = mfi->mtd; |
1161 | struct map_info *map = mtd->priv; | 1134 | struct map_info *map = mtd->priv; |
1162 | resource_size_t start, off; | ||
1163 | unsigned long len, vma_len; | ||
1164 | 1135 | ||
1165 | /* This is broken because it assumes the MTD device is map-based | 1136 | /* This is broken because it assumes the MTD device is map-based |
1166 | and that mtd->priv is a valid struct map_info. It should be | 1137 | and that mtd->priv is a valid struct map_info. It should be |
1167 | replaced with something that uses the mtd_get_unmapped_area() | 1138 | replaced with something that uses the mtd_get_unmapped_area() |
1168 | operation properly. */ | 1139 | operation properly. */ |
1169 | if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) { | 1140 | if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) { |
1170 | off = get_vm_offset(vma); | ||
1171 | start = map->phys; | ||
1172 | len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size); | ||
1173 | start &= PAGE_MASK; | ||
1174 | vma_len = get_vm_size(vma); | ||
1175 | |||
1176 | /* Overflow in off+len? */ | ||
1177 | if (vma_len + off < off) | ||
1178 | return -EINVAL; | ||
1179 | /* Does it fit in the mapping? */ | ||
1180 | if (vma_len + off > len) | ||
1181 | return -EINVAL; | ||
1182 | |||
1183 | off += start; | ||
1184 | /* Did that overflow? */ | ||
1185 | if (off < start) | ||
1186 | return -EINVAL; | ||
1187 | if (set_vm_offset(vma, off) < 0) | ||
1188 | return -EINVAL; | ||
1189 | vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; | ||
1190 | |||
1191 | #ifdef pgprot_noncached | 1141 | #ifdef pgprot_noncached |
1192 | if (file->f_flags & O_DSYNC || off >= __pa(high_memory)) | 1142 | if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory)) |
1193 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | 1143 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); |
1194 | #endif | 1144 | #endif |
1195 | if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, | 1145 | return vm_iomap_memory(vma, map->phys, map->size); |
1196 | vma->vm_end - vma->vm_start, | ||
1197 | vma->vm_page_prot)) | ||
1198 | return -EAGAIN; | ||
1199 | |||
1200 | return 0; | ||
1201 | } | 1146 | } |
1202 | return -ENOSYS; | 1147 | return -ENOSYS; |
1203 | #else | 1148 | #else |
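The mtdchar hunk replaces the hand-rolled offset, overflow and fit checks plus io_remap_pfn_range() with a single vm_iomap_memory() call, which performs the same validation against the physical window internally. A kernel-style sketch of an mmap handler built on that helper (the device structure is hypothetical; vm_iomap_memory() and the mmap signature are the real API, and this is only a sketch, not buildable outside a kernel tree):

    #include <linux/fs.h>
    #include <linux/mm.h>

    /* Hypothetical device description; only vm_iomap_memory() is the real API. */
    struct example_dev {
            phys_addr_t     phys;   /* start of the device's memory window */
            resource_size_t size;   /* length of that window */
    };

    /* vm_iomap_memory() checks vma->vm_pgoff and the mapping length against
     * [phys, phys + size) and remaps the pages itself, so the manual
     * overflow/fit checks the old mtdchar code carried are no longer needed. */
    static int example_mmap(struct file *file, struct vm_area_struct *vma)
    {
            struct example_dev *dev = file->private_data;

            return vm_iomap_memory(vma, dev->phys, dev->size);
    }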
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 6bbd90e1123c..dbbea0eec134 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -846,8 +846,10 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active, | |||
846 | if (bond->dev->flags & IFF_ALLMULTI) | 846 | if (bond->dev->flags & IFF_ALLMULTI) |
847 | dev_set_allmulti(old_active->dev, -1); | 847 | dev_set_allmulti(old_active->dev, -1); |
848 | 848 | ||
849 | netif_addr_lock_bh(bond->dev); | ||
849 | netdev_for_each_mc_addr(ha, bond->dev) | 850 | netdev_for_each_mc_addr(ha, bond->dev) |
850 | dev_mc_del(old_active->dev, ha->addr); | 851 | dev_mc_del(old_active->dev, ha->addr); |
852 | netif_addr_unlock_bh(bond->dev); | ||
851 | } | 853 | } |
852 | 854 | ||
853 | if (new_active) { | 855 | if (new_active) { |
@@ -858,8 +860,10 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active, | |||
858 | if (bond->dev->flags & IFF_ALLMULTI) | 860 | if (bond->dev->flags & IFF_ALLMULTI) |
859 | dev_set_allmulti(new_active->dev, 1); | 861 | dev_set_allmulti(new_active->dev, 1); |
860 | 862 | ||
863 | netif_addr_lock_bh(bond->dev); | ||
861 | netdev_for_each_mc_addr(ha, bond->dev) | 864 | netdev_for_each_mc_addr(ha, bond->dev) |
862 | dev_mc_add(new_active->dev, ha->addr); | 865 | dev_mc_add(new_active->dev, ha->addr); |
866 | netif_addr_unlock_bh(bond->dev); | ||
863 | } | 867 | } |
864 | } | 868 | } |
865 | 869 | ||
@@ -1901,11 +1905,29 @@ err_dest_symlinks: | |||
1901 | bond_destroy_slave_symlinks(bond_dev, slave_dev); | 1905 | bond_destroy_slave_symlinks(bond_dev, slave_dev); |
1902 | 1906 | ||
1903 | err_detach: | 1907 | err_detach: |
1908 | if (!USES_PRIMARY(bond->params.mode)) { | ||
1909 | netif_addr_lock_bh(bond_dev); | ||
1910 | bond_mc_list_flush(bond_dev, slave_dev); | ||
1911 | netif_addr_unlock_bh(bond_dev); | ||
1912 | } | ||
1913 | bond_del_vlans_from_slave(bond, slave_dev); | ||
1904 | write_lock_bh(&bond->lock); | 1914 | write_lock_bh(&bond->lock); |
1905 | bond_detach_slave(bond, new_slave); | 1915 | bond_detach_slave(bond, new_slave); |
1916 | if (bond->primary_slave == new_slave) | ||
1917 | bond->primary_slave = NULL; | ||
1906 | write_unlock_bh(&bond->lock); | 1918 | write_unlock_bh(&bond->lock); |
1919 | if (bond->curr_active_slave == new_slave) { | ||
1920 | read_lock(&bond->lock); | ||
1921 | write_lock_bh(&bond->curr_slave_lock); | ||
1922 | bond_change_active_slave(bond, NULL); | ||
1923 | bond_select_active_slave(bond); | ||
1924 | write_unlock_bh(&bond->curr_slave_lock); | ||
1925 | read_unlock(&bond->lock); | ||
1926 | } | ||
1927 | slave_disable_netpoll(new_slave); | ||
1907 | 1928 | ||
1908 | err_close: | 1929 | err_close: |
1930 | slave_dev->priv_flags &= ~IFF_BONDING; | ||
1909 | dev_close(slave_dev); | 1931 | dev_close(slave_dev); |
1910 | 1932 | ||
1911 | err_unset_master: | 1933 | err_unset_master: |
@@ -1976,12 +1998,11 @@ static int __bond_release_one(struct net_device *bond_dev, | |||
1976 | return -EINVAL; | 1998 | return -EINVAL; |
1977 | } | 1999 | } |
1978 | 2000 | ||
2001 | write_unlock_bh(&bond->lock); | ||
1979 | /* unregister rx_handler early so bond_handle_frame wouldn't be called | 2002 | /* unregister rx_handler early so bond_handle_frame wouldn't be called |
1980 | * for this slave anymore. | 2003 | * for this slave anymore. |
1981 | */ | 2004 | */ |
1982 | netdev_rx_handler_unregister(slave_dev); | 2005 | netdev_rx_handler_unregister(slave_dev); |
1983 | write_unlock_bh(&bond->lock); | ||
1984 | synchronize_net(); | ||
1985 | write_lock_bh(&bond->lock); | 2006 | write_lock_bh(&bond->lock); |
1986 | 2007 | ||
1987 | if (!all && !bond->params.fail_over_mac) { | 2008 | if (!all && !bond->params.fail_over_mac) { |
@@ -3169,11 +3190,20 @@ static int bond_slave_netdev_event(unsigned long event, | |||
3169 | struct net_device *slave_dev) | 3190 | struct net_device *slave_dev) |
3170 | { | 3191 | { |
3171 | struct slave *slave = bond_slave_get_rtnl(slave_dev); | 3192 | struct slave *slave = bond_slave_get_rtnl(slave_dev); |
3172 | struct bonding *bond = slave->bond; | 3193 | struct bonding *bond; |
3173 | struct net_device *bond_dev = slave->bond->dev; | 3194 | struct net_device *bond_dev; |
3174 | u32 old_speed; | 3195 | u32 old_speed; |
3175 | u8 old_duplex; | 3196 | u8 old_duplex; |
3176 | 3197 | ||
3198 | /* A netdev event can be generated while enslaving a device | ||
3199 | * before netdev_rx_handler_register is called in which case | ||
3200 | * slave will be NULL | ||
3201 | */ | ||
3202 | if (!slave) | ||
3203 | return NOTIFY_DONE; | ||
3204 | bond_dev = slave->bond->dev; | ||
3205 | bond = slave->bond; | ||
3206 | |||
3177 | switch (event) { | 3207 | switch (event) { |
3178 | case NETDEV_UNREGISTER: | 3208 | case NETDEV_UNREGISTER: |
3179 | if (bond->setup_by_slave) | 3209 | if (bond->setup_by_slave) |
@@ -3287,20 +3317,22 @@ static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count) | |||
3287 | */ | 3317 | */ |
3288 | static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count) | 3318 | static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count) |
3289 | { | 3319 | { |
3290 | struct ethhdr *data = (struct ethhdr *)skb->data; | 3320 | const struct ethhdr *data; |
3291 | struct iphdr *iph; | 3321 | const struct iphdr *iph; |
3292 | struct ipv6hdr *ipv6h; | 3322 | const struct ipv6hdr *ipv6h; |
3293 | u32 v6hash; | 3323 | u32 v6hash; |
3294 | __be32 *s, *d; | 3324 | const __be32 *s, *d; |
3295 | 3325 | ||
3296 | if (skb->protocol == htons(ETH_P_IP) && | 3326 | if (skb->protocol == htons(ETH_P_IP) && |
3297 | skb_network_header_len(skb) >= sizeof(*iph)) { | 3327 | pskb_network_may_pull(skb, sizeof(*iph))) { |
3298 | iph = ip_hdr(skb); | 3328 | iph = ip_hdr(skb); |
3329 | data = (struct ethhdr *)skb->data; | ||
3299 | return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^ | 3330 | return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^ |
3300 | (data->h_dest[5] ^ data->h_source[5])) % count; | 3331 | (data->h_dest[5] ^ data->h_source[5])) % count; |
3301 | } else if (skb->protocol == htons(ETH_P_IPV6) && | 3332 | } else if (skb->protocol == htons(ETH_P_IPV6) && |
3302 | skb_network_header_len(skb) >= sizeof(*ipv6h)) { | 3333 | pskb_network_may_pull(skb, sizeof(*ipv6h))) { |
3303 | ipv6h = ipv6_hdr(skb); | 3334 | ipv6h = ipv6_hdr(skb); |
3335 | data = (struct ethhdr *)skb->data; | ||
3304 | s = &ipv6h->saddr.s6_addr32[0]; | 3336 | s = &ipv6h->saddr.s6_addr32[0]; |
3305 | d = &ipv6h->daddr.s6_addr32[0]; | 3337 | d = &ipv6h->daddr.s6_addr32[0]; |
3306 | v6hash = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]); | 3338 | v6hash = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]); |
@@ -3319,33 +3351,36 @@ static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count) | |||
3319 | static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count) | 3351 | static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count) |
3320 | { | 3352 | { |
3321 | u32 layer4_xor = 0; | 3353 | u32 layer4_xor = 0; |
3322 | struct iphdr *iph; | 3354 | const struct iphdr *iph; |
3323 | struct ipv6hdr *ipv6h; | 3355 | const struct ipv6hdr *ipv6h; |
3324 | __be32 *s, *d; | 3356 | const __be32 *s, *d; |
3325 | __be16 *layer4hdr; | 3357 | const __be16 *l4 = NULL; |
3358 | __be16 _l4[2]; | ||
3359 | int noff = skb_network_offset(skb); | ||
3360 | int poff; | ||
3326 | 3361 | ||
3327 | if (skb->protocol == htons(ETH_P_IP) && | 3362 | if (skb->protocol == htons(ETH_P_IP) && |
3328 | skb_network_header_len(skb) >= sizeof(*iph)) { | 3363 | pskb_may_pull(skb, noff + sizeof(*iph))) { |
3329 | iph = ip_hdr(skb); | 3364 | iph = ip_hdr(skb); |
3330 | if (!ip_is_fragment(iph) && | 3365 | poff = proto_ports_offset(iph->protocol); |
3331 | (iph->protocol == IPPROTO_TCP || | 3366 | |
3332 | iph->protocol == IPPROTO_UDP) && | 3367 | if (!ip_is_fragment(iph) && poff >= 0) { |
3333 | (skb_headlen(skb) - skb_network_offset(skb) >= | 3368 | l4 = skb_header_pointer(skb, noff + (iph->ihl << 2) + poff, |
3334 | iph->ihl * sizeof(u32) + sizeof(*layer4hdr) * 2)) { | 3369 | sizeof(_l4), &_l4); |
3335 | layer4hdr = (__be16 *)((u32 *)iph + iph->ihl); | 3370 | if (l4) |
3336 | layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1)); | 3371 | layer4_xor = ntohs(l4[0] ^ l4[1]); |
3337 | } | 3372 | } |
3338 | return (layer4_xor ^ | 3373 | return (layer4_xor ^ |
3339 | ((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count; | 3374 | ((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count; |
3340 | } else if (skb->protocol == htons(ETH_P_IPV6) && | 3375 | } else if (skb->protocol == htons(ETH_P_IPV6) && |
3341 | skb_network_header_len(skb) >= sizeof(*ipv6h)) { | 3376 | pskb_may_pull(skb, noff + sizeof(*ipv6h))) { |
3342 | ipv6h = ipv6_hdr(skb); | 3377 | ipv6h = ipv6_hdr(skb); |
3343 | if ((ipv6h->nexthdr == IPPROTO_TCP || | 3378 | poff = proto_ports_offset(ipv6h->nexthdr); |
3344 | ipv6h->nexthdr == IPPROTO_UDP) && | 3379 | if (poff >= 0) { |
3345 | (skb_headlen(skb) - skb_network_offset(skb) >= | 3380 | l4 = skb_header_pointer(skb, noff + sizeof(*ipv6h) + poff, |
3346 | sizeof(*ipv6h) + sizeof(*layer4hdr) * 2)) { | 3381 | sizeof(_l4), &_l4); |
3347 | layer4hdr = (__be16 *)(ipv6h + 1); | 3382 | if (l4) |
3348 | layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1)); | 3383 | layer4_xor = ntohs(l4[0] ^ l4[1]); |
3349 | } | 3384 | } |
3350 | s = &ipv6h->saddr.s6_addr32[0]; | 3385 | s = &ipv6h->saddr.s6_addr32[0]; |
3351 | d = &ipv6h->daddr.s6_addr32[0]; | 3386 | d = &ipv6h->daddr.s6_addr32[0]; |
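The rewritten l34 hash above stops reading the transport header through raw pointer arithmetic, which is only safe when those bytes happen to sit in the linear part of the skb, and instead goes through skb_header_pointer(), which copies the requested bytes into a caller-supplied stack buffer when they live in paged fragments. A minimal sketch of that idiom, with an illustrative function name and a caller-supplied l4_offset (this is not the bonding driver's code):

    #include <linux/skbuff.h>

    /* Read the first two 16-bit words of the transport header (the TCP/UDP
     * source and destination ports) without assuming they are linearized.
     * 'l4_offset' is assumed to already point at the start of the L4 header.
     */
    static u32 l4_port_xor(const struct sk_buff *skb, int l4_offset)
    {
    	__be16 _ports[2];
    	const __be16 *ports;

    	ports = skb_header_pointer(skb, l4_offset, sizeof(_ports), _ports);
    	if (!ports)
    		return 0;	/* ports not readable: hash on L3 fields only */

    	return ntohs(ports[0] ^ ports[1]);
    }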
@@ -4847,9 +4882,18 @@ static int __net_init bond_net_init(struct net *net) | |||
4847 | static void __net_exit bond_net_exit(struct net *net) | 4882 | static void __net_exit bond_net_exit(struct net *net) |
4848 | { | 4883 | { |
4849 | struct bond_net *bn = net_generic(net, bond_net_id); | 4884 | struct bond_net *bn = net_generic(net, bond_net_id); |
4885 | struct bonding *bond, *tmp_bond; | ||
4886 | LIST_HEAD(list); | ||
4850 | 4887 | ||
4851 | bond_destroy_sysfs(bn); | 4888 | bond_destroy_sysfs(bn); |
4852 | bond_destroy_proc_dir(bn); | 4889 | bond_destroy_proc_dir(bn); |
4890 | |||
4891 | /* Kill off any bonds created after unregistering bond rtnl ops */ | ||
4892 | rtnl_lock(); | ||
4893 | list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list) | ||
4894 | unregister_netdevice_queue(bond->dev, &list); | ||
4895 | unregister_netdevice_many(&list); | ||
4896 | rtnl_unlock(); | ||
4853 | } | 4897 | } |
4854 | 4898 | ||
4855 | static struct pernet_operations bond_net_ops = { | 4899 | static struct pernet_operations bond_net_ops = { |
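The bond_net_exit() change above sweeps up any bonds still present when the namespace goes away and unregisters them in one batch under a single rtnl_lock(). A sketch of that batching pattern with hypothetical structure and function names (the real code walks bn->dev_list):

    #include <linux/netdevice.h>
    #include <linux/rtnetlink.h>

    struct my_port {
    	struct net_device *dev;
    	struct list_head list;
    };

    /* Queue every remaining device and let the core tear them all down in
     * a single pass, instead of one notifier/sync round trip per device.
     */
    static void my_exit_batch(struct list_head *ports)
    {
    	struct my_port *p, *tmp;
    	LIST_HEAD(kill);

    	rtnl_lock();
    	list_for_each_entry_safe(p, tmp, ports, list)
    		unregister_netdevice_queue(p->dev, &kill);
    	unregister_netdevice_many(&kill);
    	rtnl_unlock();
    }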
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index db103e03ba05..ea7a388f4843 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c | |||
@@ -527,7 +527,7 @@ static ssize_t bonding_store_arp_interval(struct device *d, | |||
527 | goto out; | 527 | goto out; |
528 | } | 528 | } |
529 | if (new_value < 0) { | 529 | if (new_value < 0) { |
530 | pr_err("%s: Invalid arp_interval value %d not in range 1-%d; rejected.\n", | 530 | pr_err("%s: Invalid arp_interval value %d not in range 0-%d; rejected.\n", |
531 | bond->dev->name, new_value, INT_MAX); | 531 | bond->dev->name, new_value, INT_MAX); |
532 | ret = -EINVAL; | 532 | ret = -EINVAL; |
533 | goto out; | 533 | goto out; |
@@ -542,14 +542,15 @@ static ssize_t bonding_store_arp_interval(struct device *d, | |||
542 | pr_info("%s: Setting ARP monitoring interval to %d.\n", | 542 | pr_info("%s: Setting ARP monitoring interval to %d.\n", |
543 | bond->dev->name, new_value); | 543 | bond->dev->name, new_value); |
544 | bond->params.arp_interval = new_value; | 544 | bond->params.arp_interval = new_value; |
545 | if (bond->params.miimon) { | 545 | if (new_value) { |
546 | pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n", | 546 | if (bond->params.miimon) { |
547 | bond->dev->name, bond->dev->name); | 547 | pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n", |
548 | bond->params.miimon = 0; | 548 | bond->dev->name, bond->dev->name); |
549 | } | 549 | bond->params.miimon = 0; |
550 | if (!bond->params.arp_targets[0]) { | 550 | } |
551 | pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n", | 551 | if (!bond->params.arp_targets[0]) |
552 | bond->dev->name); | 552 | pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n", |
553 | bond->dev->name); | ||
553 | } | 554 | } |
554 | if (bond->dev->flags & IFF_UP) { | 555 | if (bond->dev->flags & IFF_UP) { |
555 | /* If the interface is up, we may need to fire off | 556 | /* If the interface is up, we may need to fire off |
@@ -557,10 +558,13 @@ static ssize_t bonding_store_arp_interval(struct device *d, | |||
557 | * timer will get fired off when the open function | 558 | * timer will get fired off when the open function |
558 | * is called. | 559 | * is called. |
559 | */ | 560 | */ |
560 | cancel_delayed_work_sync(&bond->mii_work); | 561 | if (!new_value) { |
561 | queue_delayed_work(bond->wq, &bond->arp_work, 0); | 562 | cancel_delayed_work_sync(&bond->arp_work); |
563 | } else { | ||
564 | cancel_delayed_work_sync(&bond->mii_work); | ||
565 | queue_delayed_work(bond->wq, &bond->arp_work, 0); | ||
566 | } | ||
562 | } | 567 | } |
563 | |||
564 | out: | 568 | out: |
565 | rtnl_unlock(); | 569 | rtnl_unlock(); |
566 | return ret; | 570 | return ret; |
@@ -702,7 +706,7 @@ static ssize_t bonding_store_downdelay(struct device *d, | |||
702 | } | 706 | } |
703 | if (new_value < 0) { | 707 | if (new_value < 0) { |
704 | pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n", | 708 | pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n", |
705 | bond->dev->name, new_value, 1, INT_MAX); | 709 | bond->dev->name, new_value, 0, INT_MAX); |
706 | ret = -EINVAL; | 710 | ret = -EINVAL; |
707 | goto out; | 711 | goto out; |
708 | } else { | 712 | } else { |
@@ -757,8 +761,8 @@ static ssize_t bonding_store_updelay(struct device *d, | |||
757 | goto out; | 761 | goto out; |
758 | } | 762 | } |
759 | if (new_value < 0) { | 763 | if (new_value < 0) { |
760 | pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n", | 764 | pr_err("%s: Invalid up delay value %d not in range %d-%d; rejected.\n", |
761 | bond->dev->name, new_value, 1, INT_MAX); | 765 | bond->dev->name, new_value, 0, INT_MAX); |
762 | ret = -EINVAL; | 766 | ret = -EINVAL; |
763 | goto out; | 767 | goto out; |
764 | } else { | 768 | } else { |
@@ -968,37 +972,37 @@ static ssize_t bonding_store_miimon(struct device *d, | |||
968 | } | 972 | } |
969 | if (new_value < 0) { | 973 | if (new_value < 0) { |
970 | pr_err("%s: Invalid miimon value %d not in range %d-%d; rejected.\n", | 974 | pr_err("%s: Invalid miimon value %d not in range %d-%d; rejected.\n", |
971 | bond->dev->name, new_value, 1, INT_MAX); | 975 | bond->dev->name, new_value, 0, INT_MAX); |
972 | ret = -EINVAL; | 976 | ret = -EINVAL; |
973 | goto out; | 977 | goto out; |
974 | } else { | 978 | } |
975 | pr_info("%s: Setting MII monitoring interval to %d.\n", | 979 | pr_info("%s: Setting MII monitoring interval to %d.\n", |
976 | bond->dev->name, new_value); | 980 | bond->dev->name, new_value); |
977 | bond->params.miimon = new_value; | 981 | bond->params.miimon = new_value; |
978 | if (bond->params.updelay) | 982 | if (bond->params.updelay) |
979 | pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n", | 983 | pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n", |
980 | bond->dev->name, | 984 | bond->dev->name, |
981 | bond->params.updelay * bond->params.miimon); | 985 | bond->params.updelay * bond->params.miimon); |
982 | if (bond->params.downdelay) | 986 | if (bond->params.downdelay) |
983 | pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n", | 987 | pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n", |
984 | bond->dev->name, | 988 | bond->dev->name, |
985 | bond->params.downdelay * bond->params.miimon); | 989 | bond->params.downdelay * bond->params.miimon); |
986 | if (bond->params.arp_interval) { | 990 | if (new_value && bond->params.arp_interval) { |
987 | pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n", | 991 | pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n", |
988 | bond->dev->name); | 992 | bond->dev->name); |
989 | bond->params.arp_interval = 0; | 993 | bond->params.arp_interval = 0; |
990 | if (bond->params.arp_validate) { | 994 | if (bond->params.arp_validate) |
991 | bond->params.arp_validate = | 995 | bond->params.arp_validate = BOND_ARP_VALIDATE_NONE; |
992 | BOND_ARP_VALIDATE_NONE; | 996 | } |
993 | } | 997 | if (bond->dev->flags & IFF_UP) { |
994 | } | 998 | /* If the interface is up, we may need to fire off |
995 | 999 | * the MII timer. If the interface is down, the | |
996 | if (bond->dev->flags & IFF_UP) { | 1000 | * timer will get fired off when the open function |
997 | /* If the interface is up, we may need to fire off | 1001 | * is called. |
998 | * the MII timer. If the interface is down, the | 1002 | */ |
999 | * timer will get fired off when the open function | 1003 | if (!new_value) { |
1000 | * is called. | 1004 | cancel_delayed_work_sync(&bond->mii_work); |
1001 | */ | 1005 | } else { |
1002 | cancel_delayed_work_sync(&bond->arp_work); | 1006 | cancel_delayed_work_sync(&bond->arp_work); |
1003 | queue_delayed_work(bond->wq, &bond->mii_work, 0); | 1007 | queue_delayed_work(bond->wq, &bond->mii_work, 0); |
1004 | } | 1008 | } |
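Both sysfs handlers above now treat 0 as "turn this monitor off": writing 0 cancels the corresponding delayed work instead of rescheduling it, and only a non-zero value disables the competing monitor (ARP and MII monitoring are mutually exclusive). A condensed sketch of that pattern with illustrative names, not the bonding code:

    #include <linux/workqueue.h>

    /* 'enable' is the work for the monitor being configured; 'other' is the
     * mutually exclusive monitor that must stop when this one starts.
     */
    static void set_monitor_interval(struct workqueue_struct *wq,
    				 struct delayed_work *enable,
    				 struct delayed_work *other,
    				 int new_value, bool iface_up)
    {
    	if (!iface_up)
    		return;		/* open() will start the right timer later */

    	if (!new_value) {
    		cancel_delayed_work_sync(enable);	/* 0 means "disable" */
    	} else {
    		cancel_delayed_work_sync(other);	/* monitors are exclusive */
    		queue_delayed_work(wq, enable, 0);	/* kick ours immediately */
    	}
    }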
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c index f32b9fc6a983..9aa0c64c33c8 100644 --- a/drivers/net/can/mcp251x.c +++ b/drivers/net/can/mcp251x.c | |||
@@ -929,6 +929,7 @@ static int mcp251x_open(struct net_device *net) | |||
929 | struct mcp251x_priv *priv = netdev_priv(net); | 929 | struct mcp251x_priv *priv = netdev_priv(net); |
930 | struct spi_device *spi = priv->spi; | 930 | struct spi_device *spi = priv->spi; |
931 | struct mcp251x_platform_data *pdata = spi->dev.platform_data; | 931 | struct mcp251x_platform_data *pdata = spi->dev.platform_data; |
932 | unsigned long flags; | ||
932 | int ret; | 933 | int ret; |
933 | 934 | ||
934 | ret = open_candev(net); | 935 | ret = open_candev(net); |
@@ -945,9 +946,14 @@ static int mcp251x_open(struct net_device *net) | |||
945 | priv->tx_skb = NULL; | 946 | priv->tx_skb = NULL; |
946 | priv->tx_len = 0; | 947 | priv->tx_len = 0; |
947 | 948 | ||
949 | flags = IRQF_ONESHOT; | ||
950 | if (pdata->irq_flags) | ||
951 | flags |= pdata->irq_flags; | ||
952 | else | ||
953 | flags |= IRQF_TRIGGER_FALLING; | ||
954 | |||
948 | ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist, | 955 | ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist, |
949 | pdata->irq_flags ? pdata->irq_flags : IRQF_TRIGGER_FALLING, | 956 | flags, DEVICE_NAME, priv); |
950 | DEVICE_NAME, priv); | ||
951 | if (ret) { | 957 | if (ret) { |
952 | dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq); | 958 | dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq); |
953 | if (pdata->transceiver_enable) | 959 | if (pdata->transceiver_enable) |
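The mcp251x change above matters because request_threaded_irq() with a NULL primary handler is rejected unless IRQF_ONESHOT is set, so the flag is now ORed into whatever trigger flags the platform data supplies. A hedged sketch of the call shape, with illustrative handler and device names:

    #include <linux/interrupt.h>

    static irqreturn_t my_threaded_isr(int irq, void *dev_id)
    {
    	/* ... do the (sleeping) SPI transactions here ... */
    	return IRQ_HANDLED;
    }

    static int my_request_can_irq(int irq, unsigned long pdata_irq_flags,
    			      void *priv)
    {
    	unsigned long flags = IRQF_ONESHOT;

    	flags |= pdata_irq_flags ? pdata_irq_flags : IRQF_TRIGGER_FALLING;

    	/* A NULL primary handler is only accepted together with
    	 * IRQF_ONESHOT; the line stays masked until the thread has run.
    	 */
    	return request_threaded_irq(irq, NULL, my_threaded_isr, flags,
    				    "my-can", priv);
    }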
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig index b39ca5b3ea7f..ff2ba86cd4a4 100644 --- a/drivers/net/can/sja1000/Kconfig +++ b/drivers/net/can/sja1000/Kconfig | |||
@@ -46,6 +46,7 @@ config CAN_EMS_PCI | |||
46 | config CAN_PEAK_PCMCIA | 46 | config CAN_PEAK_PCMCIA |
47 | tristate "PEAK PCAN-PC Card" | 47 | tristate "PEAK PCAN-PC Card" |
48 | depends on PCMCIA | 48 | depends on PCMCIA |
49 | depends on HAS_IOPORT | ||
49 | ---help--- | 50 | ---help--- |
50 | This driver is for the PCAN-PC Card PCMCIA adapter (1 or 2 channels) | 51 | This driver is for the PCAN-PC Card PCMCIA adapter (1 or 2 channels) |
51 | from PEAK-System (http://www.peak-system.com). To compile this | 52 | from PEAK-System (http://www.peak-system.com). To compile this |
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c index a042cdc260dc..3c18d7d000ed 100644 --- a/drivers/net/can/sja1000/plx_pci.c +++ b/drivers/net/can/sja1000/plx_pci.c | |||
@@ -348,7 +348,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv) | |||
348 | */ | 348 | */ |
349 | if ((priv->read_reg(priv, REG_CR) & REG_CR_BASICCAN_INITIAL_MASK) == | 349 | if ((priv->read_reg(priv, REG_CR) & REG_CR_BASICCAN_INITIAL_MASK) == |
350 | REG_CR_BASICCAN_INITIAL && | 350 | REG_CR_BASICCAN_INITIAL && |
351 | (priv->read_reg(priv, REG_SR) == REG_SR_BASICCAN_INITIAL) && | 351 | (priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_BASICCAN_INITIAL) && |
352 | (priv->read_reg(priv, REG_IR) == REG_IR_BASICCAN_INITIAL)) | 352 | (priv->read_reg(priv, REG_IR) == REG_IR_BASICCAN_INITIAL)) |
353 | flag = 1; | 353 | flag = 1; |
354 | 354 | ||
@@ -360,7 +360,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv) | |||
360 | * See states on p. 23 of the Datasheet. | 360 | * See states on p. 23 of the Datasheet. |
361 | */ | 361 | */ |
362 | if (priv->read_reg(priv, REG_MOD) == REG_MOD_PELICAN_INITIAL && | 362 | if (priv->read_reg(priv, REG_MOD) == REG_MOD_PELICAN_INITIAL && |
363 | priv->read_reg(priv, REG_SR) == REG_SR_PELICAN_INITIAL && | 363 | priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_PELICAN_INITIAL && |
364 | priv->read_reg(priv, REG_IR) == REG_IR_PELICAN_INITIAL) | 364 | priv->read_reg(priv, REG_IR) == REG_IR_PELICAN_INITIAL) |
365 | return flag; | 365 | return flag; |
366 | 366 | ||
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index daf4013a8fc7..e4df307eaa90 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c | |||
@@ -92,7 +92,7 @@ static void sja1000_write_cmdreg(struct sja1000_priv *priv, u8 val) | |||
92 | */ | 92 | */ |
93 | spin_lock_irqsave(&priv->cmdreg_lock, flags); | 93 | spin_lock_irqsave(&priv->cmdreg_lock, flags); |
94 | priv->write_reg(priv, REG_CMR, val); | 94 | priv->write_reg(priv, REG_CMR, val); |
95 | priv->read_reg(priv, REG_SR); | 95 | priv->read_reg(priv, SJA1000_REG_SR); |
96 | spin_unlock_irqrestore(&priv->cmdreg_lock, flags); | 96 | spin_unlock_irqrestore(&priv->cmdreg_lock, flags); |
97 | } | 97 | } |
98 | 98 | ||
@@ -502,7 +502,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id) | |||
502 | 502 | ||
503 | while ((isrc = priv->read_reg(priv, REG_IR)) && (n < SJA1000_MAX_IRQ)) { | 503 | while ((isrc = priv->read_reg(priv, REG_IR)) && (n < SJA1000_MAX_IRQ)) { |
504 | n++; | 504 | n++; |
505 | status = priv->read_reg(priv, REG_SR); | 505 | status = priv->read_reg(priv, SJA1000_REG_SR); |
506 | /* check for absent controller due to hw unplug */ | 506 | /* check for absent controller due to hw unplug */ |
507 | if (status == 0xFF && sja1000_is_absent(priv)) | 507 | if (status == 0xFF && sja1000_is_absent(priv)) |
508 | return IRQ_NONE; | 508 | return IRQ_NONE; |
@@ -530,7 +530,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id) | |||
530 | /* receive interrupt */ | 530 | /* receive interrupt */ |
531 | while (status & SR_RBS) { | 531 | while (status & SR_RBS) { |
532 | sja1000_rx(dev); | 532 | sja1000_rx(dev); |
533 | status = priv->read_reg(priv, REG_SR); | 533 | status = priv->read_reg(priv, SJA1000_REG_SR); |
534 | /* check for absent controller */ | 534 | /* check for absent controller */ |
535 | if (status == 0xFF && sja1000_is_absent(priv)) | 535 | if (status == 0xFF && sja1000_is_absent(priv)) |
536 | return IRQ_NONE; | 536 | return IRQ_NONE; |
diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h index afa99847a510..aa48e053da27 100644 --- a/drivers/net/can/sja1000/sja1000.h +++ b/drivers/net/can/sja1000/sja1000.h | |||
@@ -56,7 +56,7 @@ | |||
56 | /* SJA1000 registers - manual section 6.4 (Pelican Mode) */ | 56 | /* SJA1000 registers - manual section 6.4 (Pelican Mode) */ |
57 | #define REG_MOD 0x00 | 57 | #define REG_MOD 0x00 |
58 | #define REG_CMR 0x01 | 58 | #define REG_CMR 0x01 |
59 | #define REG_SR 0x02 | 59 | #define SJA1000_REG_SR 0x02 |
60 | #define REG_IR 0x03 | 60 | #define REG_IR 0x03 |
61 | #define REG_IER 0x04 | 61 | #define REG_IER 0x04 |
62 | #define REG_ALC 0x0B | 62 | #define REG_ALC 0x0B |
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c index 6433b81256cd..8e0c4a001939 100644 --- a/drivers/net/can/sja1000/sja1000_of_platform.c +++ b/drivers/net/can/sja1000/sja1000_of_platform.c | |||
@@ -96,8 +96,8 @@ static int sja1000_ofp_probe(struct platform_device *ofdev) | |||
96 | struct net_device *dev; | 96 | struct net_device *dev; |
97 | struct sja1000_priv *priv; | 97 | struct sja1000_priv *priv; |
98 | struct resource res; | 98 | struct resource res; |
99 | const u32 *prop; | 99 | u32 prop; |
100 | int err, irq, res_size, prop_size; | 100 | int err, irq, res_size; |
101 | void __iomem *base; | 101 | void __iomem *base; |
102 | 102 | ||
103 | err = of_address_to_resource(np, 0, &res); | 103 | err = of_address_to_resource(np, 0, &res); |
@@ -138,27 +138,27 @@ static int sja1000_ofp_probe(struct platform_device *ofdev) | |||
138 | priv->read_reg = sja1000_ofp_read_reg; | 138 | priv->read_reg = sja1000_ofp_read_reg; |
139 | priv->write_reg = sja1000_ofp_write_reg; | 139 | priv->write_reg = sja1000_ofp_write_reg; |
140 | 140 | ||
141 | prop = of_get_property(np, "nxp,external-clock-frequency", &prop_size); | 141 | err = of_property_read_u32(np, "nxp,external-clock-frequency", &prop); |
142 | if (prop && (prop_size == sizeof(u32))) | 142 | if (!err) |
143 | priv->can.clock.freq = *prop / 2; | 143 | priv->can.clock.freq = prop / 2; |
144 | else | 144 | else |
145 | priv->can.clock.freq = SJA1000_OFP_CAN_CLOCK; /* default */ | 145 | priv->can.clock.freq = SJA1000_OFP_CAN_CLOCK; /* default */ |
146 | 146 | ||
147 | prop = of_get_property(np, "nxp,tx-output-mode", &prop_size); | 147 | err = of_property_read_u32(np, "nxp,tx-output-mode", &prop); |
148 | if (prop && (prop_size == sizeof(u32))) | 148 | if (!err) |
149 | priv->ocr |= *prop & OCR_MODE_MASK; | 149 | priv->ocr |= prop & OCR_MODE_MASK; |
150 | else | 150 | else |
151 | priv->ocr |= OCR_MODE_NORMAL; /* default */ | 151 | priv->ocr |= OCR_MODE_NORMAL; /* default */ |
152 | 152 | ||
153 | prop = of_get_property(np, "nxp,tx-output-config", &prop_size); | 153 | err = of_property_read_u32(np, "nxp,tx-output-config", &prop); |
154 | if (prop && (prop_size == sizeof(u32))) | 154 | if (!err) |
155 | priv->ocr |= (*prop << OCR_TX_SHIFT) & OCR_TX_MASK; | 155 | priv->ocr |= (prop << OCR_TX_SHIFT) & OCR_TX_MASK; |
156 | else | 156 | else |
157 | priv->ocr |= OCR_TX0_PULLDOWN; /* default */ | 157 | priv->ocr |= OCR_TX0_PULLDOWN; /* default */ |
158 | 158 | ||
159 | prop = of_get_property(np, "nxp,clock-out-frequency", &prop_size); | 159 | err = of_property_read_u32(np, "nxp,clock-out-frequency", &prop); |
160 | if (prop && (prop_size == sizeof(u32)) && *prop) { | 160 | if (!err && prop) { |
161 | u32 divider = priv->can.clock.freq * 2 / *prop; | 161 | u32 divider = priv->can.clock.freq * 2 / prop; |
162 | 162 | ||
163 | if (divider > 1) | 163 | if (divider > 1) |
164 | priv->cdr |= divider / 2 - 1; | 164 | priv->cdr |= divider / 2 - 1; |
@@ -168,8 +168,7 @@ static int sja1000_ofp_probe(struct platform_device *ofdev) | |||
168 | priv->cdr |= CDR_CLK_OFF; /* default */ | 168 | priv->cdr |= CDR_CLK_OFF; /* default */ |
169 | } | 169 | } |
170 | 170 | ||
171 | prop = of_get_property(np, "nxp,no-comparator-bypass", NULL); | 171 | if (!of_property_read_bool(np, "nxp,no-comparator-bypass")) |
172 | if (!prop) | ||
173 | priv->cdr |= CDR_CBP; /* default */ | 172 | priv->cdr |= CDR_CBP; /* default */ |
174 | 173 | ||
175 | priv->irq_flags = IRQF_SHARED; | 174 | priv->irq_flags = IRQF_SHARED; |
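The sja1000_of_platform conversion above replaces raw of_get_property() pointer-and-length checking with of_property_read_u32() and of_property_read_bool(), which validate presence and size in one call. A minimal sketch of the idiom; the property name is taken from the hunk, while the function name and fallback constant are illustrative:

    #include <linux/of.h>

    /* Return the CAN core clock in Hz, falling back to a default when the
     * DT property is absent or malformed.
     */
    static u32 my_get_can_clock(struct device_node *np)
    {
    	u32 val;

    	/* 0 means the property exists and holds exactly one u32 cell. */
    	if (!of_property_read_u32(np, "nxp,external-clock-frequency", &val))
    		return val / 2;

    	return 8000000;	/* assumed default: half of a 16 MHz oscillator */
    }

of_property_read_bool() works the same way for flag properties: presence alone is the answer, so no length check is needed, which is why the no-comparator-bypass lookup above shrinks to a single line.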
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c index cab306a9888e..e1d26433d619 100644 --- a/drivers/net/ethernet/8390/ax88796.c +++ b/drivers/net/ethernet/8390/ax88796.c | |||
@@ -828,7 +828,7 @@ static int ax_probe(struct platform_device *pdev) | |||
828 | struct ei_device *ei_local; | 828 | struct ei_device *ei_local; |
829 | struct ax_device *ax; | 829 | struct ax_device *ax; |
830 | struct resource *irq, *mem, *mem2; | 830 | struct resource *irq, *mem, *mem2; |
831 | resource_size_t mem_size, mem2_size = 0; | 831 | unsigned long mem_size, mem2_size = 0; |
832 | int ret = 0; | 832 | int ret = 0; |
833 | 833 | ||
834 | dev = ax__alloc_ei_netdev(sizeof(struct ax_device)); | 834 | dev = ax__alloc_ei_netdev(sizeof(struct ax_device)); |
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h index 829b5ad71d0d..b5fd934585e9 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e.h +++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h | |||
@@ -186,7 +186,7 @@ struct atl1e_tpd_desc { | |||
186 | /* how about 0x2000 */ | 186 | /* how about 0x2000 */ |
187 | #define MAX_TX_BUF_LEN 0x2000 | 187 | #define MAX_TX_BUF_LEN 0x2000 |
188 | #define MAX_TX_BUF_SHIFT 13 | 188 | #define MAX_TX_BUF_SHIFT 13 |
189 | /*#define MAX_TX_BUF_LEN 0x3000 */ | 189 | #define MAX_TSO_SEG_SIZE 0x3c00 |
190 | 190 | ||
191 | /* rrs word 1 bit 0:31 */ | 191 | /* rrs word 1 bit 0:31 */ |
192 | #define RRS_RX_CSUM_MASK 0xFFFF | 192 | #define RRS_RX_CSUM_MASK 0xFFFF |
@@ -438,7 +438,6 @@ struct atl1e_adapter { | |||
438 | struct atl1e_hw hw; | 438 | struct atl1e_hw hw; |
439 | struct atl1e_hw_stats hw_stats; | 439 | struct atl1e_hw_stats hw_stats; |
440 | 440 | ||
441 | bool have_msi; | ||
442 | u32 wol; | 441 | u32 wol; |
443 | u16 link_speed; | 442 | u16 link_speed; |
444 | u16 link_duplex; | 443 | u16 link_duplex; |
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index 92f4734f860d..ac25f05ff68f 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c | |||
@@ -1849,34 +1849,19 @@ static void atl1e_free_irq(struct atl1e_adapter *adapter) | |||
1849 | struct net_device *netdev = adapter->netdev; | 1849 | struct net_device *netdev = adapter->netdev; |
1850 | 1850 | ||
1851 | free_irq(adapter->pdev->irq, netdev); | 1851 | free_irq(adapter->pdev->irq, netdev); |
1852 | |||
1853 | if (adapter->have_msi) | ||
1854 | pci_disable_msi(adapter->pdev); | ||
1855 | } | 1852 | } |
1856 | 1853 | ||
1857 | static int atl1e_request_irq(struct atl1e_adapter *adapter) | 1854 | static int atl1e_request_irq(struct atl1e_adapter *adapter) |
1858 | { | 1855 | { |
1859 | struct pci_dev *pdev = adapter->pdev; | 1856 | struct pci_dev *pdev = adapter->pdev; |
1860 | struct net_device *netdev = adapter->netdev; | 1857 | struct net_device *netdev = adapter->netdev; |
1861 | int flags = 0; | ||
1862 | int err = 0; | 1858 | int err = 0; |
1863 | 1859 | ||
1864 | adapter->have_msi = true; | 1860 | err = request_irq(pdev->irq, atl1e_intr, IRQF_SHARED, netdev->name, |
1865 | err = pci_enable_msi(pdev); | 1861 | netdev); |
1866 | if (err) { | ||
1867 | netdev_dbg(netdev, | ||
1868 | "Unable to allocate MSI interrupt Error: %d\n", err); | ||
1869 | adapter->have_msi = false; | ||
1870 | } | ||
1871 | |||
1872 | if (!adapter->have_msi) | ||
1873 | flags |= IRQF_SHARED; | ||
1874 | err = request_irq(pdev->irq, atl1e_intr, flags, netdev->name, netdev); | ||
1875 | if (err) { | 1862 | if (err) { |
1876 | netdev_dbg(adapter->netdev, | 1863 | netdev_dbg(adapter->netdev, |
1877 | "Unable to allocate interrupt Error: %d\n", err); | 1864 | "Unable to allocate interrupt Error: %d\n", err); |
1878 | if (adapter->have_msi) | ||
1879 | pci_disable_msi(pdev); | ||
1880 | return err; | 1865 | return err; |
1881 | } | 1866 | } |
1882 | netdev_dbg(netdev, "atl1e_request_irq OK\n"); | 1867 | netdev_dbg(netdev, "atl1e_request_irq OK\n"); |
@@ -2344,6 +2329,7 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2344 | 2329 | ||
2345 | INIT_WORK(&adapter->reset_task, atl1e_reset_task); | 2330 | INIT_WORK(&adapter->reset_task, atl1e_reset_task); |
2346 | INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task); | 2331 | INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task); |
2332 | netif_set_gso_max_size(netdev, MAX_TSO_SEG_SIZE); | ||
2347 | err = register_netdev(netdev); | 2333 | err = register_netdev(netdev); |
2348 | if (err) { | 2334 | if (err) { |
2349 | netdev_err(netdev, "register netdevice failed\n"); | 2335 | netdev_err(netdev, "register netdevice failed\n"); |
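Alongside dropping the MSI path, the atl1e hunk above caps the GSO segment size before register_netdev() so the stack never builds a TSO segment larger than the hardware's per-buffer limit (the new MAX_TSO_SEG_SIZE of 0x3c00). A sketch of that probe-time call with illustrative names:

    #include <linux/netdevice.h>

    #define MY_MAX_TSO_SEG_SIZE	0x3c00	/* device DMA limit, per the header change */

    static int my_register(struct net_device *netdev)
    {
    	/* Must be set before register_netdev() so the limit is in place
    	 * as soon as the device can transmit.
    	 */
    	netif_set_gso_max_size(netdev, MY_MAX_TSO_SEG_SIZE);
    	return register_netdev(netdev);
    }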
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 4046f97378c2..57619dd4a92b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | |||
@@ -2614,6 +2614,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
2614 | } | 2614 | } |
2615 | } | 2615 | } |
2616 | 2616 | ||
2617 | /* initialize FW coalescing state machines in RAM */ | ||
2618 | bnx2x_update_coalesce(bp); | ||
2619 | |||
2617 | /* setup the leading queue */ | 2620 | /* setup the leading queue */ |
2618 | rc = bnx2x_setup_leading(bp); | 2621 | rc = bnx2x_setup_leading(bp); |
2619 | if (rc) { | 2622 | if (rc) { |
@@ -4580,11 +4583,11 @@ static void storm_memset_hc_disable(struct bnx2x *bp, u8 port, | |||
4580 | u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); | 4583 | u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); |
4581 | u32 addr = BAR_CSTRORM_INTMEM + | 4584 | u32 addr = BAR_CSTRORM_INTMEM + |
4582 | CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index); | 4585 | CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index); |
4583 | u16 flags = REG_RD16(bp, addr); | 4586 | u8 flags = REG_RD8(bp, addr); |
4584 | /* clear and set */ | 4587 | /* clear and set */ |
4585 | flags &= ~HC_INDEX_DATA_HC_ENABLED; | 4588 | flags &= ~HC_INDEX_DATA_HC_ENABLED; |
4586 | flags |= enable_flag; | 4589 | flags |= enable_flag; |
4587 | REG_WR16(bp, addr, flags); | 4590 | REG_WR8(bp, addr, flags); |
4588 | DP(NETIF_MSG_IFUP, | 4591 | DP(NETIF_MSG_IFUP, |
4589 | "port %x fw_sb_id %d sb_index %d disable %d\n", | 4592 | "port %x fw_sb_id %d sb_index %d disable %d\n", |
4590 | port, fw_sb_id, sb_index, disable); | 4593 | port, fw_sb_id, sb_index, disable); |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 77ebae0ac64a..0283f343b0d1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | |||
@@ -13437,13 +13437,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params, | |||
13437 | { | 13437 | { |
13438 | struct bnx2x *bp = params->bp; | 13438 | struct bnx2x *bp = params->bp; |
13439 | u16 base_page, next_page, not_kr2_device, lane; | 13439 | u16 base_page, next_page, not_kr2_device, lane; |
13440 | int sigdet = bnx2x_warpcore_get_sigdet(phy, params); | 13440 | int sigdet; |
13441 | |||
13442 | if (!sigdet) { | ||
13443 | if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) | ||
13444 | bnx2x_kr2_recovery(params, vars, phy); | ||
13445 | return; | ||
13446 | } | ||
13447 | 13441 | ||
13448 | /* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery | 13442 | /* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery |
13449 | * since some switches tend to reinit the AN process and clear the | 13443 | * since some switches tend to reinit the AN process and clear the |
@@ -13454,6 +13448,16 @@ static void bnx2x_check_kr2_wa(struct link_params *params, | |||
13454 | vars->check_kr2_recovery_cnt--; | 13448 | vars->check_kr2_recovery_cnt--; |
13455 | return; | 13449 | return; |
13456 | } | 13450 | } |
13451 | |||
13452 | sigdet = bnx2x_warpcore_get_sigdet(phy, params); | ||
13453 | if (!sigdet) { | ||
13454 | if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { | ||
13455 | bnx2x_kr2_recovery(params, vars, phy); | ||
13456 | DP(NETIF_MSG_LINK, "No sigdet\n"); | ||
13457 | } | ||
13458 | return; | ||
13459 | } | ||
13460 | |||
13457 | lane = bnx2x_get_warpcore_lane(phy, params); | 13461 | lane = bnx2x_get_warpcore_lane(phy, params); |
13458 | CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, | 13462 | CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, |
13459 | MDIO_AER_BLOCK_AER_REG, lane); | 13463 | MDIO_AER_BLOCK_AER_REG, lane); |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index e81a747ea8ce..c50696b396f1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
@@ -4947,7 +4947,7 @@ static void bnx2x_after_function_update(struct bnx2x *bp) | |||
4947 | q); | 4947 | q); |
4948 | } | 4948 | } |
4949 | 4949 | ||
4950 | if (!NO_FCOE(bp)) { | 4950 | if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) { |
4951 | fp = &bp->fp[FCOE_IDX(bp)]; | 4951 | fp = &bp->fp[FCOE_IDX(bp)]; |
4952 | queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; | 4952 | queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; |
4953 | 4953 | ||
@@ -9878,6 +9878,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp) | |||
9878 | REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0); | 9878 | REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0); |
9879 | } | 9879 | } |
9880 | } | 9880 | } |
9881 | if (!CHIP_IS_E1x(bp)) | ||
9882 | /* block FW from writing to host */ | ||
9883 | REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); | ||
9884 | |||
9881 | /* wait until BRB is empty */ | 9885 | /* wait until BRB is empty */ |
9882 | tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS); | 9886 | tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS); |
9883 | while (timer_count) { | 9887 | while (timer_count) { |
@@ -13354,6 +13358,7 @@ static int bnx2x_unregister_cnic(struct net_device *dev) | |||
13354 | RCU_INIT_POINTER(bp->cnic_ops, NULL); | 13358 | RCU_INIT_POINTER(bp->cnic_ops, NULL); |
13355 | mutex_unlock(&bp->cnic_mutex); | 13359 | mutex_unlock(&bp->cnic_mutex); |
13356 | synchronize_rcu(); | 13360 | synchronize_rcu(); |
13361 | bp->cnic_enabled = false; | ||
13357 | kfree(bp->cnic_kwq); | 13362 | kfree(bp->cnic_kwq); |
13358 | bp->cnic_kwq = NULL; | 13363 | bp->cnic_kwq = NULL; |
13359 | 13364 | ||
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 67d2663b3974..17a972734ba7 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
@@ -14604,8 +14604,11 @@ static void tg3_read_vpd(struct tg3 *tp) | |||
14604 | if (j + len > block_end) | 14604 | if (j + len > block_end) |
14605 | goto partno; | 14605 | goto partno; |
14606 | 14606 | ||
14607 | memcpy(tp->fw_ver, &vpd_data[j], len); | 14607 | if (len >= sizeof(tp->fw_ver)) |
14608 | strncat(tp->fw_ver, " bc ", vpdlen - len - 1); | 14608 | len = sizeof(tp->fw_ver) - 1; |
14609 | memset(tp->fw_ver, 0, sizeof(tp->fw_ver)); | ||
14610 | snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, | ||
14611 | &vpd_data[j]); | ||
14609 | } | 14612 | } |
14610 | 14613 | ||
14611 | partno: | 14614 | partno: |
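The tg3 fix above replaces memcpy() plus strncat() on a VPD field, which is not NUL-terminated and whose length comes from the device, with a clamped snprintf("%.*s"). A sketch of the bounded-copy idiom with illustrative buffer names:

    #include <linux/kernel.h>
    #include <linux/string.h>

    /* 'vpd' and 'len' come from the device; never trust them to fit. */
    static void copy_fw_ver(char *dst, size_t dst_len,
    			const u8 *vpd, unsigned int len)
    {
    	if (len >= dst_len)
    		len = dst_len - 1;

    	memset(dst, 0, dst_len);
    	/* "%.*s" stops after 'len' bytes even without a terminating NUL. */
    	snprintf(dst, dst_len, "%.*s bc ", (int)len, vpd);
    }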
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index a170065b5973..b0ebc9f6d55e 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c | |||
@@ -163,6 +163,7 @@ | |||
163 | #define XGMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */ | 163 | #define XGMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */ |
164 | 164 | ||
165 | /* XGMAC_INT_STAT reg */ | 165 | /* XGMAC_INT_STAT reg */ |
166 | #define XGMAC_INT_STAT_PMTIM 0x00800000 /* PMT Interrupt Mask */ | ||
166 | #define XGMAC_INT_STAT_PMT 0x0080 /* PMT Interrupt Status */ | 167 | #define XGMAC_INT_STAT_PMT 0x0080 /* PMT Interrupt Status */ |
167 | #define XGMAC_INT_STAT_LPI 0x0040 /* LPI Interrupt Status */ | 168 | #define XGMAC_INT_STAT_LPI 0x0040 /* LPI Interrupt Status */ |
168 | 169 | ||
@@ -960,6 +961,9 @@ static int xgmac_hw_init(struct net_device *dev) | |||
960 | writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS); | 961 | writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS); |
961 | writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA); | 962 | writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA); |
962 | 963 | ||
964 | /* Mask power mgt interrupt */ | ||
965 | writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT); | ||
966 | |||
963 | /* XGMAC requires AXI bus init. This is a 'magic number' for now */ | 967 | /* XGMAC requires AXI bus init. This is a 'magic number' for now */ |
964 | writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS); | 968 | writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS); |
965 | 969 | ||
@@ -1141,6 +1145,9 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit) | |||
1141 | struct sk_buff *skb; | 1145 | struct sk_buff *skb; |
1142 | int frame_len; | 1146 | int frame_len; |
1143 | 1147 | ||
1148 | if (!dma_ring_cnt(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ)) | ||
1149 | break; | ||
1150 | |||
1144 | entry = priv->rx_tail; | 1151 | entry = priv->rx_tail; |
1145 | p = priv->dma_rx + entry; | 1152 | p = priv->dma_rx + entry; |
1146 | if (desc_get_owner(p)) | 1153 | if (desc_get_owner(p)) |
@@ -1825,7 +1832,7 @@ static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode) | |||
1825 | unsigned int pmt = 0; | 1832 | unsigned int pmt = 0; |
1826 | 1833 | ||
1827 | if (mode & WAKE_MAGIC) | 1834 | if (mode & WAKE_MAGIC) |
1828 | pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT; | 1835 | pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT_EN; |
1829 | if (mode & WAKE_UCAST) | 1836 | if (mode & WAKE_UCAST) |
1830 | pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST; | 1837 | pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST; |
1831 | 1838 | ||
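Besides masking the PMT interrupt source and using the MAGIC_PKT enable bit, the xgmac changes above add a guard in xgmac_rx() so the reap loop stops once the driver owns no more descriptors, rather than trusting stale OWN bits. A generic sketch of that ring-occupancy check, assuming a power-of-two ring and hypothetical names:

    #include <linux/kernel.h>

    #define MY_RX_RING_SZ	256	/* illustrative; must be a power of two */

    struct my_rx_ring {
    	unsigned int head;	/* next descriptor to refill */
    	unsigned int tail;	/* next descriptor to reap */
    };

    /* Number of descriptors currently handed to the hardware. */
    static inline unsigned int my_ring_cnt(const struct my_rx_ring *r)
    {
    	return (r->head - r->tail) & (MY_RX_RING_SZ - 1);
    }

    /* Reap at most 'limit' frames, but never walk past what was refilled. */
    static int my_rx(struct my_rx_ring *r, int limit)
    {
    	int count = 0;

    	while (count < limit && my_ring_cnt(r)) {
    		/* ... check the descriptor OWN bit, pass the skb up ... */
    		r->tail = (r->tail + 1) & (MY_RX_RING_SZ - 1);
    		count++;
    	}
    	return count;
    }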
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 8cdf02503d13..9eada8e86078 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c | |||
@@ -257,6 +257,107 @@ static void dm9000_dumpblk_32bit(void __iomem *reg, int count) | |||
257 | tmp = readl(reg); | 257 | tmp = readl(reg); |
258 | } | 258 | } |
259 | 259 | ||
260 | /* | ||
261 | * Sleep, either by using msleep() or if we are suspending, then | ||
262 | * use mdelay() to sleep. | ||
263 | */ | ||
264 | static void dm9000_msleep(board_info_t *db, unsigned int ms) | ||
265 | { | ||
266 | if (db->in_suspend) | ||
267 | mdelay(ms); | ||
268 | else | ||
269 | msleep(ms); | ||
270 | } | ||
271 | |||
272 | /* Read a word from phyxcer */ | ||
273 | static int | ||
274 | dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg) | ||
275 | { | ||
276 | board_info_t *db = netdev_priv(dev); | ||
277 | unsigned long flags; | ||
278 | unsigned int reg_save; | ||
279 | int ret; | ||
280 | |||
281 | mutex_lock(&db->addr_lock); | ||
282 | |||
283 | spin_lock_irqsave(&db->lock, flags); | ||
284 | |||
285 | /* Save previous register address */ | ||
286 | reg_save = readb(db->io_addr); | ||
287 | |||
288 | /* Fill the phyxcer register into REG_0C */ | ||
289 | iow(db, DM9000_EPAR, DM9000_PHY | reg); | ||
290 | |||
291 | /* Issue phyxcer read command */ | ||
292 | iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS); | ||
293 | |||
294 | writeb(reg_save, db->io_addr); | ||
295 | spin_unlock_irqrestore(&db->lock, flags); | ||
296 | |||
297 | dm9000_msleep(db, 1); /* Wait read complete */ | ||
298 | |||
299 | spin_lock_irqsave(&db->lock, flags); | ||
300 | reg_save = readb(db->io_addr); | ||
301 | |||
302 | iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */ | ||
303 | |||
304 | /* The read data keeps on REG_0D & REG_0E */ | ||
305 | ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL); | ||
306 | |||
307 | /* restore the previous address */ | ||
308 | writeb(reg_save, db->io_addr); | ||
309 | spin_unlock_irqrestore(&db->lock, flags); | ||
310 | |||
311 | mutex_unlock(&db->addr_lock); | ||
312 | |||
313 | dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret); | ||
314 | return ret; | ||
315 | } | ||
316 | |||
317 | /* Write a word to phyxcer */ | ||
318 | static void | ||
319 | dm9000_phy_write(struct net_device *dev, | ||
320 | int phyaddr_unused, int reg, int value) | ||
321 | { | ||
322 | board_info_t *db = netdev_priv(dev); | ||
323 | unsigned long flags; | ||
324 | unsigned long reg_save; | ||
325 | |||
326 | dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value); | ||
327 | mutex_lock(&db->addr_lock); | ||
328 | |||
329 | spin_lock_irqsave(&db->lock, flags); | ||
330 | |||
331 | /* Save previous register address */ | ||
332 | reg_save = readb(db->io_addr); | ||
333 | |||
334 | /* Fill the phyxcer register into REG_0C */ | ||
335 | iow(db, DM9000_EPAR, DM9000_PHY | reg); | ||
336 | |||
337 | /* Fill the written data into REG_0D & REG_0E */ | ||
338 | iow(db, DM9000_EPDRL, value); | ||
339 | iow(db, DM9000_EPDRH, value >> 8); | ||
340 | |||
341 | /* Issue phyxcer write command */ | ||
342 | iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW); | ||
343 | |||
344 | writeb(reg_save, db->io_addr); | ||
345 | spin_unlock_irqrestore(&db->lock, flags); | ||
346 | |||
347 | dm9000_msleep(db, 1); /* Wait write complete */ | ||
348 | |||
349 | spin_lock_irqsave(&db->lock, flags); | ||
350 | reg_save = readb(db->io_addr); | ||
351 | |||
352 | iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */ | ||
353 | |||
354 | /* restore the previous address */ | ||
355 | writeb(reg_save, db->io_addr); | ||
356 | |||
357 | spin_unlock_irqrestore(&db->lock, flags); | ||
358 | mutex_unlock(&db->addr_lock); | ||
359 | } | ||
360 | |||
260 | /* dm9000_set_io | 361 | /* dm9000_set_io |
261 | * | 362 | * |
262 | * select the specified set of io routines to use with the | 363 | * select the specified set of io routines to use with the |
@@ -795,6 +896,9 @@ dm9000_init_dm9000(struct net_device *dev) | |||
795 | 896 | ||
796 | iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ | 897 | iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ |
797 | 898 | ||
899 | dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */ | ||
900 | dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM); /* Init */ | ||
901 | |||
798 | ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0; | 902 | ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0; |
799 | 903 | ||
800 | /* if wol is needed, then always set NCR_WAKEEN otherwise we end | 904 | /* if wol is needed, then always set NCR_WAKEEN otherwise we end |
@@ -1201,109 +1305,6 @@ dm9000_open(struct net_device *dev) | |||
1201 | return 0; | 1305 | return 0; |
1202 | } | 1306 | } |
1203 | 1307 | ||
1204 | /* | ||
1205 | * Sleep, either by using msleep() or if we are suspending, then | ||
1206 | * use mdelay() to sleep. | ||
1207 | */ | ||
1208 | static void dm9000_msleep(board_info_t *db, unsigned int ms) | ||
1209 | { | ||
1210 | if (db->in_suspend) | ||
1211 | mdelay(ms); | ||
1212 | else | ||
1213 | msleep(ms); | ||
1214 | } | ||
1215 | |||
1216 | /* | ||
1217 | * Read a word from phyxcer | ||
1218 | */ | ||
1219 | static int | ||
1220 | dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg) | ||
1221 | { | ||
1222 | board_info_t *db = netdev_priv(dev); | ||
1223 | unsigned long flags; | ||
1224 | unsigned int reg_save; | ||
1225 | int ret; | ||
1226 | |||
1227 | mutex_lock(&db->addr_lock); | ||
1228 | |||
1229 | spin_lock_irqsave(&db->lock,flags); | ||
1230 | |||
1231 | /* Save previous register address */ | ||
1232 | reg_save = readb(db->io_addr); | ||
1233 | |||
1234 | /* Fill the phyxcer register into REG_0C */ | ||
1235 | iow(db, DM9000_EPAR, DM9000_PHY | reg); | ||
1236 | |||
1237 | iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS); /* Issue phyxcer read command */ | ||
1238 | |||
1239 | writeb(reg_save, db->io_addr); | ||
1240 | spin_unlock_irqrestore(&db->lock,flags); | ||
1241 | |||
1242 | dm9000_msleep(db, 1); /* Wait read complete */ | ||
1243 | |||
1244 | spin_lock_irqsave(&db->lock,flags); | ||
1245 | reg_save = readb(db->io_addr); | ||
1246 | |||
1247 | iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */ | ||
1248 | |||
1249 | /* The read data keeps on REG_0D & REG_0E */ | ||
1250 | ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL); | ||
1251 | |||
1252 | /* restore the previous address */ | ||
1253 | writeb(reg_save, db->io_addr); | ||
1254 | spin_unlock_irqrestore(&db->lock,flags); | ||
1255 | |||
1256 | mutex_unlock(&db->addr_lock); | ||
1257 | |||
1258 | dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret); | ||
1259 | return ret; | ||
1260 | } | ||
1261 | |||
1262 | /* | ||
1263 | * Write a word to phyxcer | ||
1264 | */ | ||
1265 | static void | ||
1266 | dm9000_phy_write(struct net_device *dev, | ||
1267 | int phyaddr_unused, int reg, int value) | ||
1268 | { | ||
1269 | board_info_t *db = netdev_priv(dev); | ||
1270 | unsigned long flags; | ||
1271 | unsigned long reg_save; | ||
1272 | |||
1273 | dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value); | ||
1274 | mutex_lock(&db->addr_lock); | ||
1275 | |||
1276 | spin_lock_irqsave(&db->lock,flags); | ||
1277 | |||
1278 | /* Save previous register address */ | ||
1279 | reg_save = readb(db->io_addr); | ||
1280 | |||
1281 | /* Fill the phyxcer register into REG_0C */ | ||
1282 | iow(db, DM9000_EPAR, DM9000_PHY | reg); | ||
1283 | |||
1284 | /* Fill the written data into REG_0D & REG_0E */ | ||
1285 | iow(db, DM9000_EPDRL, value); | ||
1286 | iow(db, DM9000_EPDRH, value >> 8); | ||
1287 | |||
1288 | iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW); /* Issue phyxcer write command */ | ||
1289 | |||
1290 | writeb(reg_save, db->io_addr); | ||
1291 | spin_unlock_irqrestore(&db->lock, flags); | ||
1292 | |||
1293 | dm9000_msleep(db, 1); /* Wait write complete */ | ||
1294 | |||
1295 | spin_lock_irqsave(&db->lock,flags); | ||
1296 | reg_save = readb(db->io_addr); | ||
1297 | |||
1298 | iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */ | ||
1299 | |||
1300 | /* restore the previous address */ | ||
1301 | writeb(reg_save, db->io_addr); | ||
1302 | |||
1303 | spin_unlock_irqrestore(&db->lock, flags); | ||
1304 | mutex_unlock(&db->addr_lock); | ||
1305 | } | ||
1306 | |||
1307 | static void | 1308 | static void |
1308 | dm9000_shutdown(struct net_device *dev) | 1309 | dm9000_shutdown(struct net_device *dev) |
1309 | { | 1310 | { |
@@ -1502,7 +1503,12 @@ dm9000_probe(struct platform_device *pdev) | |||
1502 | db->flags |= DM9000_PLATF_SIMPLE_PHY; | 1503 | db->flags |= DM9000_PLATF_SIMPLE_PHY; |
1503 | #endif | 1504 | #endif |
1504 | 1505 | ||
1505 | dm9000_reset(db); | 1506 | /* Probe-time fix: rather than calling dm9000_reset(db), assert |
1507 | * NCR_MAC_LBK together with NCR_RST, since the MAC loopback bit is | ||
1508 | * needed to keep the DM9000 FIFO stable during the probe stage. | ||
1509 | */ | ||
1510 | |||
1511 | iow(db, DM9000_NCR, NCR_MAC_LBK | NCR_RST); | ||
1506 | 1512 | ||
1507 | /* try multiple times, DM9000 sometimes gets the read wrong */ | 1513 | /* try multiple times, DM9000 sometimes gets the read wrong */ |
1508 | for (i = 0; i < 8; i++) { | 1514 | for (i = 0; i < 8; i++) { |
diff --git a/drivers/net/ethernet/davicom/dm9000.h b/drivers/net/ethernet/davicom/dm9000.h index 55688bd1a3ef..9ce058adabab 100644 --- a/drivers/net/ethernet/davicom/dm9000.h +++ b/drivers/net/ethernet/davicom/dm9000.h | |||
@@ -69,7 +69,9 @@ | |||
69 | #define NCR_WAKEEN (1<<6) | 69 | #define NCR_WAKEEN (1<<6) |
70 | #define NCR_FCOL (1<<4) | 70 | #define NCR_FCOL (1<<4) |
71 | #define NCR_FDX (1<<3) | 71 | #define NCR_FDX (1<<3) |
72 | #define NCR_LBK (3<<1) | 72 | |
73 | #define NCR_RESERVED (3<<1) | ||
74 | #define NCR_MAC_LBK (1<<1) | ||
73 | #define NCR_RST (1<<0) | 75 | #define NCR_RST (1<<0) |
74 | 76 | ||
75 | #define NSR_SPEED (1<<7) | 77 | #define NSR_SPEED (1<<7) |
@@ -167,5 +169,12 @@ | |||
167 | #define ISR_LNKCHNG (1<<5) | 169 | #define ISR_LNKCHNG (1<<5) |
168 | #define ISR_UNDERRUN (1<<4) | 170 | #define ISR_UNDERRUN (1<<4) |
169 | 171 | ||
172 | /* Davicom MII registers. | ||
173 | */ | ||
174 | |||
175 | #define MII_DM_DSPCR 0x1b /* DSP Control Register */ | ||
176 | |||
177 | #define DSPCR_INIT_PARAM 0xE100 /* DSP init parameter */ | ||
178 | |||
170 | #endif /* _DM9000X_H_ */ | 179 | #endif /* _DM9000X_H_ */ |
171 | 180 | ||
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 08e54f3d288b..2886c9b63f90 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
@@ -759,8 +759,9 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter, | |||
759 | 759 | ||
760 | if (vlan_tx_tag_present(skb)) { | 760 | if (vlan_tx_tag_present(skb)) { |
761 | vlan_tag = be_get_tx_vlan_tag(adapter, skb); | 761 | vlan_tag = be_get_tx_vlan_tag(adapter, skb); |
762 | __vlan_put_tag(skb, vlan_tag); | 762 | skb = __vlan_put_tag(skb, vlan_tag); |
763 | skb->vlan_tci = 0; | 763 | if (skb) |
764 | skb->vlan_tci = 0; | ||
764 | } | 765 | } |
765 | 766 | ||
766 | return skb; | 767 | return skb; |
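The benet fix above accounts for __vlan_put_tag() possibly reallocating the skb: in this kernel the helper can free the original buffer and return NULL on failure, so its return value has to replace the caller's pointer before the skb is touched again. A hedged sketch of the pattern with an illustrative wrapper name:

    #include <linux/if_vlan.h>

    /* Push the tag into the payload and clear the out-of-band tci.
     * Returns NULL if the helper had to drop the packet.
     */
    static struct sk_buff *my_inline_vlan(struct sk_buff *skb, u16 vlan_tag)
    {
    	skb = __vlan_put_tag(skb, vlan_tag);	/* may free skb and return NULL */
    	if (!skb)
    		return NULL;

    	skb->vlan_tci = 0;
    	return skb;
    }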
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c index 911d0253dbb2..73195f643c9c 100644 --- a/drivers/net/ethernet/freescale/fec.c +++ b/drivers/net/ethernet/freescale/fec.c | |||
@@ -345,6 +345,53 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
345 | return NETDEV_TX_OK; | 345 | return NETDEV_TX_OK; |
346 | } | 346 | } |
347 | 347 | ||
348 | /* Init RX & TX buffer descriptors | ||
349 | */ | ||
350 | static void fec_enet_bd_init(struct net_device *dev) | ||
351 | { | ||
352 | struct fec_enet_private *fep = netdev_priv(dev); | ||
353 | struct bufdesc *bdp; | ||
354 | unsigned int i; | ||
355 | |||
356 | /* Initialize the receive buffer descriptors. */ | ||
357 | bdp = fep->rx_bd_base; | ||
358 | for (i = 0; i < RX_RING_SIZE; i++) { | ||
359 | |||
360 | /* Initialize the BD for every fragment in the page. */ | ||
361 | if (bdp->cbd_bufaddr) | ||
362 | bdp->cbd_sc = BD_ENET_RX_EMPTY; | ||
363 | else | ||
364 | bdp->cbd_sc = 0; | ||
365 | bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); | ||
366 | } | ||
367 | |||
368 | /* Set the last buffer to wrap */ | ||
369 | bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); | ||
370 | bdp->cbd_sc |= BD_SC_WRAP; | ||
371 | |||
372 | fep->cur_rx = fep->rx_bd_base; | ||
373 | |||
374 | /* ...and the same for transmit */ | ||
375 | bdp = fep->tx_bd_base; | ||
376 | fep->cur_tx = bdp; | ||
377 | for (i = 0; i < TX_RING_SIZE; i++) { | ||
378 | |||
379 | /* Initialize the BD for every fragment in the page. */ | ||
380 | bdp->cbd_sc = 0; | ||
381 | if (bdp->cbd_bufaddr && fep->tx_skbuff[i]) { | ||
382 | dev_kfree_skb_any(fep->tx_skbuff[i]); | ||
383 | fep->tx_skbuff[i] = NULL; | ||
384 | } | ||
385 | bdp->cbd_bufaddr = 0; | ||
386 | bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); | ||
387 | } | ||
388 | |||
389 | /* Set the last buffer to wrap */ | ||
390 | bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); | ||
391 | bdp->cbd_sc |= BD_SC_WRAP; | ||
392 | fep->dirty_tx = bdp; | ||
393 | } | ||
394 | |||
348 | /* This function is called to start or restart the FEC during a link | 395 | /* This function is called to start or restart the FEC during a link |
349 | * change. This only happens when switching between half and full | 396 | * change. This only happens when switching between half and full |
350 | * duplex. | 397 | * duplex. |
@@ -388,6 +435,8 @@ fec_restart(struct net_device *ndev, int duplex) | |||
388 | /* Set maximum receive buffer size. */ | 435 | /* Set maximum receive buffer size. */ |
389 | writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE); | 436 | writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE); |
390 | 437 | ||
438 | fec_enet_bd_init(ndev); | ||
439 | |||
391 | /* Set receive and transmit descriptor base. */ | 440 | /* Set receive and transmit descriptor base. */ |
392 | writel(fep->bd_dma, fep->hwp + FEC_R_DES_START); | 441 | writel(fep->bd_dma, fep->hwp + FEC_R_DES_START); |
393 | if (fep->bufdesc_ex) | 442 | if (fep->bufdesc_ex) |
@@ -397,7 +446,6 @@ fec_restart(struct net_device *ndev, int duplex) | |||
397 | writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) | 446 | writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) |
398 | * RX_RING_SIZE, fep->hwp + FEC_X_DES_START); | 447 | * RX_RING_SIZE, fep->hwp + FEC_X_DES_START); |
399 | 448 | ||
400 | fep->cur_rx = fep->rx_bd_base; | ||
401 | 449 | ||
402 | for (i = 0; i <= TX_RING_MOD_MASK; i++) { | 450 | for (i = 0; i <= TX_RING_MOD_MASK; i++) { |
403 | if (fep->tx_skbuff[i]) { | 451 | if (fep->tx_skbuff[i]) { |
@@ -954,6 +1002,7 @@ static void fec_enet_adjust_link(struct net_device *ndev) | |||
954 | } else { | 1002 | } else { |
955 | if (fep->link) { | 1003 | if (fep->link) { |
956 | fec_stop(ndev); | 1004 | fec_stop(ndev); |
1005 | fep->link = phy_dev->link; | ||
957 | status_change = 1; | 1006 | status_change = 1; |
958 | } | 1007 | } |
959 | } | 1008 | } |
@@ -1597,8 +1646,6 @@ static int fec_enet_init(struct net_device *ndev) | |||
1597 | { | 1646 | { |
1598 | struct fec_enet_private *fep = netdev_priv(ndev); | 1647 | struct fec_enet_private *fep = netdev_priv(ndev); |
1599 | struct bufdesc *cbd_base; | 1648 | struct bufdesc *cbd_base; |
1600 | struct bufdesc *bdp; | ||
1601 | unsigned int i; | ||
1602 | 1649 | ||
1603 | /* Allocate memory for buffer descriptors. */ | 1650 | /* Allocate memory for buffer descriptors. */ |
1604 | cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma, | 1651 | cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma, |
@@ -1608,6 +1655,7 @@ static int fec_enet_init(struct net_device *ndev) | |||
1608 | return -ENOMEM; | 1655 | return -ENOMEM; |
1609 | } | 1656 | } |
1610 | 1657 | ||
1658 | memset(cbd_base, 0, PAGE_SIZE); | ||
1611 | spin_lock_init(&fep->hw_lock); | 1659 | spin_lock_init(&fep->hw_lock); |
1612 | 1660 | ||
1613 | fep->netdev = ndev; | 1661 | fep->netdev = ndev; |
@@ -1631,35 +1679,6 @@ static int fec_enet_init(struct net_device *ndev) | |||
1631 | writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); | 1679 | writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); |
1632 | netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT); | 1680 | netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT); |
1633 | 1681 | ||
1634 | /* Initialize the receive buffer descriptors. */ | ||
1635 | bdp = fep->rx_bd_base; | ||
1636 | for (i = 0; i < RX_RING_SIZE; i++) { | ||
1637 | |||
1638 | /* Initialize the BD for every fragment in the page. */ | ||
1639 | bdp->cbd_sc = 0; | ||
1640 | bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); | ||
1641 | } | ||
1642 | |||
1643 | /* Set the last buffer to wrap */ | ||
1644 | bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); | ||
1645 | bdp->cbd_sc |= BD_SC_WRAP; | ||
1646 | |||
1647 | /* ...and the same for transmit */ | ||
1648 | bdp = fep->tx_bd_base; | ||
1649 | fep->cur_tx = bdp; | ||
1650 | for (i = 0; i < TX_RING_SIZE; i++) { | ||
1651 | |||
1652 | /* Initialize the BD for every fragment in the page. */ | ||
1653 | bdp->cbd_sc = 0; | ||
1654 | bdp->cbd_bufaddr = 0; | ||
1655 | bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); | ||
1656 | } | ||
1657 | |||
1658 | /* Set the last buffer to wrap */ | ||
1659 | bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); | ||
1660 | bdp->cbd_sc |= BD_SC_WRAP; | ||
1661 | fep->dirty_tx = bdp; | ||
1662 | |||
1663 | fec_restart(ndev, 0); | 1682 | fec_restart(ndev, 0); |
1664 | 1683 | ||
1665 | return 0; | 1684 | return 0; |
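Two things happen in the fec hunks above: descriptor-ring initialization moves into fec_enet_bd_init(), called from fec_restart(), so a link change or resume starts from clean rings, and the freshly allocated descriptor page is zeroed, since dma_alloc_coherent() does not guarantee zeroed memory on every platform. A sketch of the allocation side with illustrative names:

    #include <linux/dma-mapping.h>
    #include <linux/string.h>

    /* Allocate and clear one page of buffer-descriptor memory. */
    static void *my_alloc_bd_ring(struct device *dev, dma_addr_t *bd_dma)
    {
    	void *cbd_base;

    	cbd_base = dma_alloc_coherent(dev, PAGE_SIZE, bd_dma, GFP_KERNEL);
    	if (!cbd_base)
    		return NULL;

    	/* Stale contents would look like valid descriptors to the MAC. */
    	memset(cbd_base, 0, PAGE_SIZE);
    	return cbd_base;
    }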
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index ec800b093e7e..d2bea3f07c73 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c | |||
@@ -870,7 +870,7 @@ err_unlock: | |||
870 | } | 870 | } |
871 | 871 | ||
872 | static int e100_exec_cb(struct nic *nic, struct sk_buff *skb, | 872 | static int e100_exec_cb(struct nic *nic, struct sk_buff *skb, |
873 | void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *)) | 873 | int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *)) |
874 | { | 874 | { |
875 | struct cb *cb; | 875 | struct cb *cb; |
876 | unsigned long flags; | 876 | unsigned long flags; |
@@ -888,10 +888,13 @@ static int e100_exec_cb(struct nic *nic, struct sk_buff *skb, | |||
888 | nic->cbs_avail--; | 888 | nic->cbs_avail--; |
889 | cb->skb = skb; | 889 | cb->skb = skb; |
890 | 890 | ||
891 | err = cb_prepare(nic, cb, skb); | ||
892 | if (err) | ||
893 | goto err_unlock; | ||
894 | |||
891 | if (unlikely(!nic->cbs_avail)) | 895 | if (unlikely(!nic->cbs_avail)) |
892 | err = -ENOSPC; | 896 | err = -ENOSPC; |
893 | 897 | ||
894 | cb_prepare(nic, cb, skb); | ||
895 | 898 | ||
896 | /* Order is important otherwise we'll be in a race with h/w: | 899 | /* Order is important otherwise we'll be in a race with h/w: |
897 | * set S-bit in current first, then clear S-bit in previous. */ | 900 | * set S-bit in current first, then clear S-bit in previous. */ |
@@ -1091,7 +1094,7 @@ static void e100_get_defaults(struct nic *nic) | |||
1091 | nic->mii.mdio_write = mdio_write; | 1094 | nic->mii.mdio_write = mdio_write; |
1092 | } | 1095 | } |
1093 | 1096 | ||
1094 | static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) | 1097 | static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) |
1095 | { | 1098 | { |
1096 | struct config *config = &cb->u.config; | 1099 | struct config *config = &cb->u.config; |
1097 | u8 *c = (u8 *)config; | 1100 | u8 *c = (u8 *)config; |
@@ -1181,6 +1184,7 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) | |||
1181 | netif_printk(nic, hw, KERN_DEBUG, nic->netdev, | 1184 | netif_printk(nic, hw, KERN_DEBUG, nic->netdev, |
1182 | "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", | 1185 | "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", |
1183 | c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]); | 1186 | c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]); |
1187 | return 0; | ||
1184 | } | 1188 | } |
1185 | 1189 | ||
1186 | /************************************************************************* | 1190 | /************************************************************************* |
@@ -1331,7 +1335,7 @@ static const struct firmware *e100_request_firmware(struct nic *nic) | |||
1331 | return fw; | 1335 | return fw; |
1332 | } | 1336 | } |
1333 | 1337 | ||
1334 | static void e100_setup_ucode(struct nic *nic, struct cb *cb, | 1338 | static int e100_setup_ucode(struct nic *nic, struct cb *cb, |
1335 | struct sk_buff *skb) | 1339 | struct sk_buff *skb) |
1336 | { | 1340 | { |
1337 | const struct firmware *fw = (void *)skb; | 1341 | const struct firmware *fw = (void *)skb; |
@@ -1358,6 +1362,7 @@ static void e100_setup_ucode(struct nic *nic, struct cb *cb, | |||
1358 | cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80); | 1362 | cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80); |
1359 | 1363 | ||
1360 | cb->command = cpu_to_le16(cb_ucode | cb_el); | 1364 | cb->command = cpu_to_le16(cb_ucode | cb_el); |
1365 | return 0; | ||
1361 | } | 1366 | } |
1362 | 1367 | ||
1363 | static inline int e100_load_ucode_wait(struct nic *nic) | 1368 | static inline int e100_load_ucode_wait(struct nic *nic) |
@@ -1400,18 +1405,20 @@ static inline int e100_load_ucode_wait(struct nic *nic) | |||
1400 | return err; | 1405 | return err; |
1401 | } | 1406 | } |
1402 | 1407 | ||
1403 | static void e100_setup_iaaddr(struct nic *nic, struct cb *cb, | 1408 | static int e100_setup_iaaddr(struct nic *nic, struct cb *cb, |
1404 | struct sk_buff *skb) | 1409 | struct sk_buff *skb) |
1405 | { | 1410 | { |
1406 | cb->command = cpu_to_le16(cb_iaaddr); | 1411 | cb->command = cpu_to_le16(cb_iaaddr); |
1407 | memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN); | 1412 | memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN); |
1413 | return 0; | ||
1408 | } | 1414 | } |
1409 | 1415 | ||
1410 | static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb) | 1416 | static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb) |
1411 | { | 1417 | { |
1412 | cb->command = cpu_to_le16(cb_dump); | 1418 | cb->command = cpu_to_le16(cb_dump); |
1413 | cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr + | 1419 | cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr + |
1414 | offsetof(struct mem, dump_buf)); | 1420 | offsetof(struct mem, dump_buf)); |
1421 | return 0; | ||
1415 | } | 1422 | } |
1416 | 1423 | ||
1417 | static int e100_phy_check_without_mii(struct nic *nic) | 1424 | static int e100_phy_check_without_mii(struct nic *nic) |
@@ -1581,7 +1588,7 @@ static int e100_hw_init(struct nic *nic) | |||
1581 | return 0; | 1588 | return 0; |
1582 | } | 1589 | } |
1583 | 1590 | ||
1584 | static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb) | 1591 | static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb) |
1585 | { | 1592 | { |
1586 | struct net_device *netdev = nic->netdev; | 1593 | struct net_device *netdev = nic->netdev; |
1587 | struct netdev_hw_addr *ha; | 1594 | struct netdev_hw_addr *ha; |
@@ -1596,6 +1603,7 @@ static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb) | |||
1596 | memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr, | 1603 | memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr, |
1597 | ETH_ALEN); | 1604 | ETH_ALEN); |
1598 | } | 1605 | } |
1606 | return 0; | ||
1599 | } | 1607 | } |
1600 | 1608 | ||
1601 | static void e100_set_multicast_list(struct net_device *netdev) | 1609 | static void e100_set_multicast_list(struct net_device *netdev) |
@@ -1756,11 +1764,18 @@ static void e100_watchdog(unsigned long data) | |||
1756 | round_jiffies(jiffies + E100_WATCHDOG_PERIOD)); | 1764 | round_jiffies(jiffies + E100_WATCHDOG_PERIOD)); |
1757 | } | 1765 | } |
1758 | 1766 | ||
1759 | static void e100_xmit_prepare(struct nic *nic, struct cb *cb, | 1767 | static int e100_xmit_prepare(struct nic *nic, struct cb *cb, |
1760 | struct sk_buff *skb) | 1768 | struct sk_buff *skb) |
1761 | { | 1769 | { |
1770 | dma_addr_t dma_addr; | ||
1762 | cb->command = nic->tx_command; | 1771 | cb->command = nic->tx_command; |
1763 | 1772 | ||
1773 | dma_addr = pci_map_single(nic->pdev, | ||
1774 | skb->data, skb->len, PCI_DMA_TODEVICE); | ||
1775 | /* If we can't map the skb, have the upper layer try later */ | ||
1776 | if (pci_dma_mapping_error(nic->pdev, dma_addr)) | ||
1777 | return -ENOMEM; | ||
1778 | |||
1764 | /* | 1779 | /* |
1765 | * Use the last 4 bytes of the SKB payload packet as the CRC, used for | 1780 | * Use the last 4 bytes of the SKB payload packet as the CRC, used for |
1766 | * testing, ie sending frames with bad CRC. | 1781 | * testing, ie sending frames with bad CRC. |
@@ -1777,11 +1792,10 @@ static void e100_xmit_prepare(struct nic *nic, struct cb *cb, | |||
1777 | cb->u.tcb.tcb_byte_count = 0; | 1792 | cb->u.tcb.tcb_byte_count = 0; |
1778 | cb->u.tcb.threshold = nic->tx_threshold; | 1793 | cb->u.tcb.threshold = nic->tx_threshold; |
1779 | cb->u.tcb.tbd_count = 1; | 1794 | cb->u.tcb.tbd_count = 1; |
1780 | cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev, | 1795 | cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr); |
1781 | skb->data, skb->len, PCI_DMA_TODEVICE)); | ||
1782 | /* check for mapping failure? */ | ||
1783 | cb->u.tcb.tbd.size = cpu_to_le16(skb->len); | 1796 | cb->u.tcb.tbd.size = cpu_to_le16(skb->len); |
1784 | skb_tx_timestamp(skb); | 1797 | skb_tx_timestamp(skb); |
1798 | return 0; | ||
1785 | } | 1799 | } |
1786 | 1800 | ||
1787 | static netdev_tx_t e100_xmit_frame(struct sk_buff *skb, | 1801 | static netdev_tx_t e100_xmit_frame(struct sk_buff *skb, |
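The e100 hunks above change the control-block setup callbacks (e100_setup_ucode, e100_setup_iaaddr, e100_dump, e100_multi, e100_xmit_prepare) from void to int so that a failed pci_map_single() in e100_xmit_prepare can be reported to the caller instead of programming an invalid DMA address; the mapping is now done first and checked with pci_dma_mapping_error() before the descriptor is touched. A minimal user-space sketch of that "setup returns an error, caller backs off" pattern follows; the names (cb_setup_fn, exec_cb, xmit_prepare) are made up for illustration and are not the driver's API.

#include <errno.h>
#include <stdio.h>

struct cb { unsigned long buf_addr; };          /* stand-in for the hw control block */

typedef int (*cb_setup_fn)(struct cb *cb, const void *data, int simulate_map_failure);

/* setup callback returns 0 on success, -ENOMEM if the (fake) DMA mapping fails */
static int xmit_prepare(struct cb *cb, const void *data, int simulate_map_failure)
{
        if (simulate_map_failure)
                return -ENOMEM;                 /* caller will ask the stack to retry later */
        cb->buf_addr = (unsigned long)data;     /* only program the descriptor on success */
        return 0;
}

/* generic "issue a command block" helper: only hands the cb to hardware if setup worked */
static int exec_cb(struct cb *cb, const void *data, cb_setup_fn setup, int fail)
{
        int err = setup(cb, data, fail);

        if (err)
                return err;                     /* nothing was mapped, nothing to undo */
        printf("cb issued, buf_addr=%#lx\n", cb->buf_addr);
        return 0;
}

int main(void)
{
        struct cb cb = { 0 };
        char payload[] = "frame";

        printf("ok path:   %d\n", exec_cb(&cb, payload, xmit_prepare, 0));
        printf("fail path: %d (-ENOMEM is %d)\n", exec_cb(&cb, payload, xmit_prepare, 1), -ENOMEM);
        return 0;
}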
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index 43462d596a4e..ffd287196bf8 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c | |||
@@ -1053,6 +1053,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) | |||
1053 | txdr->buffer_info[i].dma = | 1053 | txdr->buffer_info[i].dma = |
1054 | dma_map_single(&pdev->dev, skb->data, skb->len, | 1054 | dma_map_single(&pdev->dev, skb->data, skb->len, |
1055 | DMA_TO_DEVICE); | 1055 | DMA_TO_DEVICE); |
1056 | if (dma_mapping_error(&pdev->dev, txdr->buffer_info[i].dma)) { | ||
1057 | ret_val = 4; | ||
1058 | goto err_nomem; | ||
1059 | } | ||
1056 | tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma); | 1060 | tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma); |
1057 | tx_desc->lower.data = cpu_to_le32(skb->len); | 1061 | tx_desc->lower.data = cpu_to_le32(skb->len); |
1058 | tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | | 1062 | tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | |
@@ -1069,7 +1073,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) | |||
1069 | rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_buffer), | 1073 | rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_buffer), |
1070 | GFP_KERNEL); | 1074 | GFP_KERNEL); |
1071 | if (!rxdr->buffer_info) { | 1075 | if (!rxdr->buffer_info) { |
1072 | ret_val = 4; | 1076 | ret_val = 5; |
1073 | goto err_nomem; | 1077 | goto err_nomem; |
1074 | } | 1078 | } |
1075 | 1079 | ||
@@ -1077,7 +1081,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) | |||
1077 | rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, | 1081 | rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, |
1078 | GFP_KERNEL); | 1082 | GFP_KERNEL); |
1079 | if (!rxdr->desc) { | 1083 | if (!rxdr->desc) { |
1080 | ret_val = 5; | 1084 | ret_val = 6; |
1081 | goto err_nomem; | 1085 | goto err_nomem; |
1082 | } | 1086 | } |
1083 | memset(rxdr->desc, 0, rxdr->size); | 1087 | memset(rxdr->desc, 0, rxdr->size); |
@@ -1101,7 +1105,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) | |||
1101 | 1105 | ||
1102 | skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL); | 1106 | skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL); |
1103 | if (!skb) { | 1107 | if (!skb) { |
1104 | ret_val = 6; | 1108 | ret_val = 7; |
1105 | goto err_nomem; | 1109 | goto err_nomem; |
1106 | } | 1110 | } |
1107 | skb_reserve(skb, NET_IP_ALIGN); | 1111 | skb_reserve(skb, NET_IP_ALIGN); |
@@ -1110,6 +1114,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) | |||
1110 | rxdr->buffer_info[i].dma = | 1114 | rxdr->buffer_info[i].dma = |
1111 | dma_map_single(&pdev->dev, skb->data, | 1115 | dma_map_single(&pdev->dev, skb->data, |
1112 | E1000_RXBUFFER_2048, DMA_FROM_DEVICE); | 1116 | E1000_RXBUFFER_2048, DMA_FROM_DEVICE); |
1117 | if (dma_mapping_error(&pdev->dev, rxdr->buffer_info[i].dma)) { | ||
1118 | ret_val = 8; | ||
1119 | goto err_nomem; | ||
1120 | } | ||
1113 | rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma); | 1121 | rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma); |
1114 | memset(skb->data, 0x00, skb->len); | 1122 | memset(skb->data, 0x00, skb->len); |
1115 | } | 1123 | } |
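The e1000_setup_desc_rings() hunks add dma_mapping_error() checks after each dma_map_single() call and renumber the later ret_val codes so every failure point in the loopback-test setup stays uniquely identifiable. The underlying idiom is a chain of numbered failure codes funneled into a single cleanup label; a small generic sketch (hypothetical names, plain malloc/free standing in for the DMA and descriptor allocations):

#include <stdio.h>
#include <stdlib.h>

/* Each failure point gets its own code; one label releases whatever was acquired. */
static int setup_rings(int fail_at)
{
        int ret_val = 0;
        void *tx_buf = NULL, *rx_buf = NULL;

        tx_buf = malloc(64);
        if (!tx_buf || fail_at == 1) { ret_val = 1; goto err_nomem; }

        rx_buf = malloc(64);
        if (!rx_buf || fail_at == 2) { ret_val = 2; goto err_nomem; }

        /* ... descriptor initialisation would go here ... */
        free(rx_buf);
        free(tx_buf);
        return 0;

err_nomem:
        free(rx_buf);   /* free(NULL) is a no-op, so partial setup unwinds safely */
        free(tx_buf);
        return ret_val;
}

int main(void)
{
        printf("ok=%d fail1=%d fail2=%d\n", setup_rings(0), setup_rings(1), setup_rings(2));
        return 0;
}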
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 948b86ffa4f0..7e615e2bf7e6 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c | |||
@@ -848,11 +848,16 @@ check_page: | |||
848 | } | 848 | } |
849 | } | 849 | } |
850 | 850 | ||
851 | if (!buffer_info->dma) | 851 | if (!buffer_info->dma) { |
852 | buffer_info->dma = dma_map_page(&pdev->dev, | 852 | buffer_info->dma = dma_map_page(&pdev->dev, |
853 | buffer_info->page, 0, | 853 | buffer_info->page, 0, |
854 | PAGE_SIZE, | 854 | PAGE_SIZE, |
855 | DMA_FROM_DEVICE); | 855 | DMA_FROM_DEVICE); |
856 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { | ||
857 | adapter->alloc_rx_buff_failed++; | ||
858 | break; | ||
859 | } | ||
860 | } | ||
856 | 861 | ||
857 | rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); | 862 | rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); |
858 | rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); | 863 | rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); |
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 25151401c2ab..ab577a763a20 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h | |||
@@ -284,18 +284,10 @@ struct igb_q_vector { | |||
284 | enum e1000_ring_flags_t { | 284 | enum e1000_ring_flags_t { |
285 | IGB_RING_FLAG_RX_SCTP_CSUM, | 285 | IGB_RING_FLAG_RX_SCTP_CSUM, |
286 | IGB_RING_FLAG_RX_LB_VLAN_BSWAP, | 286 | IGB_RING_FLAG_RX_LB_VLAN_BSWAP, |
287 | IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, | ||
288 | IGB_RING_FLAG_TX_CTX_IDX, | 287 | IGB_RING_FLAG_TX_CTX_IDX, |
289 | IGB_RING_FLAG_TX_DETECT_HANG | 288 | IGB_RING_FLAG_TX_DETECT_HANG |
290 | }; | 289 | }; |
291 | 290 | ||
292 | #define ring_uses_build_skb(ring) \ | ||
293 | test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) | ||
294 | #define set_ring_build_skb_enabled(ring) \ | ||
295 | set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) | ||
296 | #define clear_ring_build_skb_enabled(ring) \ | ||
297 | clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) | ||
298 | |||
299 | #define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS) | 291 | #define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS) |
300 | 292 | ||
301 | #define IGB_RX_DESC(R, i) \ | 293 | #define IGB_RX_DESC(R, i) \ |
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 8496adfc6a68..64f75291e3a5 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c | |||
@@ -3350,20 +3350,6 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, | |||
3350 | wr32(E1000_RXDCTL(reg_idx), rxdctl); | 3350 | wr32(E1000_RXDCTL(reg_idx), rxdctl); |
3351 | } | 3351 | } |
3352 | 3352 | ||
3353 | static void igb_set_rx_buffer_len(struct igb_adapter *adapter, | ||
3354 | struct igb_ring *rx_ring) | ||
3355 | { | ||
3356 | #define IGB_MAX_BUILD_SKB_SIZE \ | ||
3357 | (SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) - \ | ||
3358 | (NET_SKB_PAD + NET_IP_ALIGN + IGB_TS_HDR_LEN)) | ||
3359 | |||
3360 | /* set build_skb flag */ | ||
3361 | if (adapter->max_frame_size <= IGB_MAX_BUILD_SKB_SIZE) | ||
3362 | set_ring_build_skb_enabled(rx_ring); | ||
3363 | else | ||
3364 | clear_ring_build_skb_enabled(rx_ring); | ||
3365 | } | ||
3366 | |||
3367 | /** | 3353 | /** |
3368 | * igb_configure_rx - Configure receive Unit after Reset | 3354 | * igb_configure_rx - Configure receive Unit after Reset |
3369 | * @adapter: board private structure | 3355 | * @adapter: board private structure |
@@ -3383,11 +3369,8 @@ static void igb_configure_rx(struct igb_adapter *adapter) | |||
3383 | 3369 | ||
3384 | /* Setup the HW Rx Head and Tail Descriptor Pointers and | 3370 | /* Setup the HW Rx Head and Tail Descriptor Pointers and |
3385 | * the Base and Length of the Rx Descriptor Ring */ | 3371 | * the Base and Length of the Rx Descriptor Ring */ |
3386 | for (i = 0; i < adapter->num_rx_queues; i++) { | 3372 | for (i = 0; i < adapter->num_rx_queues; i++) |
3387 | struct igb_ring *rx_ring = adapter->rx_ring[i]; | 3373 | igb_configure_rx_ring(adapter, adapter->rx_ring[i]); |
3388 | igb_set_rx_buffer_len(adapter, rx_ring); | ||
3389 | igb_configure_rx_ring(adapter, rx_ring); | ||
3390 | } | ||
3391 | } | 3374 | } |
3392 | 3375 | ||
3393 | /** | 3376 | /** |
@@ -6203,78 +6186,6 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring, | |||
6203 | return igb_can_reuse_rx_page(rx_buffer, page, truesize); | 6186 | return igb_can_reuse_rx_page(rx_buffer, page, truesize); |
6204 | } | 6187 | } |
6205 | 6188 | ||
6206 | static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring, | ||
6207 | union e1000_adv_rx_desc *rx_desc) | ||
6208 | { | ||
6209 | struct igb_rx_buffer *rx_buffer; | ||
6210 | struct sk_buff *skb; | ||
6211 | struct page *page; | ||
6212 | void *page_addr; | ||
6213 | unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); | ||
6214 | #if (PAGE_SIZE < 8192) | ||
6215 | unsigned int truesize = IGB_RX_BUFSZ; | ||
6216 | #else | ||
6217 | unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + | ||
6218 | SKB_DATA_ALIGN(NET_SKB_PAD + | ||
6219 | NET_IP_ALIGN + | ||
6220 | size); | ||
6221 | #endif | ||
6222 | |||
6223 | /* If we spanned a buffer we have a huge mess so test for it */ | ||
6224 | BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP))); | ||
6225 | |||
6226 | rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; | ||
6227 | page = rx_buffer->page; | ||
6228 | prefetchw(page); | ||
6229 | |||
6230 | page_addr = page_address(page) + rx_buffer->page_offset; | ||
6231 | |||
6232 | /* prefetch first cache line of first page */ | ||
6233 | prefetch(page_addr + NET_SKB_PAD + NET_IP_ALIGN); | ||
6234 | #if L1_CACHE_BYTES < 128 | ||
6235 | prefetch(page_addr + L1_CACHE_BYTES + NET_SKB_PAD + NET_IP_ALIGN); | ||
6236 | #endif | ||
6237 | |||
6238 | /* build an skb to around the page buffer */ | ||
6239 | skb = build_skb(page_addr, truesize); | ||
6240 | if (unlikely(!skb)) { | ||
6241 | rx_ring->rx_stats.alloc_failed++; | ||
6242 | return NULL; | ||
6243 | } | ||
6244 | |||
6245 | /* we are reusing so sync this buffer for CPU use */ | ||
6246 | dma_sync_single_range_for_cpu(rx_ring->dev, | ||
6247 | rx_buffer->dma, | ||
6248 | rx_buffer->page_offset, | ||
6249 | IGB_RX_BUFSZ, | ||
6250 | DMA_FROM_DEVICE); | ||
6251 | |||
6252 | /* update pointers within the skb to store the data */ | ||
6253 | skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD); | ||
6254 | __skb_put(skb, size); | ||
6255 | |||
6256 | /* pull timestamp out of packet data */ | ||
6257 | if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { | ||
6258 | igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb); | ||
6259 | __skb_pull(skb, IGB_TS_HDR_LEN); | ||
6260 | } | ||
6261 | |||
6262 | if (igb_can_reuse_rx_page(rx_buffer, page, truesize)) { | ||
6263 | /* hand second half of page back to the ring */ | ||
6264 | igb_reuse_rx_page(rx_ring, rx_buffer); | ||
6265 | } else { | ||
6266 | /* we are not reusing the buffer so unmap it */ | ||
6267 | dma_unmap_page(rx_ring->dev, rx_buffer->dma, | ||
6268 | PAGE_SIZE, DMA_FROM_DEVICE); | ||
6269 | } | ||
6270 | |||
6271 | /* clear contents of buffer_info */ | ||
6272 | rx_buffer->dma = 0; | ||
6273 | rx_buffer->page = NULL; | ||
6274 | |||
6275 | return skb; | ||
6276 | } | ||
6277 | |||
6278 | static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring, | 6189 | static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring, |
6279 | union e1000_adv_rx_desc *rx_desc, | 6190 | union e1000_adv_rx_desc *rx_desc, |
6280 | struct sk_buff *skb) | 6191 | struct sk_buff *skb) |
@@ -6690,10 +6601,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) | |||
6690 | rmb(); | 6601 | rmb(); |
6691 | 6602 | ||
6692 | /* retrieve a buffer from the ring */ | 6603 | /* retrieve a buffer from the ring */ |
6693 | if (ring_uses_build_skb(rx_ring)) | 6604 | skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb); |
6694 | skb = igb_build_rx_buffer(rx_ring, rx_desc); | ||
6695 | else | ||
6696 | skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb); | ||
6697 | 6605 | ||
6698 | /* exit if we failed to retrieve a buffer */ | 6606 | /* exit if we failed to retrieve a buffer */ |
6699 | if (!skb) | 6607 | if (!skb) |
@@ -6780,14 +6688,6 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, | |||
6780 | return true; | 6688 | return true; |
6781 | } | 6689 | } |
6782 | 6690 | ||
6783 | static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring) | ||
6784 | { | ||
6785 | if (ring_uses_build_skb(rx_ring)) | ||
6786 | return NET_SKB_PAD + NET_IP_ALIGN; | ||
6787 | else | ||
6788 | return 0; | ||
6789 | } | ||
6790 | |||
6791 | /** | 6691 | /** |
6792 | * igb_alloc_rx_buffers - Replace used receive buffers; packet split | 6692 | * igb_alloc_rx_buffers - Replace used receive buffers; packet split |
6793 | * @adapter: address of board private structure | 6693 | * @adapter: address of board private structure |
@@ -6814,9 +6714,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) | |||
6814 | * Refresh the desc even if buffer_addrs didn't change | 6714 | * Refresh the desc even if buffer_addrs didn't change |
6815 | * because each write-back erases this info. | 6715 | * because each write-back erases this info. |
6816 | */ | 6716 | */ |
6817 | rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + | 6717 | rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); |
6818 | bi->page_offset + | ||
6819 | igb_rx_offset(rx_ring)); | ||
6820 | 6718 | ||
6821 | rx_desc++; | 6719 | rx_desc++; |
6822 | bi++; | 6720 | bi++; |
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index ea4808373435..b5f94abe3cff 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c | |||
@@ -2159,6 +2159,10 @@ map_skb: | |||
2159 | skb->data, | 2159 | skb->data, |
2160 | adapter->rx_buffer_len, | 2160 | adapter->rx_buffer_len, |
2161 | DMA_FROM_DEVICE); | 2161 | DMA_FROM_DEVICE); |
2162 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { | ||
2163 | adapter->alloc_rx_buff_failed++; | ||
2164 | break; | ||
2165 | } | ||
2162 | 2166 | ||
2163 | rx_desc = IXGB_RX_DESC(*rx_ring, i); | 2167 | rx_desc = IXGB_RX_DESC(*rx_ring, i); |
2164 | rx_desc->buff_addr = cpu_to_le64(buffer_info->dma); | 2168 | rx_desc->buff_addr = cpu_to_le64(buffer_info->dma); |
@@ -2168,7 +2172,8 @@ map_skb: | |||
2168 | rx_desc->status = 0; | 2172 | rx_desc->status = 0; |
2169 | 2173 | ||
2170 | 2174 | ||
2171 | if (++i == rx_ring->count) i = 0; | 2175 | if (++i == rx_ring->count) |
2176 | i = 0; | ||
2172 | buffer_info = &rx_ring->buffer_info[i]; | 2177 | buffer_info = &rx_ring->buffer_info[i]; |
2173 | } | 2178 | } |
2174 | 2179 | ||
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index db5611ae407e..79f4a26ea6cc 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
@@ -7922,12 +7922,19 @@ static int __init ixgbe_init_module(void) | |||
7922 | ixgbe_dbg_init(); | 7922 | ixgbe_dbg_init(); |
7923 | #endif /* CONFIG_DEBUG_FS */ | 7923 | #endif /* CONFIG_DEBUG_FS */ |
7924 | 7924 | ||
7925 | ret = pci_register_driver(&ixgbe_driver); | ||
7926 | if (ret) { | ||
7927 | #ifdef CONFIG_DEBUG_FS | ||
7928 | ixgbe_dbg_exit(); | ||
7929 | #endif /* CONFIG_DEBUG_FS */ | ||
7930 | return ret; | ||
7931 | } | ||
7932 | |||
7925 | #ifdef CONFIG_IXGBE_DCA | 7933 | #ifdef CONFIG_IXGBE_DCA |
7926 | dca_register_notify(&dca_notifier); | 7934 | dca_register_notify(&dca_notifier); |
7927 | #endif | 7935 | #endif |
7928 | 7936 | ||
7929 | ret = pci_register_driver(&ixgbe_driver); | 7937 | return 0; |
7930 | return ret; | ||
7931 | } | 7938 | } |
7932 | 7939 | ||
7933 | module_init(ixgbe_init_module); | 7940 | module_init(ixgbe_init_module); |
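ixgbe_init_module() now checks the pci_register_driver() return value, tears down the debugfs directory created just before it on failure, and only registers the DCA notifier once the driver is known to be registered. The general shape is ordered setup with reverse-order unwinding; a generic sketch with stand-in functions (dbg_init, register_driver and so on are placeholders, not the ixgbe symbols):

#include <stdio.h>

static int dbg_init(void)            { puts("debugfs up");   return 0; }
static void dbg_exit(void)           { puts("debugfs down"); }
static int register_driver(int fail) { return fail ? -1 : 0; }
static void register_notifier(void)  { puts("notifier registered"); }

static int module_init_sketch(int fail_register)
{
        int ret;

        ret = dbg_init();
        if (ret)
                return ret;

        ret = register_driver(fail_register);
        if (ret) {
                dbg_exit();          /* undo the earlier step before bailing out */
                return ret;
        }

        register_notifier();         /* only after the driver is really registered */
        return 0;
}

int main(void)
{
        printf("ok=%d\n",   module_init_sketch(0));
        printf("fail=%d\n", module_init_sketch(1));
        return 0;
}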
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index d44b4d21268c..97e33669c0b9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | |||
@@ -1049,6 +1049,12 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) | |||
1049 | if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7)) | 1049 | if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7)) |
1050 | return -EINVAL; | 1050 | return -EINVAL; |
1051 | if (vlan || qos) { | 1051 | if (vlan || qos) { |
1052 | if (adapter->vfinfo[vf].pf_vlan) | ||
1053 | err = ixgbe_set_vf_vlan(adapter, false, | ||
1054 | adapter->vfinfo[vf].pf_vlan, | ||
1055 | vf); | ||
1056 | if (err) | ||
1057 | goto out; | ||
1052 | err = ixgbe_set_vf_vlan(adapter, true, vlan, vf); | 1058 | err = ixgbe_set_vf_vlan(adapter, true, vlan, vf); |
1053 | if (err) | 1059 | if (err) |
1054 | goto out; | 1060 | goto out; |
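In ixgbe_ndo_set_vf_vlan(), an already-programmed PF VLAN is removed before the new one is added, so changing the administrative VLAN no longer leaves the old filter behind. Sketched generically below; set_vlan and change_pf_vlan are invented stand-ins for the mailbox helpers and always succeed in this toy version:

#include <stdio.h>

static int current_vlan;        /* stand-in for adapter->vfinfo[vf].pf_vlan */

static int set_vlan(int add, int vid)
{
        printf("%s vlan %d\n", add ? "add" : "del", vid);
        return 0;               /* pretend the hardware command always succeeds */
}

static int change_pf_vlan(int new_vid)
{
        int err;

        if (current_vlan) {
                err = set_vlan(0, current_vlan);        /* drop the old filter first */
                if (err)
                        return err;
        }
        err = set_vlan(1, new_vid);
        if (err)
                return err;
        current_vlan = new_vid;
        return 0;
}

int main(void)
{
        change_pf_vlan(100);    /* nothing to remove the first time */
        change_pf_vlan(200);    /* removes 100, then adds 200 */
        return 0;
}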
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index edfba9370922..434e33c527df 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig | |||
@@ -33,6 +33,7 @@ config MV643XX_ETH | |||
33 | 33 | ||
34 | config MVMDIO | 34 | config MVMDIO |
35 | tristate "Marvell MDIO interface support" | 35 | tristate "Marvell MDIO interface support" |
36 | select PHYLIB | ||
36 | ---help--- | 37 | ---help--- |
37 | This driver supports the MDIO interface found in the network | 38 | This driver supports the MDIO interface found in the network |
38 | interface units of the Marvell EBU SoCs (Kirkwood, Orion5x, | 39 | interface units of the Marvell EBU SoCs (Kirkwood, Orion5x, |
@@ -45,7 +46,6 @@ config MVMDIO | |||
45 | config MVNETA | 46 | config MVNETA |
46 | tristate "Marvell Armada 370/XP network interface support" | 47 | tristate "Marvell Armada 370/XP network interface support" |
47 | depends on MACH_ARMADA_370_XP | 48 | depends on MACH_ARMADA_370_XP |
48 | select PHYLIB | ||
49 | select MVMDIO | 49 | select MVMDIO |
50 | ---help--- | 50 | ---help--- |
51 | This driver supports the network interface units in the | 51 | This driver supports the network interface units in the |
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index cd345b8969bc..a47a097c21e1 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c | |||
@@ -374,7 +374,6 @@ static int rxq_number = 8; | |||
374 | static int txq_number = 8; | 374 | static int txq_number = 8; |
375 | 375 | ||
376 | static int rxq_def; | 376 | static int rxq_def; |
377 | static int txq_def; | ||
378 | 377 | ||
379 | #define MVNETA_DRIVER_NAME "mvneta" | 378 | #define MVNETA_DRIVER_NAME "mvneta" |
380 | #define MVNETA_DRIVER_VERSION "1.0" | 379 | #define MVNETA_DRIVER_VERSION "1.0" |
@@ -1475,7 +1474,8 @@ error: | |||
1475 | static int mvneta_tx(struct sk_buff *skb, struct net_device *dev) | 1474 | static int mvneta_tx(struct sk_buff *skb, struct net_device *dev) |
1476 | { | 1475 | { |
1477 | struct mvneta_port *pp = netdev_priv(dev); | 1476 | struct mvneta_port *pp = netdev_priv(dev); |
1478 | struct mvneta_tx_queue *txq = &pp->txqs[txq_def]; | 1477 | u16 txq_id = skb_get_queue_mapping(skb); |
1478 | struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; | ||
1479 | struct mvneta_tx_desc *tx_desc; | 1479 | struct mvneta_tx_desc *tx_desc; |
1480 | struct netdev_queue *nq; | 1480 | struct netdev_queue *nq; |
1481 | int frags = 0; | 1481 | int frags = 0; |
@@ -1485,7 +1485,7 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev) | |||
1485 | goto out; | 1485 | goto out; |
1486 | 1486 | ||
1487 | frags = skb_shinfo(skb)->nr_frags + 1; | 1487 | frags = skb_shinfo(skb)->nr_frags + 1; |
1488 | nq = netdev_get_tx_queue(dev, txq_def); | 1488 | nq = netdev_get_tx_queue(dev, txq_id); |
1489 | 1489 | ||
1490 | /* Get a descriptor for the first part of the packet */ | 1490 | /* Get a descriptor for the first part of the packet */ |
1491 | tx_desc = mvneta_txq_next_desc_get(txq); | 1491 | tx_desc = mvneta_txq_next_desc_get(txq); |
@@ -2689,7 +2689,7 @@ static int mvneta_probe(struct platform_device *pdev) | |||
2689 | return -EINVAL; | 2689 | return -EINVAL; |
2690 | } | 2690 | } |
2691 | 2691 | ||
2692 | dev = alloc_etherdev_mq(sizeof(struct mvneta_port), 8); | 2692 | dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number); |
2693 | if (!dev) | 2693 | if (!dev) |
2694 | return -ENOMEM; | 2694 | return -ENOMEM; |
2695 | 2695 | ||
@@ -2771,16 +2771,17 @@ static int mvneta_probe(struct platform_device *pdev) | |||
2771 | 2771 | ||
2772 | netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight); | 2772 | netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight); |
2773 | 2773 | ||
2774 | dev->features = NETIF_F_SG | NETIF_F_IP_CSUM; | ||
2775 | dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM; | ||
2776 | dev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM; | ||
2777 | dev->priv_flags |= IFF_UNICAST_FLT; | ||
2778 | |||
2774 | err = register_netdev(dev); | 2779 | err = register_netdev(dev); |
2775 | if (err < 0) { | 2780 | if (err < 0) { |
2776 | dev_err(&pdev->dev, "failed to register\n"); | 2781 | dev_err(&pdev->dev, "failed to register\n"); |
2777 | goto err_deinit; | 2782 | goto err_deinit; |
2778 | } | 2783 | } |
2779 | 2784 | ||
2780 | dev->features = NETIF_F_SG | NETIF_F_IP_CSUM; | ||
2781 | dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM; | ||
2782 | dev->priv_flags |= IFF_UNICAST_FLT; | ||
2783 | |||
2784 | netdev_info(dev, "mac: %pM\n", dev->dev_addr); | 2785 | netdev_info(dev, "mac: %pM\n", dev->dev_addr); |
2785 | 2786 | ||
2786 | platform_set_drvdata(pdev, pp->dev); | 2787 | platform_set_drvdata(pdev, pp->dev); |
@@ -2843,4 +2844,3 @@ module_param(rxq_number, int, S_IRUGO); | |||
2843 | module_param(txq_number, int, S_IRUGO); | 2844 | module_param(txq_number, int, S_IRUGO); |
2844 | 2845 | ||
2845 | module_param(rxq_def, int, S_IRUGO); | 2846 | module_param(rxq_def, int, S_IRUGO); |
2846 | module_param(txq_def, int, S_IRUGO); | ||
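The mvneta changes drop the global txq_def module parameter, pick the TX queue per packet from skb_get_queue_mapping(), allocate the netdev with alloc_etherdev_mqs() so the stack knows the real TX/RX queue counts, and set the feature flags before register_netdev() so they are valid as soon as the device is visible. A tiny sketch of honouring a per-skb queue index (fake_skb and the clamp are illustrative; in the kernel the mapping is stamped on the skb by the core):

#include <stdio.h>

#define TXQ_NUMBER 8

struct fake_skb { unsigned int queue_mapping; };

/* the stack picks a queue index for each skb; the driver just honours it */
static unsigned int pick_txq(const struct fake_skb *skb)
{
        return skb->queue_mapping % TXQ_NUMBER;  /* clamp defensively in this sketch */
}

int main(void)
{
        struct fake_skb a = { .queue_mapping = 3 };
        struct fake_skb b = { .queue_mapping = 11 };

        printf("skb a -> txq %u\n", pick_txq(&a));
        printf("skb b -> txq %u\n", pick_txq(&b));
        return 0;
}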
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index fc07ca35721b..6a0e671fcecd 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c | |||
@@ -1067,7 +1067,7 @@ static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space) | |||
1067 | sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp); | 1067 | sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp); |
1068 | sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2); | 1068 | sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2); |
1069 | 1069 | ||
1070 | tp = space - 2048/8; | 1070 | tp = space - 8192/8; |
1071 | sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp); | 1071 | sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp); |
1072 | sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4); | 1072 | sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4); |
1073 | } else { | 1073 | } else { |
diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h index 615ac63ea860..ec6dcd80152b 100644 --- a/drivers/net/ethernet/marvell/sky2.h +++ b/drivers/net/ethernet/marvell/sky2.h | |||
@@ -2074,7 +2074,7 @@ enum { | |||
2074 | GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */ | 2074 | GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */ |
2075 | GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */ | 2075 | GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */ |
2076 | 2076 | ||
2077 | #define GMAC_DEF_MSK GM_IS_TX_FF_UR | 2077 | #define GMAC_DEF_MSK (GM_IS_TX_FF_UR | GM_IS_RX_FF_OR) |
2078 | }; | 2078 | }; |
2079 | 2079 | ||
2080 | /* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */ | 2080 | /* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */ |
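The sky2.h change does two things: it adds the RX FIFO overrun bit to the default GMAC interrupt mask and it parenthesizes the macro body. The second part matters because an unparenthesized A | B expands textually and interacts badly with the surrounding expression; the standalone program below shows the failure mode with illustrative bit values (not the real register layout):

#include <stdio.h>

#define TX_UR (1 << 3)
#define RX_OR (1 << 1)

#define MSK_BAD   TX_UR | RX_OR   /* expands textually, no grouping */
#define MSK_GOOD (TX_UR | RX_OR)

int main(void)
{
        unsigned int status = 0;  /* no interrupt bits set at all */

        /* '&' binds tighter than '|', so status & MSK_BAD parses as
         * (status & TX_UR) | RX_OR and reports RX_OR even though status is zero. */
        printf("bad:  %#x\n", status & MSK_BAD);   /* prints 0x2 */
        printf("good: %#x\n", status & MSK_GOOD);  /* prints 0 */
        return 0;
}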
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index f278b10ef714..30d78f806dc3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
@@ -411,8 +411,8 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | |||
411 | 411 | ||
412 | static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac) | 412 | static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac) |
413 | { | 413 | { |
414 | unsigned int i; | 414 | int i; |
415 | for (i = ETH_ALEN - 1; i; --i) { | 415 | for (i = ETH_ALEN - 1; i >= 0; --i) { |
416 | dst_mac[i] = src_mac & 0xff; | 416 | dst_mac[i] = src_mac & 0xff; |
417 | src_mac >>= 8; | 417 | src_mac >>= 8; |
418 | } | 418 | } |
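The mlx4_en_u64_to_mac() fix makes the loop counter signed and runs the loop down to index 0; the old "for (i = ETH_ALEN - 1; i; --i)" form stopped before writing dst_mac[0], and an unsigned counter would wrap instead of going negative. A runnable check of the corrected conversion:

#include <stdio.h>
#include <stdint.h>

#define ETH_ALEN 6

static void u64_to_mac(unsigned char dst_mac[ETH_ALEN], uint64_t src_mac)
{
        int i;  /* must be signed: the loop has to see i go below 0 to stop */

        for (i = ETH_ALEN - 1; i >= 0; --i) {
                dst_mac[i] = src_mac & 0xff;    /* lowest byte goes in the last slot */
                src_mac >>= 8;
        }
}

int main(void)
{
        unsigned char mac[ETH_ALEN];

        u64_to_mac(mac, 0x001122334455ULL);
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);  /* 00:11:22:33:44:55 */
        return 0;
}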
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c index 33bcb63d56a2..8fb481252e2c 100644 --- a/drivers/net/ethernet/micrel/ks8851.c +++ b/drivers/net/ethernet/micrel/ks8851.c | |||
@@ -528,7 +528,7 @@ static void ks8851_rx_pkts(struct ks8851_net *ks) | |||
528 | for (; rxfc != 0; rxfc--) { | 528 | for (; rxfc != 0; rxfc--) { |
529 | rxh = ks8851_rdreg32(ks, KS_RXFHSR); | 529 | rxh = ks8851_rdreg32(ks, KS_RXFHSR); |
530 | rxstat = rxh & 0xffff; | 530 | rxstat = rxh & 0xffff; |
531 | rxlen = rxh >> 16; | 531 | rxlen = (rxh >> 16) & 0xfff; |
532 | 532 | ||
533 | netif_dbg(ks, rx_status, ks->netdev, | 533 | netif_dbg(ks, rx_status, ks->netdev, |
534 | "rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen); | 534 | "rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen); |
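The ks8851 change masks the receive length to the low 12 bits of the header word's upper half instead of taking all 16 bits; the remaining bits of that register are status/reserved and would otherwise be read as an absurd frame length. Field-extraction sketch with an example header value (the value here is illustrative, the 12-bit mask comes from the change above):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* low 16 bits: status flags, high 16 bits: byte-count register */
        uint32_t rxh = 0xA04D8001;

        unsigned int rxstat = rxh & 0xffff;
        unsigned int rxlen  = (rxh >> 16) & 0xfff;  /* ignore reserved/flag bits */

        /* without the mask the length would come out as 0xA04D (41037 bytes) */
        printf("stat 0x%04x, len 0x%04x (%u bytes)\n", rxstat, rxlen, rxlen);
        return 0;
}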
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index cd5ae8813cb3..edd63f1230f3 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | |||
@@ -1500,6 +1500,12 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode) | |||
1500 | } | 1500 | } |
1501 | } while ((adapter->ahw->linkup && ahw->has_link_events) != 1); | 1501 | } while ((adapter->ahw->linkup && ahw->has_link_events) != 1); |
1502 | 1502 | ||
1503 | /* Make sure carrier is off and queue is stopped during loopback */ | ||
1504 | if (netif_running(netdev)) { | ||
1505 | netif_carrier_off(netdev); | ||
1506 | netif_stop_queue(netdev); | ||
1507 | } | ||
1508 | |||
1503 | ret = qlcnic_do_lb_test(adapter, mode); | 1509 | ret = qlcnic_do_lb_test(adapter, mode); |
1504 | 1510 | ||
1505 | qlcnic_83xx_clear_lb_mode(adapter, mode); | 1511 | qlcnic_83xx_clear_lb_mode(adapter, mode); |
@@ -2780,6 +2786,7 @@ static u64 *qlcnic_83xx_fill_stats(struct qlcnic_adapter *adapter, | |||
2780 | void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data) | 2786 | void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data) |
2781 | { | 2787 | { |
2782 | struct qlcnic_cmd_args cmd; | 2788 | struct qlcnic_cmd_args cmd; |
2789 | struct net_device *netdev = adapter->netdev; | ||
2783 | int ret = 0; | 2790 | int ret = 0; |
2784 | 2791 | ||
2785 | qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_STATISTICS); | 2792 | qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_STATISTICS); |
@@ -2789,7 +2796,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data) | |||
2789 | data = qlcnic_83xx_fill_stats(adapter, &cmd, data, | 2796 | data = qlcnic_83xx_fill_stats(adapter, &cmd, data, |
2790 | QLC_83XX_STAT_TX, &ret); | 2797 | QLC_83XX_STAT_TX, &ret); |
2791 | if (ret) { | 2798 | if (ret) { |
2792 | dev_info(&adapter->pdev->dev, "Error getting MAC stats\n"); | 2799 | netdev_err(netdev, "Error getting Tx stats\n"); |
2793 | goto out; | 2800 | goto out; |
2794 | } | 2801 | } |
2795 | /* Get MAC stats */ | 2802 | /* Get MAC stats */ |
@@ -2799,8 +2806,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data) | |||
2799 | data = qlcnic_83xx_fill_stats(adapter, &cmd, data, | 2806 | data = qlcnic_83xx_fill_stats(adapter, &cmd, data, |
2800 | QLC_83XX_STAT_MAC, &ret); | 2807 | QLC_83XX_STAT_MAC, &ret); |
2801 | if (ret) { | 2808 | if (ret) { |
2802 | dev_info(&adapter->pdev->dev, | 2809 | netdev_err(netdev, "Error getting MAC stats\n"); |
2803 | "Error getting Rx stats\n"); | ||
2804 | goto out; | 2810 | goto out; |
2805 | } | 2811 | } |
2806 | /* Get Rx stats */ | 2812 | /* Get Rx stats */ |
@@ -2810,8 +2816,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data) | |||
2810 | data = qlcnic_83xx_fill_stats(adapter, &cmd, data, | 2816 | data = qlcnic_83xx_fill_stats(adapter, &cmd, data, |
2811 | QLC_83XX_STAT_RX, &ret); | 2817 | QLC_83XX_STAT_RX, &ret); |
2812 | if (ret) | 2818 | if (ret) |
2813 | dev_info(&adapter->pdev->dev, | 2819 | netdev_err(netdev, "Error getting Rx stats\n"); |
2814 | "Error getting Tx stats\n"); | ||
2815 | out: | 2820 | out: |
2816 | qlcnic_free_mbx_args(&cmd); | 2821 | qlcnic_free_mbx_args(&cmd); |
2817 | } | 2822 | } |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 0e630061bff3..5fa847fe388a 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | |||
@@ -358,8 +358,7 @@ set_flags: | |||
358 | memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN); | 358 | memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN); |
359 | } | 359 | } |
360 | opcode = TX_ETHER_PKT; | 360 | opcode = TX_ETHER_PKT; |
361 | if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) && | 361 | if (skb_is_gso(skb)) { |
362 | skb_shinfo(skb)->gso_size > 0) { | ||
363 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | 362 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); |
364 | first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); | 363 | first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); |
365 | first_desc->total_hdr_length = hdr_len; | 364 | first_desc->total_hdr_length = hdr_len; |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index 987fb6f8adc3..5ef328af61d0 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | |||
@@ -200,10 +200,10 @@ beacon_err: | |||
200 | } | 200 | } |
201 | 201 | ||
202 | err = qlcnic_config_led(adapter, b_state, b_rate); | 202 | err = qlcnic_config_led(adapter, b_state, b_rate); |
203 | if (!err) | 203 | if (!err) { |
204 | err = len; | 204 | err = len; |
205 | else | ||
206 | ahw->beacon_state = b_state; | 205 | ahw->beacon_state = b_state; |
206 | } | ||
207 | 207 | ||
208 | if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state)) | 208 | if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state)) |
209 | qlcnic_diag_free_res(adapter->netdev, max_sds_rings); | 209 | qlcnic_diag_free_res(adapter->netdev, max_sds_rings); |
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h index a131d7b5d2fe..7e8d68263963 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge.h +++ b/drivers/net/ethernet/qlogic/qlge/qlge.h | |||
@@ -18,7 +18,7 @@ | |||
18 | */ | 18 | */ |
19 | #define DRV_NAME "qlge" | 19 | #define DRV_NAME "qlge" |
20 | #define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " | 20 | #define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " |
21 | #define DRV_VERSION "v1.00.00.31" | 21 | #define DRV_VERSION "v1.00.00.32" |
22 | 22 | ||
23 | #define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ | 23 | #define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ |
24 | 24 | ||
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c index 6f316ab23257..0780e039b271 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c | |||
@@ -379,13 +379,13 @@ static int ql_get_settings(struct net_device *ndev, | |||
379 | 379 | ||
380 | ecmd->supported = SUPPORTED_10000baseT_Full; | 380 | ecmd->supported = SUPPORTED_10000baseT_Full; |
381 | ecmd->advertising = ADVERTISED_10000baseT_Full; | 381 | ecmd->advertising = ADVERTISED_10000baseT_Full; |
382 | ecmd->autoneg = AUTONEG_ENABLE; | ||
383 | ecmd->transceiver = XCVR_EXTERNAL; | 382 | ecmd->transceiver = XCVR_EXTERNAL; |
384 | if ((qdev->link_status & STS_LINK_TYPE_MASK) == | 383 | if ((qdev->link_status & STS_LINK_TYPE_MASK) == |
385 | STS_LINK_TYPE_10GBASET) { | 384 | STS_LINK_TYPE_10GBASET) { |
386 | ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); | 385 | ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); |
387 | ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg); | 386 | ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg); |
388 | ecmd->port = PORT_TP; | 387 | ecmd->port = PORT_TP; |
388 | ecmd->autoneg = AUTONEG_ENABLE; | ||
389 | } else { | 389 | } else { |
390 | ecmd->supported |= SUPPORTED_FIBRE; | 390 | ecmd->supported |= SUPPORTED_FIBRE; |
391 | ecmd->advertising |= ADVERTISED_FIBRE; | 391 | ecmd->advertising |= ADVERTISED_FIBRE; |
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index b13ab544a7eb..8033555e53c2 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c | |||
@@ -1434,11 +1434,13 @@ map_error: | |||
1434 | } | 1434 | } |
1435 | 1435 | ||
1436 | /* Categorizing receive firmware frame errors */ | 1436 | /* Categorizing receive firmware frame errors */ |
1437 | static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err) | 1437 | static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err, |
1438 | struct rx_ring *rx_ring) | ||
1438 | { | 1439 | { |
1439 | struct nic_stats *stats = &qdev->nic_stats; | 1440 | struct nic_stats *stats = &qdev->nic_stats; |
1440 | 1441 | ||
1441 | stats->rx_err_count++; | 1442 | stats->rx_err_count++; |
1443 | rx_ring->rx_errors++; | ||
1442 | 1444 | ||
1443 | switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) { | 1445 | switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) { |
1444 | case IB_MAC_IOCB_RSP_ERR_CODE_ERR: | 1446 | case IB_MAC_IOCB_RSP_ERR_CODE_ERR: |
@@ -1474,6 +1476,12 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev, | |||
1474 | struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); | 1476 | struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); |
1475 | struct napi_struct *napi = &rx_ring->napi; | 1477 | struct napi_struct *napi = &rx_ring->napi; |
1476 | 1478 | ||
1479 | /* Frame error, so drop the packet. */ | ||
1480 | if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { | ||
1481 | ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); | ||
1482 | put_page(lbq_desc->p.pg_chunk.page); | ||
1483 | return; | ||
1484 | } | ||
1477 | napi->dev = qdev->ndev; | 1485 | napi->dev = qdev->ndev; |
1478 | 1486 | ||
1479 | skb = napi_get_frags(napi); | 1487 | skb = napi_get_frags(napi); |
@@ -1529,6 +1537,12 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev, | |||
1529 | addr = lbq_desc->p.pg_chunk.va; | 1537 | addr = lbq_desc->p.pg_chunk.va; |
1530 | prefetch(addr); | 1538 | prefetch(addr); |
1531 | 1539 | ||
1540 | /* Frame error, so drop the packet. */ | ||
1541 | if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { | ||
1542 | ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); | ||
1543 | goto err_out; | ||
1544 | } | ||
1545 | |||
1532 | /* The max framesize filter on this chip is set higher than | 1546 | /* The max framesize filter on this chip is set higher than |
1533 | * MTU since FCoE uses 2k frames. | 1547 | * MTU since FCoE uses 2k frames. |
1534 | */ | 1548 | */ |
@@ -1614,6 +1628,13 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev, | |||
1614 | memcpy(skb_put(new_skb, length), skb->data, length); | 1628 | memcpy(skb_put(new_skb, length), skb->data, length); |
1615 | skb = new_skb; | 1629 | skb = new_skb; |
1616 | 1630 | ||
1631 | /* Frame error, so drop the packet. */ | ||
1632 | if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { | ||
1633 | ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); | ||
1634 | dev_kfree_skb_any(skb); | ||
1635 | return; | ||
1636 | } | ||
1637 | |||
1617 | /* loopback self test for ethtool */ | 1638 | /* loopback self test for ethtool */ |
1618 | if (test_bit(QL_SELFTEST, &qdev->flags)) { | 1639 | if (test_bit(QL_SELFTEST, &qdev->flags)) { |
1619 | ql_check_lb_frame(qdev, skb); | 1640 | ql_check_lb_frame(qdev, skb); |
@@ -1919,6 +1940,13 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev, | |||
1919 | return; | 1940 | return; |
1920 | } | 1941 | } |
1921 | 1942 | ||
1943 | /* Frame error, so drop the packet. */ | ||
1944 | if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { | ||
1945 | ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); | ||
1946 | dev_kfree_skb_any(skb); | ||
1947 | return; | ||
1948 | } | ||
1949 | |||
1922 | /* The max framesize filter on this chip is set higher than | 1950 | /* The max framesize filter on this chip is set higher than |
1923 | * MTU since FCoE uses 2k frames. | 1951 | * MTU since FCoE uses 2k frames. |
1924 | */ | 1952 | */ |
@@ -2000,12 +2028,6 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev, | |||
2000 | 2028 | ||
2001 | QL_DUMP_IB_MAC_RSP(ib_mac_rsp); | 2029 | QL_DUMP_IB_MAC_RSP(ib_mac_rsp); |
2002 | 2030 | ||
2003 | /* Frame error, so drop the packet. */ | ||
2004 | if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { | ||
2005 | ql_categorize_rx_err(qdev, ib_mac_rsp->flags2); | ||
2006 | return (unsigned long)length; | ||
2007 | } | ||
2008 | |||
2009 | if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) { | 2031 | if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) { |
2010 | /* The data and headers are split into | 2032 | /* The data and headers are split into |
2011 | * separate buffers. | 2033 | * separate buffers. |
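The qlge hunks move the frame-error check out of ql_process_mac_rx_intr() and into each receive path (GRO page, page, skb, split), because only the individual path knows which resource, a page reference or an skb, has to be released when the frame is dropped; ql_categorize_rx_err() also bumps the per-ring rx_errors counter now. A generic sketch of "drop where you can free" (hypothetical names, malloc/free standing in for page and skb handling):

#include <stdio.h>
#include <stdlib.h>

/* Each receive path owns a different kind of buffer, so the error drop has to
 * happen inside the path that knows how to release it. */
static void rx_page_path(void *page, int frame_error)
{
        if (frame_error) {
                free(page);             /* put_page() equivalent in this sketch */
                puts("page path: bad frame dropped, page released");
                return;
        }
        puts("page path: frame delivered");
        free(page);
}

static void rx_skb_path(void *skb, int frame_error)
{
        if (frame_error) {
                free(skb);              /* dev_kfree_skb_any() equivalent */
                puts("skb path: bad frame dropped, skb released");
                return;
        }
        puts("skb path: frame delivered");
        free(skb);
}

int main(void)
{
        rx_page_path(malloc(32), 1);
        rx_skb_path(malloc(32), 0);
        return 0;
}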
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 28fb50a1e9c3..4ecbe64a758d 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
@@ -3818,6 +3818,30 @@ static void rtl_init_mdio_ops(struct rtl8169_private *tp) | |||
3818 | } | 3818 | } |
3819 | } | 3819 | } |
3820 | 3820 | ||
3821 | static void rtl_speed_down(struct rtl8169_private *tp) | ||
3822 | { | ||
3823 | u32 adv; | ||
3824 | int lpa; | ||
3825 | |||
3826 | rtl_writephy(tp, 0x1f, 0x0000); | ||
3827 | lpa = rtl_readphy(tp, MII_LPA); | ||
3828 | |||
3829 | if (lpa & (LPA_10HALF | LPA_10FULL)) | ||
3830 | adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full; | ||
3831 | else if (lpa & (LPA_100HALF | LPA_100FULL)) | ||
3832 | adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | | ||
3833 | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; | ||
3834 | else | ||
3835 | adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | | ||
3836 | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | | ||
3837 | (tp->mii.supports_gmii ? | ||
3838 | ADVERTISED_1000baseT_Half | | ||
3839 | ADVERTISED_1000baseT_Full : 0); | ||
3840 | |||
3841 | rtl8169_set_speed(tp->dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL, | ||
3842 | adv); | ||
3843 | } | ||
3844 | |||
3821 | static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) | 3845 | static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) |
3822 | { | 3846 | { |
3823 | void __iomem *ioaddr = tp->mmio_addr; | 3847 | void __iomem *ioaddr = tp->mmio_addr; |
@@ -3848,9 +3872,7 @@ static bool rtl_wol_pll_power_down(struct rtl8169_private *tp) | |||
3848 | if (!(__rtl8169_get_wol(tp) & WAKE_ANY)) | 3872 | if (!(__rtl8169_get_wol(tp) & WAKE_ANY)) |
3849 | return false; | 3873 | return false; |
3850 | 3874 | ||
3851 | rtl_writephy(tp, 0x1f, 0x0000); | 3875 | rtl_speed_down(tp); |
3852 | rtl_writephy(tp, MII_BMCR, 0x0000); | ||
3853 | |||
3854 | rtl_wol_suspend_quirk(tp); | 3876 | rtl_wol_suspend_quirk(tp); |
3855 | 3877 | ||
3856 | return true; | 3878 | return true; |
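rtl_speed_down() replaces the old BMCR write on suspend: instead of forcing the PHY down, it re-advertises only the lowest speed the link partner is known to support (read from MII_LPA), falling back to the full 10/100(/1000) set when nothing is known, so Wake-on-LAN keeps a link at minimal power. A user-space sketch of that advertisement selection; the LPA_* values follow linux/mii.h, while the ADV_* codes are simplified placeholders:

#include <stdio.h>

#define LPA_10HALF  0x0020
#define LPA_10FULL  0x0040
#define LPA_100HALF 0x0080
#define LPA_100FULL 0x0100

#define ADV_10   0x1
#define ADV_100  0x2
#define ADV_1000 0x4

/* advertise only as much as the partner needs, so the PHY can idle at low speed */
static unsigned int speed_down_adv(unsigned int lpa, int gmii)
{
        if (lpa & (LPA_10HALF | LPA_10FULL))
                return ADV_10;
        if (lpa & (LPA_100HALF | LPA_100FULL))
                return ADV_10 | ADV_100;
        return ADV_10 | ADV_100 | (gmii ? ADV_1000 : 0);
}

int main(void)
{
        printf("partner does 10:  adv=%#x\n", speed_down_adv(LPA_10FULL, 1));
        printf("partner does 100: adv=%#x\n", speed_down_adv(LPA_100FULL, 1));
        printf("unknown partner:  adv=%#x\n", speed_down_adv(0, 1));
        return 0;
}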
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index bf5e3cf97c4d..6ed333fe5c04 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c | |||
@@ -1216,10 +1216,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status) | |||
1216 | if (felic_stat & ECSR_LCHNG) { | 1216 | if (felic_stat & ECSR_LCHNG) { |
1217 | /* Link Changed */ | 1217 | /* Link Changed */ |
1218 | if (mdp->cd->no_psr || mdp->no_ether_link) { | 1218 | if (mdp->cd->no_psr || mdp->no_ether_link) { |
1219 | if (mdp->link == PHY_DOWN) | 1219 | goto ignore_link; |
1220 | link_stat = 0; | ||
1221 | else | ||
1222 | link_stat = PHY_ST_LINK; | ||
1223 | } else { | 1220 | } else { |
1224 | link_stat = (sh_eth_read(ndev, PSR)); | 1221 | link_stat = (sh_eth_read(ndev, PSR)); |
1225 | if (mdp->ether_link_active_low) | 1222 | if (mdp->ether_link_active_low) |
@@ -1242,6 +1239,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status) | |||
1242 | } | 1239 | } |
1243 | } | 1240 | } |
1244 | 1241 | ||
1242 | ignore_link: | ||
1245 | if (intr_status & EESR_TWB) { | 1243 | if (intr_status & EESR_TWB) { |
1246 | /* Write buck end. unused write back interrupt */ | 1244 | /* Write buck end. unused write back interrupt */ |
1247 | if (intr_status & EESR_TABT) /* Transmit Abort int */ | 1245 | if (intr_status & EESR_TABT) /* Transmit Abort int */ |
@@ -1326,12 +1324,18 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) | |||
1326 | struct sh_eth_private *mdp = netdev_priv(ndev); | 1324 | struct sh_eth_private *mdp = netdev_priv(ndev); |
1327 | struct sh_eth_cpu_data *cd = mdp->cd; | 1325 | struct sh_eth_cpu_data *cd = mdp->cd; |
1328 | irqreturn_t ret = IRQ_NONE; | 1326 | irqreturn_t ret = IRQ_NONE; |
1329 | u32 intr_status = 0; | 1327 | unsigned long intr_status; |
1330 | 1328 | ||
1331 | spin_lock(&mdp->lock); | 1329 | spin_lock(&mdp->lock); |
1332 | 1330 | ||
1333 | /* Get interrpt stat */ | 1331 | /* Get interrupt status */ |
1334 | intr_status = sh_eth_read(ndev, EESR); | 1332 | intr_status = sh_eth_read(ndev, EESR); |
1333 | /* Mask it with the interrupt mask, forcing ECI interrupt to be always | ||
1334 | * enabled since it's the one that comes thru regardless of the mask, | ||
1335 | * and we need to fully handle it in sh_eth_error() in order to quench | ||
1336 | * it as it doesn't get cleared by just writing 1 to the ECI bit... | ||
1337 | */ | ||
1338 | intr_status &= sh_eth_read(ndev, EESIPR) | DMAC_M_ECI; | ||
1335 | /* Clear interrupt */ | 1339 | /* Clear interrupt */ |
1336 | if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF | | 1340 | if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF | |
1337 | EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF | | 1341 | EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF | |
@@ -1373,7 +1377,7 @@ static void sh_eth_adjust_link(struct net_device *ndev) | |||
1373 | struct phy_device *phydev = mdp->phydev; | 1377 | struct phy_device *phydev = mdp->phydev; |
1374 | int new_state = 0; | 1378 | int new_state = 0; |
1375 | 1379 | ||
1376 | if (phydev->link != PHY_DOWN) { | 1380 | if (phydev->link) { |
1377 | if (phydev->duplex != mdp->duplex) { | 1381 | if (phydev->duplex != mdp->duplex) { |
1378 | new_state = 1; | 1382 | new_state = 1; |
1379 | mdp->duplex = phydev->duplex; | 1383 | mdp->duplex = phydev->duplex; |
@@ -1387,17 +1391,21 @@ static void sh_eth_adjust_link(struct net_device *ndev) | |||
1387 | if (mdp->cd->set_rate) | 1391 | if (mdp->cd->set_rate) |
1388 | mdp->cd->set_rate(ndev); | 1392 | mdp->cd->set_rate(ndev); |
1389 | } | 1393 | } |
1390 | if (mdp->link == PHY_DOWN) { | 1394 | if (!mdp->link) { |
1391 | sh_eth_write(ndev, | 1395 | sh_eth_write(ndev, |
1392 | (sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR); | 1396 | (sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR); |
1393 | new_state = 1; | 1397 | new_state = 1; |
1394 | mdp->link = phydev->link; | 1398 | mdp->link = phydev->link; |
1399 | if (mdp->cd->no_psr || mdp->no_ether_link) | ||
1400 | sh_eth_rcv_snd_enable(ndev); | ||
1395 | } | 1401 | } |
1396 | } else if (mdp->link) { | 1402 | } else if (mdp->link) { |
1397 | new_state = 1; | 1403 | new_state = 1; |
1398 | mdp->link = PHY_DOWN; | 1404 | mdp->link = 0; |
1399 | mdp->speed = 0; | 1405 | mdp->speed = 0; |
1400 | mdp->duplex = -1; | 1406 | mdp->duplex = -1; |
1407 | if (mdp->cd->no_psr || mdp->no_ether_link) | ||
1408 | sh_eth_rcv_snd_disable(ndev); | ||
1401 | } | 1409 | } |
1402 | 1410 | ||
1403 | if (new_state && netif_msg_link(mdp)) | 1411 | if (new_state && netif_msg_link(mdp)) |
@@ -1414,7 +1422,7 @@ static int sh_eth_phy_init(struct net_device *ndev) | |||
1414 | snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, | 1422 | snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, |
1415 | mdp->mii_bus->id , mdp->phy_id); | 1423 | mdp->mii_bus->id , mdp->phy_id); |
1416 | 1424 | ||
1417 | mdp->link = PHY_DOWN; | 1425 | mdp->link = 0; |
1418 | mdp->speed = 0; | 1426 | mdp->speed = 0; |
1419 | mdp->duplex = -1; | 1427 | mdp->duplex = -1; |
1420 | 1428 | ||
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index e6655678458e..828be4515008 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h | |||
@@ -723,7 +723,7 @@ struct sh_eth_private { | |||
723 | u32 phy_id; /* PHY ID */ | 723 | u32 phy_id; /* PHY ID */ |
724 | struct mii_bus *mii_bus; /* MDIO bus control */ | 724 | struct mii_bus *mii_bus; /* MDIO bus control */ |
725 | struct phy_device *phydev; /* PHY device control */ | 725 | struct phy_device *phydev; /* PHY device control */ |
726 | enum phy_state link; | 726 | int link; |
727 | phy_interface_t phy_interface; | 727 | phy_interface_t phy_interface; |
728 | int msg_enable; | 728 | int msg_enable; |
729 | int speed; | 729 | int speed; |
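The sh_eth changes stop reusing phylib's enum phy_state for mdp->link (it is now a plain 0/1 taken from phydev->link) and, in the interrupt handler, mask the raw EESR value with the bits enabled in EESIPR while always keeping the ECI bit, since that interrupt arrives regardless of the mask and must reach sh_eth_error() to be quenched. A bitmask sketch of the status filtering (bit positions are illustrative, not the real register layout):

#include <stdio.h>
#include <stdint.h>

#define ECI_BIT (1u << 22)  /* illustrative position */
#define RX_DONE (1u << 1)
#define TX_DONE (1u << 0)

int main(void)
{
        uint32_t eesr   = ECI_BIT | RX_DONE | TX_DONE;  /* everything fired */
        uint32_t eesipr = RX_DONE;                      /* only RX is enabled */

        /* keep only enabled sources, but never filter out ECI */
        uint32_t intr_status = eesr & (eesipr | ECI_BIT);

        printf("raw=%#x handled=%#x\n", eesr, intr_status);  /* TX_DONE dropped */
        return 0;
}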
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c index 0c74a702d461..50617c5a0bdb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c | |||
@@ -149,6 +149,7 @@ void dwmac_mmc_intr_all_mask(void __iomem *ioaddr) | |||
149 | { | 149 | { |
150 | writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK); | 150 | writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK); |
151 | writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK); | 151 | writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK); |
152 | writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_IPC_INTR_MASK); | ||
152 | } | 153 | } |
153 | 154 | ||
154 | /* This reads the MAC core counters (if actaully supported). | 155 | /* This reads the MAC core counters (if actaully supported). |
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index df32a090d08e..4781d3d8e182 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
@@ -436,7 +436,7 @@ void cpsw_tx_handler(void *token, int len, int status) | |||
436 | * queue is stopped then start the queue as we have free desc for tx | 436 | * queue is stopped then start the queue as we have free desc for tx |
437 | */ | 437 | */ |
438 | if (unlikely(netif_queue_stopped(ndev))) | 438 | if (unlikely(netif_queue_stopped(ndev))) |
439 | netif_start_queue(ndev); | 439 | netif_wake_queue(ndev); |
440 | cpts_tx_timestamp(priv->cpts, skb); | 440 | cpts_tx_timestamp(priv->cpts, skb); |
441 | priv->stats.tx_packets++; | 441 | priv->stats.tx_packets++; |
442 | priv->stats.tx_bytes += len; | 442 | priv->stats.tx_bytes += len; |
@@ -1380,7 +1380,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, | |||
1380 | memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN); | 1380 | memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN); |
1381 | 1381 | ||
1382 | if (data->dual_emac) { | 1382 | if (data->dual_emac) { |
1383 | if (of_property_read_u32(node, "dual_emac_res_vlan", | 1383 | if (of_property_read_u32(slave_node, "dual_emac_res_vlan", |
1384 | &prop)) { | 1384 | &prop)) { |
1385 | pr_err("Missing dual_emac_res_vlan in DT.\n"); | 1385 | pr_err("Missing dual_emac_res_vlan in DT.\n"); |
1386 | slave_data->dual_emac_res_vlan = i+1; | 1386 | slave_data->dual_emac_res_vlan = i+1; |
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index ae1b77aa199f..72300bc9e378 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c | |||
@@ -1053,7 +1053,7 @@ static void emac_tx_handler(void *token, int len, int status) | |||
1053 | * queue is stopped then start the queue as we have free desc for tx | 1053 | * queue is stopped then start the queue as we have free desc for tx |
1054 | */ | 1054 | */ |
1055 | if (unlikely(netif_queue_stopped(ndev))) | 1055 | if (unlikely(netif_queue_stopped(ndev))) |
1056 | netif_start_queue(ndev); | 1056 | netif_wake_queue(ndev); |
1057 | ndev->stats.tx_packets++; | 1057 | ndev->stats.tx_packets++; |
1058 | ndev->stats.tx_bytes += len; | 1058 | ndev->stats.tx_bytes += len; |
1059 | dev_kfree_skb_any(skb); | 1059 | dev_kfree_skb_any(skb); |
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 1cd77483da50..f5f0f09e4cc5 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c | |||
@@ -470,8 +470,10 @@ static void netvsc_send_completion(struct hv_device *device, | |||
470 | packet->trans_id; | 470 | packet->trans_id; |
471 | 471 | ||
472 | /* Notify the layer above us */ | 472 | /* Notify the layer above us */ |
473 | nvsc_packet->completion.send.send_completion( | 473 | if (nvsc_packet) |
474 | nvsc_packet->completion.send.send_completion_ctx); | 474 | nvsc_packet->completion.send.send_completion( |
475 | nvsc_packet->completion.send. | ||
476 | send_completion_ctx); | ||
475 | 477 | ||
476 | num_outstanding_sends = | 478 | num_outstanding_sends = |
477 | atomic_dec_return(&net_device->num_outstanding_sends); | 479 | atomic_dec_return(&net_device->num_outstanding_sends); |
@@ -498,6 +500,7 @@ int netvsc_send(struct hv_device *device, | |||
498 | int ret = 0; | 500 | int ret = 0; |
499 | struct nvsp_message sendMessage; | 501 | struct nvsp_message sendMessage; |
500 | struct net_device *ndev; | 502 | struct net_device *ndev; |
503 | u64 req_id; | ||
501 | 504 | ||
502 | net_device = get_outbound_net_device(device); | 505 | net_device = get_outbound_net_device(device); |
503 | if (!net_device) | 506 | if (!net_device) |
@@ -518,20 +521,24 @@ int netvsc_send(struct hv_device *device, | |||
518 | 0xFFFFFFFF; | 521 | 0xFFFFFFFF; |
519 | sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0; | 522 | sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0; |
520 | 523 | ||
524 | if (packet->completion.send.send_completion) | ||
525 | req_id = (u64)packet; | ||
526 | else | ||
527 | req_id = 0; | ||
528 | |||
521 | if (packet->page_buf_cnt) { | 529 | if (packet->page_buf_cnt) { |
522 | ret = vmbus_sendpacket_pagebuffer(device->channel, | 530 | ret = vmbus_sendpacket_pagebuffer(device->channel, |
523 | packet->page_buf, | 531 | packet->page_buf, |
524 | packet->page_buf_cnt, | 532 | packet->page_buf_cnt, |
525 | &sendMessage, | 533 | &sendMessage, |
526 | sizeof(struct nvsp_message), | 534 | sizeof(struct nvsp_message), |
527 | (unsigned long)packet); | 535 | req_id); |
528 | } else { | 536 | } else { |
529 | ret = vmbus_sendpacket(device->channel, &sendMessage, | 537 | ret = vmbus_sendpacket(device->channel, &sendMessage, |
530 | sizeof(struct nvsp_message), | 538 | sizeof(struct nvsp_message), |
531 | (unsigned long)packet, | 539 | req_id, |
532 | VM_PKT_DATA_INBAND, | 540 | VM_PKT_DATA_INBAND, |
533 | VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); | 541 | VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); |
534 | |||
535 | } | 542 | } |
536 | 543 | ||
537 | if (ret == 0) { | 544 | if (ret == 0) { |
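netvsc_send() now passes a zero request id whenever no completion callback is attached, and netvsc_send_completion() checks the looked-up packet pointer before calling through it, so host completions for fire-and-forget sends no longer dereference a stale pointer. Minimal sketch of the convention (struct packet and the helpers are invented for illustration):

#include <stdio.h>
#include <stdint.h>

struct packet {
        void (*complete)(void *ctx);
        void *ctx;
};

static void done(void *ctx) { printf("completed %s\n", (const char *)ctx); }

/* the id travels to the host and comes back in the completion message */
static uint64_t send_sketch(struct packet *pkt)
{
        return pkt->complete ? (uint64_t)(uintptr_t)pkt : 0;
}

static void completion_sketch(uint64_t req_id)
{
        struct packet *pkt = (struct packet *)(uintptr_t)req_id;

        if (pkt)                        /* id 0 means "nothing to call back" */
                pkt->complete(pkt->ctx);
}

int main(void)
{
        struct packet with_cb = { .complete = done, .ctx = "data packet" };
        struct packet no_cb   = { 0 };

        completion_sketch(send_sketch(&with_cb));
        completion_sketch(send_sketch(&no_cb));  /* silently ignored */
        return 0;
}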
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 5f85205cd12b..8341b62e5521 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
@@ -241,13 +241,11 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj, | |||
241 | 241 | ||
242 | if (status == 1) { | 242 | if (status == 1) { |
243 | netif_carrier_on(net); | 243 | netif_carrier_on(net); |
244 | netif_wake_queue(net); | ||
245 | ndev_ctx = netdev_priv(net); | 244 | ndev_ctx = netdev_priv(net); |
246 | schedule_delayed_work(&ndev_ctx->dwork, 0); | 245 | schedule_delayed_work(&ndev_ctx->dwork, 0); |
247 | schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20)); | 246 | schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20)); |
248 | } else { | 247 | } else { |
249 | netif_carrier_off(net); | 248 | netif_carrier_off(net); |
250 | netif_tx_disable(net); | ||
251 | } | 249 | } |
252 | } | 250 | } |
253 | 251 | ||
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 2b657d4d63a8..0775f0aefd1e 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c | |||
@@ -61,9 +61,6 @@ struct rndis_request { | |||
61 | 61 | ||
62 | static void rndis_filter_send_completion(void *ctx); | 62 | static void rndis_filter_send_completion(void *ctx); |
63 | 63 | ||
64 | static void rndis_filter_send_request_completion(void *ctx); | ||
65 | |||
66 | |||
67 | 64 | ||
68 | static struct rndis_device *get_rndis_device(void) | 65 | static struct rndis_device *get_rndis_device(void) |
69 | { | 66 | { |
@@ -241,10 +238,7 @@ static int rndis_filter_send_request(struct rndis_device *dev, | |||
241 | packet->page_buf[0].len; | 238 | packet->page_buf[0].len; |
242 | } | 239 | } |
243 | 240 | ||
244 | packet->completion.send.send_completion_ctx = req;/* packet; */ | 241 | packet->completion.send.send_completion = NULL; |
245 | packet->completion.send.send_completion = | ||
246 | rndis_filter_send_request_completion; | ||
247 | packet->completion.send.send_completion_tid = (unsigned long)dev; | ||
248 | 242 | ||
249 | ret = netvsc_send(dev->net_dev->dev, packet); | 243 | ret = netvsc_send(dev->net_dev->dev, packet); |
250 | return ret; | 244 | return ret; |
@@ -999,9 +993,3 @@ static void rndis_filter_send_completion(void *ctx) | |||
999 | /* Pass it back to the original handler */ | 993 | /* Pass it back to the original handler */ |
1000 | filter_pkt->completion(filter_pkt->completion_ctx); | 994 | filter_pkt->completion(filter_pkt->completion_ctx); |
1001 | } | 995 | } |
1002 | |||
1003 | |||
1004 | static void rndis_filter_send_request_completion(void *ctx) | ||
1005 | { | ||
1006 | /* Noop */ | ||
1007 | } | ||
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index b7c457adc0dc..729ed533bb33 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -1594,7 +1594,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) | |||
1594 | 1594 | ||
1595 | if (tun->flags & TUN_TAP_MQ && | 1595 | if (tun->flags & TUN_TAP_MQ && |
1596 | (tun->numqueues + tun->numdisabled > 1)) | 1596 | (tun->numqueues + tun->numdisabled > 1)) |
1597 | return err; | 1597 | return -EBUSY; |
1598 | } | 1598 | } |
1599 | else { | 1599 | else { |
1600 | char *name; | 1600 | char *name; |
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index 16c842997291..6bd91676d2cb 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c | |||
@@ -134,7 +134,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb | |||
134 | goto error; | 134 | goto error; |
135 | 135 | ||
136 | if (skb) { | 136 | if (skb) { |
137 | if (skb->len <= sizeof(ETH_HLEN)) | 137 | if (skb->len <= ETH_HLEN) |
138 | goto error; | 138 | goto error; |
139 | 139 | ||
140 | /* mapping VLANs to MBIM sessions: | 140 | /* mapping VLANs to MBIM sessions: |
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 968d5d50751d..2a3579f67910 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/netdevice.h> | 14 | #include <linux/netdevice.h> |
15 | #include <linux/ethtool.h> | 15 | #include <linux/ethtool.h> |
16 | #include <linux/etherdevice.h> | ||
16 | #include <linux/mii.h> | 17 | #include <linux/mii.h> |
17 | #include <linux/usb.h> | 18 | #include <linux/usb.h> |
18 | #include <linux/usb/cdc.h> | 19 | #include <linux/usb/cdc.h> |
@@ -52,6 +53,96 @@ struct qmi_wwan_state { | |||
52 | struct usb_interface *data; | 53 | struct usb_interface *data; |
53 | }; | 54 | }; |
54 | 55 | ||
56 | /* default ethernet address used by the modem */ | ||
57 | static const u8 default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3}; | ||
58 | |||
59 | /* Make up an ethernet header if the packet doesn't have one. | ||
60 | * | ||
61 | * A firmware bug common among several devices causes them to send raw | ||
62 | * IP packets under some circumstances. There is no way for the | ||
63 | * driver/host to know when this will happen. And even when the bug | ||
64 | * hits, some packets will still arrive with an intact header. | ||
65 | * | ||
66 | * The supported devices are only capable of sending IPv4, IPv6 and | ||
67 | * ARP packets on a point-to-point link. Any packet with an ethernet | ||
68 | * header will have either our address or a broadcast/multicast | ||
69 | * address as destination. ARP packets will always have a header. | ||
70 | * | ||
71 | * This means that this function will reliably add the appropriate | ||
72 | * header iff necessary, provided our hardware address does not start | ||
73 | * with 4 or 6. | ||
74 | * | ||
75 | * Another common firmware bug results in all packets being addressed | ||
76 | * to 00:a0:c6:00:00:00 despite the host address being different. | ||
77 | * This function will also fixup such packets. | ||
78 | */ | ||
79 | static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | ||
80 | { | ||
81 | __be16 proto; | ||
82 | |||
83 | /* usbnet rx_complete guarantees that skb->len is at least | ||
84 | * hard_header_len, so we can inspect the dest address without | ||
85 | * checking skb->len | ||
86 | */ | ||
87 | switch (skb->data[0] & 0xf0) { | ||
88 | case 0x40: | ||
89 | proto = htons(ETH_P_IP); | ||
90 | break; | ||
91 | case 0x60: | ||
92 | proto = htons(ETH_P_IPV6); | ||
93 | break; | ||
94 | case 0x00: | ||
95 | if (is_multicast_ether_addr(skb->data)) | ||
96 | return 1; | ||
97 | /* possibly bogus destination - rewrite just in case */ | ||
98 | skb_reset_mac_header(skb); | ||
99 | goto fix_dest; | ||
100 | default: | ||
101 | /* pass along other packets without modifications */ | ||
102 | return 1; | ||
103 | } | ||
104 | if (skb_headroom(skb) < ETH_HLEN) | ||
105 | return 0; | ||
106 | skb_push(skb, ETH_HLEN); | ||
107 | skb_reset_mac_header(skb); | ||
108 | eth_hdr(skb)->h_proto = proto; | ||
109 | memset(eth_hdr(skb)->h_source, 0, ETH_ALEN); | ||
110 | fix_dest: | ||
111 | memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN); | ||
112 | return 1; | ||
113 | } | ||
114 | |||
115 | /* very simplistic detection of IPv4 or IPv6 headers */ | ||
116 | static bool possibly_iphdr(const char *data) | ||
117 | { | ||
118 | return (data[0] & 0xd0) == 0x40; | ||
119 | } | ||
120 | |||
121 | /* disallow addresses which may be confused with IP headers */ | ||
122 | static int qmi_wwan_mac_addr(struct net_device *dev, void *p) | ||
123 | { | ||
124 | int ret; | ||
125 | struct sockaddr *addr = p; | ||
126 | |||
127 | ret = eth_prepare_mac_addr_change(dev, p); | ||
128 | if (ret < 0) | ||
129 | return ret; | ||
130 | if (possibly_iphdr(addr->sa_data)) | ||
131 | return -EADDRNOTAVAIL; | ||
132 | eth_commit_mac_addr_change(dev, p); | ||
133 | return 0; | ||
134 | } | ||
135 | |||
136 | static const struct net_device_ops qmi_wwan_netdev_ops = { | ||
137 | .ndo_open = usbnet_open, | ||
138 | .ndo_stop = usbnet_stop, | ||
139 | .ndo_start_xmit = usbnet_start_xmit, | ||
140 | .ndo_tx_timeout = usbnet_tx_timeout, | ||
141 | .ndo_change_mtu = usbnet_change_mtu, | ||
142 | .ndo_set_mac_address = qmi_wwan_mac_addr, | ||
143 | .ndo_validate_addr = eth_validate_addr, | ||
144 | }; | ||
145 | |||
55 | /* using a counter to merge subdriver requests with our own into a combined state */ | 146 | /* using a counter to merge subdriver requests with our own into a combined state */ |
56 | static int qmi_wwan_manage_power(struct usbnet *dev, int on) | 147 | static int qmi_wwan_manage_power(struct usbnet *dev, int on) |
57 | { | 148 | { |
@@ -229,6 +320,18 @@ next_desc: | |||
229 | usb_driver_release_interface(driver, info->data); | 320 | usb_driver_release_interface(driver, info->data); |
230 | } | 321 | } |
231 | 322 | ||
323 | /* Never use the same address on both ends of the link, even | ||
324 | * if the buggy firmware told us to. | ||
325 | */ | ||
326 | if (!compare_ether_addr(dev->net->dev_addr, default_modem_addr)) | ||
327 | eth_hw_addr_random(dev->net); | ||
328 | |||
329 | /* make MAC addr easily distinguishable from an IP header */ | ||
330 | if (possibly_iphdr(dev->net->dev_addr)) { | ||
331 | dev->net->dev_addr[0] |= 0x02; /* set local assignment bit */ | ||
332 | dev->net->dev_addr[0] &= 0xbf; /* clear "IP" bit */ | ||
333 | } | ||
334 | dev->net->netdev_ops = &qmi_wwan_netdev_ops; | ||
232 | err: | 335 | err: |
233 | return status; | 336 | return status; |
234 | } | 337 | } |
@@ -307,6 +410,7 @@ static const struct driver_info qmi_wwan_info = { | |||
307 | .bind = qmi_wwan_bind, | 410 | .bind = qmi_wwan_bind, |
308 | .unbind = qmi_wwan_unbind, | 411 | .unbind = qmi_wwan_unbind, |
309 | .manage_power = qmi_wwan_manage_power, | 412 | .manage_power = qmi_wwan_manage_power, |
413 | .rx_fixup = qmi_wwan_rx_fixup, | ||
310 | }; | 414 | }; |
311 | 415 | ||
312 | #define HUAWEI_VENDOR_ID 0x12D1 | 416 | #define HUAWEI_VENDOR_ID 0x12D1 |
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 9abe51710f22..1a15ec14c386 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c | |||
@@ -914,8 +914,12 @@ static int smsc75xx_set_rx_max_frame_length(struct usbnet *dev, int size) | |||
914 | static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu) | 914 | static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu) |
915 | { | 915 | { |
916 | struct usbnet *dev = netdev_priv(netdev); | 916 | struct usbnet *dev = netdev_priv(netdev); |
917 | int ret; | ||
918 | |||
919 | if (new_mtu > MAX_SINGLE_PACKET_SIZE) | ||
920 | return -EINVAL; | ||
917 | 921 | ||
918 | int ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu); | 922 | ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN); |
919 | if (ret < 0) { | 923 | if (ret < 0) { |
920 | netdev_warn(dev->net, "Failed to set mac rx frame length\n"); | 924 | netdev_warn(dev->net, "Failed to set mac rx frame length\n"); |
921 | return ret; | 925 | return ret; |
@@ -1324,7 +1328,7 @@ static int smsc75xx_reset(struct usbnet *dev) | |||
1324 | 1328 | ||
1325 | netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x\n", buf); | 1329 | netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x\n", buf); |
1326 | 1330 | ||
1327 | ret = smsc75xx_set_rx_max_frame_length(dev, 1514); | 1331 | ret = smsc75xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN); |
1328 | if (ret < 0) { | 1332 | if (ret < 0) { |
1329 | netdev_warn(dev->net, "Failed to set max rx frame length\n"); | 1333 | netdev_warn(dev->net, "Failed to set max rx frame length\n"); |
1330 | return ret; | 1334 | return ret; |
@@ -2134,8 +2138,8 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | |||
2134 | else if (rx_cmd_a & (RX_CMD_A_LONG | RX_CMD_A_RUNT)) | 2138 | else if (rx_cmd_a & (RX_CMD_A_LONG | RX_CMD_A_RUNT)) |
2135 | dev->net->stats.rx_frame_errors++; | 2139 | dev->net->stats.rx_frame_errors++; |
2136 | } else { | 2140 | } else { |
2137 | /* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */ | 2141 | /* MAX_SINGLE_PACKET_SIZE + 4(CRC) + 2(COE) + 4(Vlan) */ |
2138 | if (unlikely(size > (ETH_FRAME_LEN + 12))) { | 2142 | if (unlikely(size > (MAX_SINGLE_PACKET_SIZE + ETH_HLEN + 12))) { |
2139 | netif_dbg(dev, rx_err, dev->net, | 2143 | netif_dbg(dev, rx_err, dev->net, |
2140 | "size err rx_cmd_a=0x%08x\n", | 2144 | "size err rx_cmd_a=0x%08x\n", |
2141 | rx_cmd_a); | 2145 | rx_cmd_a); |
diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h index 28fd99203f64..bdee2ed67219 100644 --- a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h | |||
@@ -519,7 +519,7 @@ static const u32 ar9580_1p0_mac_core[][2] = { | |||
519 | {0x00008258, 0x00000000}, | 519 | {0x00008258, 0x00000000}, |
520 | {0x0000825c, 0x40000000}, | 520 | {0x0000825c, 0x40000000}, |
521 | {0x00008260, 0x00080922}, | 521 | {0x00008260, 0x00080922}, |
522 | {0x00008264, 0x9bc00010}, | 522 | {0x00008264, 0x9d400010}, |
523 | {0x00008268, 0xffffffff}, | 523 | {0x00008268, 0xffffffff}, |
524 | {0x0000826c, 0x0000ffff}, | 524 | {0x0000826c, 0x0000ffff}, |
525 | {0x00008270, 0x00000000}, | 525 | {0x00008270, 0x00000000}, |
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c index 467b60014b7b..73fe8d6db566 100644 --- a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c +++ b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c | |||
@@ -143,14 +143,14 @@ channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq) | |||
143 | u32 sz, i; | 143 | u32 sz, i; |
144 | struct channel_detector *cd; | 144 | struct channel_detector *cd; |
145 | 145 | ||
146 | cd = kmalloc(sizeof(*cd), GFP_KERNEL); | 146 | cd = kmalloc(sizeof(*cd), GFP_ATOMIC); |
147 | if (cd == NULL) | 147 | if (cd == NULL) |
148 | goto fail; | 148 | goto fail; |
149 | 149 | ||
150 | INIT_LIST_HEAD(&cd->head); | 150 | INIT_LIST_HEAD(&cd->head); |
151 | cd->freq = freq; | 151 | cd->freq = freq; |
152 | sz = sizeof(cd->detectors) * dpd->num_radar_types; | 152 | sz = sizeof(cd->detectors) * dpd->num_radar_types; |
153 | cd->detectors = kzalloc(sz, GFP_KERNEL); | 153 | cd->detectors = kzalloc(sz, GFP_ATOMIC); |
154 | if (cd->detectors == NULL) | 154 | if (cd->detectors == NULL) |
155 | goto fail; | 155 | goto fail; |
156 | 156 | ||
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c b/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c index 91b8dceeadb1..5e48c5515b8c 100644 --- a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c +++ b/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c | |||
@@ -218,7 +218,7 @@ static bool pulse_queue_enqueue(struct pri_detector *pde, u64 ts) | |||
218 | { | 218 | { |
219 | struct pulse_elem *p = pool_get_pulse_elem(); | 219 | struct pulse_elem *p = pool_get_pulse_elem(); |
220 | if (p == NULL) { | 220 | if (p == NULL) { |
221 | p = kmalloc(sizeof(*p), GFP_KERNEL); | 221 | p = kmalloc(sizeof(*p), GFP_ATOMIC); |
222 | if (p == NULL) { | 222 | if (p == NULL) { |
223 | DFS_POOL_STAT_INC(pulse_alloc_error); | 223 | DFS_POOL_STAT_INC(pulse_alloc_error); |
224 | return false; | 224 | return false; |
@@ -299,7 +299,7 @@ static bool pseq_handler_create_sequences(struct pri_detector *pde, | |||
299 | ps.deadline_ts = ps.first_ts + ps.dur; | 299 | ps.deadline_ts = ps.first_ts + ps.dur; |
300 | new_ps = pool_get_pseq_elem(); | 300 | new_ps = pool_get_pseq_elem(); |
301 | if (new_ps == NULL) { | 301 | if (new_ps == NULL) { |
302 | new_ps = kmalloc(sizeof(*new_ps), GFP_KERNEL); | 302 | new_ps = kmalloc(sizeof(*new_ps), GFP_ATOMIC); |
303 | if (new_ps == NULL) { | 303 | if (new_ps == NULL) { |
304 | DFS_POOL_STAT_INC(pseq_alloc_error); | 304 | DFS_POOL_STAT_INC(pseq_alloc_error); |
305 | return false; | 305 | return false; |
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index 716058b67557..a47f5e05fc04 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c | |||
@@ -796,7 +796,7 @@ static int ath9k_init_firmware_version(struct ath9k_htc_priv *priv) | |||
796 | * required version. | 796 | * required version. |
797 | */ | 797 | */ |
798 | if (priv->fw_version_major != MAJOR_VERSION_REQ || | 798 | if (priv->fw_version_major != MAJOR_VERSION_REQ || |
799 | priv->fw_version_minor != MINOR_VERSION_REQ) { | 799 | priv->fw_version_minor < MINOR_VERSION_REQ) { |
800 | dev_err(priv->dev, "ath9k_htc: Please upgrade to FW version %d.%d\n", | 800 | dev_err(priv->dev, "ath9k_htc: Please upgrade to FW version %d.%d\n", |
801 | MAJOR_VERSION_REQ, MINOR_VERSION_REQ); | 801 | MAJOR_VERSION_REQ, MINOR_VERSION_REQ); |
802 | return -EINVAL; | 802 | return -EINVAL; |
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c index 39c84ecf6a42..7fdac6c7b3ea 100644 --- a/drivers/net/wireless/ath/ath9k/link.c +++ b/drivers/net/wireless/ath/ath9k/link.c | |||
@@ -170,7 +170,8 @@ void ath_rx_poll(unsigned long data) | |||
170 | { | 170 | { |
171 | struct ath_softc *sc = (struct ath_softc *)data; | 171 | struct ath_softc *sc = (struct ath_softc *)data; |
172 | 172 | ||
173 | ieee80211_queue_work(sc->hw, &sc->hw_check_work); | 173 | if (!test_bit(SC_OP_INVALID, &sc->sc_flags)) |
174 | ieee80211_queue_work(sc->hw, &sc->hw_check_work); | ||
174 | } | 175 | } |
175 | 176 | ||
176 | /* | 177 | /* |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 6e66f9c6782b..988372d218a4 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
@@ -280,6 +280,10 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan) | |||
280 | if (r) { | 280 | if (r) { |
281 | ath_err(common, | 281 | ath_err(common, |
282 | "Unable to reset channel, reset status %d\n", r); | 282 | "Unable to reset channel, reset status %d\n", r); |
283 | |||
284 | ath9k_hw_enable_interrupts(ah); | ||
285 | ath9k_queue_reset(sc, RESET_TYPE_BB_HANG); | ||
286 | |||
283 | goto out; | 287 | goto out; |
284 | } | 288 | } |
285 | 289 | ||
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c index 38bc5a7997ff..122146943bf2 100644 --- a/drivers/net/wireless/b43/dma.c +++ b/drivers/net/wireless/b43/dma.c | |||
@@ -1487,8 +1487,12 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, | |||
1487 | const struct b43_dma_ops *ops; | 1487 | const struct b43_dma_ops *ops; |
1488 | struct b43_dmaring *ring; | 1488 | struct b43_dmaring *ring; |
1489 | struct b43_dmadesc_meta *meta; | 1489 | struct b43_dmadesc_meta *meta; |
1490 | static const struct b43_txstatus fake; /* filled with 0 */ | ||
1491 | const struct b43_txstatus *txstat; | ||
1490 | int slot, firstused; | 1492 | int slot, firstused; |
1491 | bool frame_succeed; | 1493 | bool frame_succeed; |
1494 | int skip; | ||
1495 | static u8 err_out1, err_out2; | ||
1492 | 1496 | ||
1493 | ring = parse_cookie(dev, status->cookie, &slot); | 1497 | ring = parse_cookie(dev, status->cookie, &slot); |
1494 | if (unlikely(!ring)) | 1498 | if (unlikely(!ring)) |
@@ -1501,13 +1505,36 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, | |||
1501 | firstused = ring->current_slot - ring->used_slots + 1; | 1505 | firstused = ring->current_slot - ring->used_slots + 1; |
1502 | if (firstused < 0) | 1506 | if (firstused < 0) |
1503 | firstused = ring->nr_slots + firstused; | 1507 | firstused = ring->nr_slots + firstused; |
1508 | |||
1509 | skip = 0; | ||
1504 | if (unlikely(slot != firstused)) { | 1510 | if (unlikely(slot != firstused)) { |
1505 | /* This possibly is a firmware bug and will result in | 1511 | /* This possibly is a firmware bug and will result in |
1506 | * malfunction, memory leaks and/or stall of DMA functionality. */ | 1512 | * malfunction, memory leaks and/or stall of DMA functionality. |
1507 | b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. " | 1513 | */ |
1508 | "Expected %d, but got %d\n", | 1514 | if (slot == next_slot(ring, next_slot(ring, firstused))) { |
1509 | ring->index, firstused, slot); | 1515 | /* If a single header/data pair was missed, skip over |
1510 | return; | 1516 | * the first two slots in an attempt to recover. |
1517 | */ | ||
1518 | slot = firstused; | ||
1519 | skip = 2; | ||
1520 | if (!err_out1) { | ||
1521 | /* Report the error once. */ | ||
1522 | b43dbg(dev->wl, | ||
1523 | "Skip on DMA ring %d slot %d.\n", | ||
1524 | ring->index, slot); | ||
1525 | err_out1 = 1; | ||
1526 | } | ||
1527 | } else { | ||
1528 | /* More than a single header/data pair was missed. | ||
1529 | * Report this error once. | ||
1530 | */ | ||
1531 | if (!err_out2) | ||
1532 | b43dbg(dev->wl, | ||
1533 | "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n", | ||
1534 | ring->index, firstused, slot); | ||
1535 | err_out2 = 1; | ||
1536 | return; | ||
1537 | } | ||
1511 | } | 1538 | } |
1512 | 1539 | ||
1513 | ops = ring->ops; | 1540 | ops = ring->ops; |
@@ -1522,11 +1549,13 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, | |||
1522 | slot, firstused, ring->index); | 1549 | slot, firstused, ring->index); |
1523 | break; | 1550 | break; |
1524 | } | 1551 | } |
1552 | |||
1525 | if (meta->skb) { | 1553 | if (meta->skb) { |
1526 | struct b43_private_tx_info *priv_info = | 1554 | struct b43_private_tx_info *priv_info = |
1527 | b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb)); | 1555 | b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb)); |
1528 | 1556 | ||
1529 | unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1); | 1557 | unmap_descbuffer(ring, meta->dmaaddr, |
1558 | meta->skb->len, 1); | ||
1530 | kfree(priv_info->bouncebuffer); | 1559 | kfree(priv_info->bouncebuffer); |
1531 | priv_info->bouncebuffer = NULL; | 1560 | priv_info->bouncebuffer = NULL; |
1532 | } else { | 1561 | } else { |
@@ -1538,8 +1567,9 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, | |||
1538 | struct ieee80211_tx_info *info; | 1567 | struct ieee80211_tx_info *info; |
1539 | 1568 | ||
1540 | if (unlikely(!meta->skb)) { | 1569 | if (unlikely(!meta->skb)) { |
1541 | /* This is a scatter-gather fragment of a frame, so | 1570 | /* This is a scatter-gather fragment of a frame, |
1542 | * the skb pointer must not be NULL. */ | 1571 | * so the skb pointer must not be NULL. |
1572 | */ | ||
1543 | b43dbg(dev->wl, "TX status unexpected NULL skb " | 1573 | b43dbg(dev->wl, "TX status unexpected NULL skb " |
1544 | "at slot %d (first=%d) on ring %d\n", | 1574 | "at slot %d (first=%d) on ring %d\n", |
1545 | slot, firstused, ring->index); | 1575 | slot, firstused, ring->index); |
@@ -1550,9 +1580,18 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, | |||
1550 | 1580 | ||
1551 | /* | 1581 | /* |
1552 | * Call back to inform the ieee80211 subsystem about | 1582 | * Call back to inform the ieee80211 subsystem about |
1553 | * the status of the transmission. | 1583 | * the status of the transmission. When skipping over |
1584 | * a missed TX status report, use a status structure | ||
1585 | * filled with zeros to indicate that the frame was not | ||
1586 | * sent (frame_count 0) and not acknowledged | ||
1554 | */ | 1587 | */ |
1555 | frame_succeed = b43_fill_txstatus_report(dev, info, status); | 1588 | if (unlikely(skip)) |
1589 | txstat = &fake; | ||
1590 | else | ||
1591 | txstat = status; | ||
1592 | |||
1593 | frame_succeed = b43_fill_txstatus_report(dev, info, | ||
1594 | txstat); | ||
1556 | #ifdef CONFIG_B43_DEBUG | 1595 | #ifdef CONFIG_B43_DEBUG |
1557 | if (frame_succeed) | 1596 | if (frame_succeed) |
1558 | ring->nr_succeed_tx_packets++; | 1597 | ring->nr_succeed_tx_packets++; |
@@ -1580,12 +1619,14 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, | |||
1580 | /* Everything unmapped and free'd. So it's not used anymore. */ | 1619 | /* Everything unmapped and free'd. So it's not used anymore. */ |
1581 | ring->used_slots--; | 1620 | ring->used_slots--; |
1582 | 1621 | ||
1583 | if (meta->is_last_fragment) { | 1622 | if (meta->is_last_fragment && !skip) { |
1584 | /* This is the last scatter-gather | 1623 | /* This is the last scatter-gather |
1585 | * fragment of the frame. We are done. */ | 1624 | * fragment of the frame. We are done. */ |
1586 | break; | 1625 | break; |
1587 | } | 1626 | } |
1588 | slot = next_slot(ring, slot); | 1627 | slot = next_slot(ring, slot); |
1628 | if (skip > 0) | ||
1629 | --skip; | ||
1589 | } | 1630 | } |
1590 | if (ring->stopped) { | 1631 | if (ring->stopped) { |
1591 | B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME); | 1632 | B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME); |
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c index 3c35382ee6c2..b70f220bc4b3 100644 --- a/drivers/net/wireless/b43/phy_n.c +++ b/drivers/net/wireless/b43/phy_n.c | |||
@@ -1564,7 +1564,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev) | |||
1564 | u16 clip_off[2] = { 0xFFFF, 0xFFFF }; | 1564 | u16 clip_off[2] = { 0xFFFF, 0xFFFF }; |
1565 | 1565 | ||
1566 | u8 vcm_final = 0; | 1566 | u8 vcm_final = 0; |
1567 | s8 offset[4]; | 1567 | s32 offset[4]; |
1568 | s32 results[8][4] = { }; | 1568 | s32 results[8][4] = { }; |
1569 | s32 results_min[4] = { }; | 1569 | s32 results_min[4] = { }; |
1570 | s32 poll_results[4] = { }; | 1570 | s32 poll_results[4] = { }; |
@@ -1615,7 +1615,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev) | |||
1615 | } | 1615 | } |
1616 | for (i = 0; i < 4; i += 2) { | 1616 | for (i = 0; i < 4; i += 2) { |
1617 | s32 curr; | 1617 | s32 curr; |
1618 | s32 mind = 40; | 1618 | s32 mind = 0x100000; |
1619 | s32 minpoll = 249; | 1619 | s32 minpoll = 249; |
1620 | u8 minvcm = 0; | 1620 | u8 minvcm = 0; |
1621 | if (2 * core != i) | 1621 | if (2 * core != i) |
@@ -1732,7 +1732,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type) | |||
1732 | u8 regs_save_radio[2]; | 1732 | u8 regs_save_radio[2]; |
1733 | u16 regs_save_phy[2]; | 1733 | u16 regs_save_phy[2]; |
1734 | 1734 | ||
1735 | s8 offset[4]; | 1735 | s32 offset[4]; |
1736 | u8 core; | 1736 | u8 core; |
1737 | u8 rail; | 1737 | u8 rail; |
1738 | 1738 | ||
@@ -1799,7 +1799,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type) | |||
1799 | } | 1799 | } |
1800 | 1800 | ||
1801 | for (i = 0; i < 4; i++) { | 1801 | for (i = 0; i < 4; i++) { |
1802 | s32 mind = 40; | 1802 | s32 mind = 0x100000; |
1803 | u8 minvcm = 0; | 1803 | u8 minvcm = 0; |
1804 | s32 minpoll = 249; | 1804 | s32 minpoll = 249; |
1805 | s32 curr; | 1805 | s32 curr; |
@@ -5165,7 +5165,8 @@ static void b43_nphy_pmu_spur_avoid(struct b43_wldev *dev, bool avoid) | |||
5165 | #endif | 5165 | #endif |
5166 | #ifdef CONFIG_B43_SSB | 5166 | #ifdef CONFIG_B43_SSB |
5167 | case B43_BUS_SSB: | 5167 | case B43_BUS_SSB: |
5168 | /* FIXME */ | 5168 | ssb_pmu_spuravoid_pllupdate(&dev->dev->sdev->bus->chipco, |
5169 | avoid); | ||
5169 | break; | 5170 | break; |
5170 | #endif | 5171 | #endif |
5171 | } | 5172 | } |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c index 4469321c0eb3..35fc68be158d 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c | |||
@@ -3317,15 +3317,15 @@ static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus) | |||
3317 | goto err; | 3317 | goto err; |
3318 | } | 3318 | } |
3319 | 3319 | ||
3320 | /* External image takes precedence if specified */ | ||
3321 | if (brcmf_sdbrcm_download_code_file(bus)) { | 3320 | if (brcmf_sdbrcm_download_code_file(bus)) { |
3322 | brcmf_err("dongle image file download failed\n"); | 3321 | brcmf_err("dongle image file download failed\n"); |
3323 | goto err; | 3322 | goto err; |
3324 | } | 3323 | } |
3325 | 3324 | ||
3326 | /* External nvram takes precedence if specified */ | 3325 | if (brcmf_sdbrcm_download_nvram(bus)) { |
3327 | if (brcmf_sdbrcm_download_nvram(bus)) | ||
3328 | brcmf_err("dongle nvram file download failed\n"); | 3326 | brcmf_err("dongle nvram file download failed\n"); |
3327 | goto err; | ||
3328 | } | ||
3329 | 3329 | ||
3330 | /* Take arm out of reset */ | 3330 | /* Take arm out of reset */ |
3331 | if (brcmf_sdbrcm_download_state(bus, false)) { | 3331 | if (brcmf_sdbrcm_download_state(bus, false)) { |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index 2af9c0f0798d..78da3eff75e8 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c | |||
@@ -1891,8 +1891,10 @@ static s32 | |||
1891 | brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev, | 1891 | brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev, |
1892 | u8 key_idx, const u8 *mac_addr, struct key_params *params) | 1892 | u8 key_idx, const u8 *mac_addr, struct key_params *params) |
1893 | { | 1893 | { |
1894 | struct brcmf_if *ifp = netdev_priv(ndev); | ||
1894 | struct brcmf_wsec_key key; | 1895 | struct brcmf_wsec_key key; |
1895 | s32 err = 0; | 1896 | s32 err = 0; |
1897 | u8 keybuf[8]; | ||
1896 | 1898 | ||
1897 | memset(&key, 0, sizeof(key)); | 1899 | memset(&key, 0, sizeof(key)); |
1898 | key.index = (u32) key_idx; | 1900 | key.index = (u32) key_idx; |
@@ -1916,8 +1918,9 @@ brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev, | |||
1916 | brcmf_dbg(CONN, "Setting the key index %d\n", key.index); | 1918 | brcmf_dbg(CONN, "Setting the key index %d\n", key.index); |
1917 | memcpy(key.data, params->key, key.len); | 1919 | memcpy(key.data, params->key, key.len); |
1918 | 1920 | ||
1919 | if (params->cipher == WLAN_CIPHER_SUITE_TKIP) { | 1921 | if ((ifp->vif->mode != WL_MODE_AP) && |
1920 | u8 keybuf[8]; | 1922 | (params->cipher == WLAN_CIPHER_SUITE_TKIP)) { |
1923 | brcmf_dbg(CONN, "Swapping RX/TX MIC key\n"); | ||
1921 | memcpy(keybuf, &key.data[24], sizeof(keybuf)); | 1924 | memcpy(keybuf, &key.data[24], sizeof(keybuf)); |
1922 | memcpy(&key.data[24], &key.data[16], sizeof(keybuf)); | 1925 | memcpy(&key.data[24], &key.data[16], sizeof(keybuf)); |
1923 | memcpy(&key.data[16], keybuf, sizeof(keybuf)); | 1926 | memcpy(&key.data[16], keybuf, sizeof(keybuf)); |
@@ -2013,7 +2016,7 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, | |||
2013 | break; | 2016 | break; |
2014 | case WLAN_CIPHER_SUITE_TKIP: | 2017 | case WLAN_CIPHER_SUITE_TKIP: |
2015 | if (ifp->vif->mode != WL_MODE_AP) { | 2018 | if (ifp->vif->mode != WL_MODE_AP) { |
2016 | brcmf_dbg(CONN, "Swapping key\n"); | 2019 | brcmf_dbg(CONN, "Swapping RX/TX MIC key\n"); |
2017 | memcpy(keybuf, &key.data[24], sizeof(keybuf)); | 2020 | memcpy(keybuf, &key.data[24], sizeof(keybuf)); |
2018 | memcpy(&key.data[24], &key.data[16], sizeof(keybuf)); | 2021 | memcpy(&key.data[24], &key.data[16], sizeof(keybuf)); |
2019 | memcpy(&key.data[16], keybuf, sizeof(keybuf)); | 2022 | memcpy(&key.data[16], keybuf, sizeof(keybuf)); |
@@ -2118,8 +2121,7 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, | |||
2118 | err = -EAGAIN; | 2121 | err = -EAGAIN; |
2119 | goto done; | 2122 | goto done; |
2120 | } | 2123 | } |
2121 | switch (wsec & ~SES_OW_ENABLED) { | 2124 | if (wsec & WEP_ENABLED) { |
2122 | case WEP_ENABLED: | ||
2123 | sec = &profile->sec; | 2125 | sec = &profile->sec; |
2124 | if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) { | 2126 | if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) { |
2125 | params.cipher = WLAN_CIPHER_SUITE_WEP40; | 2127 | params.cipher = WLAN_CIPHER_SUITE_WEP40; |
@@ -2128,16 +2130,13 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, | |||
2128 | params.cipher = WLAN_CIPHER_SUITE_WEP104; | 2130 | params.cipher = WLAN_CIPHER_SUITE_WEP104; |
2129 | brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n"); | 2131 | brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n"); |
2130 | } | 2132 | } |
2131 | break; | 2133 | } else if (wsec & TKIP_ENABLED) { |
2132 | case TKIP_ENABLED: | ||
2133 | params.cipher = WLAN_CIPHER_SUITE_TKIP; | 2134 | params.cipher = WLAN_CIPHER_SUITE_TKIP; |
2134 | brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_TKIP\n"); | 2135 | brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_TKIP\n"); |
2135 | break; | 2136 | } else if (wsec & AES_ENABLED) { |
2136 | case AES_ENABLED: | ||
2137 | params.cipher = WLAN_CIPHER_SUITE_AES_CMAC; | 2137 | params.cipher = WLAN_CIPHER_SUITE_AES_CMAC; |
2138 | brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_AES_CMAC\n"); | 2138 | brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_AES_CMAC\n"); |
2139 | break; | 2139 | } else { |
2140 | default: | ||
2141 | brcmf_err("Invalid algo (0x%x)\n", wsec); | 2140 | brcmf_err("Invalid algo (0x%x)\n", wsec); |
2142 | err = -EINVAL; | 2141 | err = -EINVAL; |
2143 | goto done; | 2142 | goto done; |
@@ -3824,8 +3823,9 @@ exit: | |||
3824 | static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev) | 3823 | static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev) |
3825 | { | 3824 | { |
3826 | struct brcmf_if *ifp = netdev_priv(ndev); | 3825 | struct brcmf_if *ifp = netdev_priv(ndev); |
3827 | s32 err = -EPERM; | 3826 | s32 err; |
3828 | struct brcmf_fil_bss_enable_le bss_enable; | 3827 | struct brcmf_fil_bss_enable_le bss_enable; |
3828 | struct brcmf_join_params join_params; | ||
3829 | 3829 | ||
3830 | brcmf_dbg(TRACE, "Enter\n"); | 3830 | brcmf_dbg(TRACE, "Enter\n"); |
3831 | 3831 | ||
@@ -3833,16 +3833,21 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev) | |||
3833 | /* Due to most likely deauths outstanding we sleep */ | 3833 | /* Due to most likely deauths outstanding we sleep */ |
3834 | /* first to make sure they get processed by fw. */ | 3834 | /* first to make sure they get processed by fw. */ |
3835 | msleep(400); | 3835 | msleep(400); |
3836 | err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0); | 3836 | |
3837 | if (err < 0) { | 3837 | memset(&join_params, 0, sizeof(join_params)); |
3838 | brcmf_err("setting AP mode failed %d\n", err); | 3838 | err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID, |
3839 | goto exit; | 3839 | &join_params, sizeof(join_params)); |
3840 | } | 3840 | if (err < 0) |
3841 | brcmf_err("SET SSID error (%d)\n", err); | ||
3841 | err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 0); | 3842 | err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 0); |
3842 | if (err < 0) { | 3843 | if (err < 0) |
3843 | brcmf_err("BRCMF_C_UP error %d\n", err); | 3844 | brcmf_err("BRCMF_C_UP error %d\n", err); |
3844 | goto exit; | 3845 | err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0); |
3845 | } | 3846 | if (err < 0) |
3847 | brcmf_err("setting AP mode failed %d\n", err); | ||
3848 | err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 0); | ||
3849 | if (err < 0) | ||
3850 | brcmf_err("setting INFRA mode failed %d\n", err); | ||
3846 | } else { | 3851 | } else { |
3847 | bss_enable.bsscfg_idx = cpu_to_le32(ifp->bssidx); | 3852 | bss_enable.bsscfg_idx = cpu_to_le32(ifp->bssidx); |
3848 | bss_enable.enable = cpu_to_le32(0); | 3853 | bss_enable.enable = cpu_to_le32(0); |
@@ -3855,7 +3860,6 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev) | |||
3855 | set_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state); | 3860 | set_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state); |
3856 | clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state); | 3861 | clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state); |
3857 | 3862 | ||
3858 | exit: | ||
3859 | return err; | 3863 | return err; |
3860 | } | 3864 | } |
3861 | 3865 | ||
@@ -4124,10 +4128,6 @@ static const struct ieee80211_iface_limit brcmf_iface_limits[] = { | |||
4124 | }, | 4128 | }, |
4125 | { | 4129 | { |
4126 | .max = 1, | 4130 | .max = 1, |
4127 | .types = BIT(NL80211_IFTYPE_P2P_DEVICE) | ||
4128 | }, | ||
4129 | { | ||
4130 | .max = 1, | ||
4131 | .types = BIT(NL80211_IFTYPE_P2P_CLIENT) | | 4131 | .types = BIT(NL80211_IFTYPE_P2P_CLIENT) | |
4132 | BIT(NL80211_IFTYPE_P2P_GO) | 4132 | BIT(NL80211_IFTYPE_P2P_GO) |
4133 | }, | 4133 | }, |
@@ -4183,8 +4183,7 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev) | |||
4183 | BIT(NL80211_IFTYPE_ADHOC) | | 4183 | BIT(NL80211_IFTYPE_ADHOC) | |
4184 | BIT(NL80211_IFTYPE_AP) | | 4184 | BIT(NL80211_IFTYPE_AP) | |
4185 | BIT(NL80211_IFTYPE_P2P_CLIENT) | | 4185 | BIT(NL80211_IFTYPE_P2P_CLIENT) | |
4186 | BIT(NL80211_IFTYPE_P2P_GO) | | 4186 | BIT(NL80211_IFTYPE_P2P_GO); |
4187 | BIT(NL80211_IFTYPE_P2P_DEVICE); | ||
4188 | wiphy->iface_combinations = brcmf_iface_combos; | 4187 | wiphy->iface_combinations = brcmf_iface_combos; |
4189 | wiphy->n_iface_combinations = ARRAY_SIZE(brcmf_iface_combos); | 4188 | wiphy->n_iface_combinations = ARRAY_SIZE(brcmf_iface_combos); |
4190 | wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz; | 4189 | wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz; |
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c index c6451c61407a..e2340b231aa1 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c | |||
@@ -274,6 +274,130 @@ static void brcms_set_basic_rate(struct brcm_rateset *rs, u16 rate, bool is_br) | |||
274 | } | 274 | } |
275 | } | 275 | } |
276 | 276 | ||
277 | /** | ||
278 | * This function frees the WL per-device resources. | ||
279 | * | ||
280 | * This function frees resources owned by the WL device pointed to | ||
281 | * by the wl parameter. | ||
282 | * | ||
283 | * precondition: can both be called locked and unlocked | ||
284 | * | ||
285 | */ | ||
286 | static void brcms_free(struct brcms_info *wl) | ||
287 | { | ||
288 | struct brcms_timer *t, *next; | ||
289 | |||
290 | /* free ucode data */ | ||
291 | if (wl->fw.fw_cnt) | ||
292 | brcms_ucode_data_free(&wl->ucode); | ||
293 | if (wl->irq) | ||
294 | free_irq(wl->irq, wl); | ||
295 | |||
296 | /* kill dpc */ | ||
297 | tasklet_kill(&wl->tasklet); | ||
298 | |||
299 | if (wl->pub) { | ||
300 | brcms_debugfs_detach(wl->pub); | ||
301 | brcms_c_module_unregister(wl->pub, "linux", wl); | ||
302 | } | ||
303 | |||
304 | /* free common resources */ | ||
305 | if (wl->wlc) { | ||
306 | brcms_c_detach(wl->wlc); | ||
307 | wl->wlc = NULL; | ||
308 | wl->pub = NULL; | ||
309 | } | ||
310 | |||
311 | /* virtual interface deletion is deferred so we cannot spinwait */ | ||
312 | |||
313 | /* wait for all pending callbacks to complete */ | ||
314 | while (atomic_read(&wl->callbacks) > 0) | ||
315 | schedule(); | ||
316 | |||
317 | /* free timers */ | ||
318 | for (t = wl->timers; t; t = next) { | ||
319 | next = t->next; | ||
320 | #ifdef DEBUG | ||
321 | kfree(t->name); | ||
322 | #endif | ||
323 | kfree(t); | ||
324 | } | ||
325 | } | ||
326 | |||
327 | /* | ||
328 | * called from both kernel as from this kernel module (error flow on attach) | ||
329 | * precondition: perimeter lock is not acquired. | ||
330 | */ | ||
331 | static void brcms_remove(struct bcma_device *pdev) | ||
332 | { | ||
333 | struct ieee80211_hw *hw = bcma_get_drvdata(pdev); | ||
334 | struct brcms_info *wl = hw->priv; | ||
335 | |||
336 | if (wl->wlc) { | ||
337 | wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, false); | ||
338 | wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy); | ||
339 | ieee80211_unregister_hw(hw); | ||
340 | } | ||
341 | |||
342 | brcms_free(wl); | ||
343 | |||
344 | bcma_set_drvdata(pdev, NULL); | ||
345 | ieee80211_free_hw(hw); | ||
346 | } | ||
347 | |||
348 | /* | ||
349 | * Precondition: Since this function is called in brcms_pci_probe() context, | ||
350 | * no locking is required. | ||
351 | */ | ||
352 | static void brcms_release_fw(struct brcms_info *wl) | ||
353 | { | ||
354 | int i; | ||
355 | for (i = 0; i < MAX_FW_IMAGES; i++) { | ||
356 | release_firmware(wl->fw.fw_bin[i]); | ||
357 | release_firmware(wl->fw.fw_hdr[i]); | ||
358 | } | ||
359 | } | ||
360 | |||
361 | /* | ||
362 | * Precondition: Since this function is called in brcms_pci_probe() context, | ||
363 | * no locking is required. | ||
364 | */ | ||
365 | static int brcms_request_fw(struct brcms_info *wl, struct bcma_device *pdev) | ||
366 | { | ||
367 | int status; | ||
368 | struct device *device = &pdev->dev; | ||
369 | char fw_name[100]; | ||
370 | int i; | ||
371 | |||
372 | memset(&wl->fw, 0, sizeof(struct brcms_firmware)); | ||
373 | for (i = 0; i < MAX_FW_IMAGES; i++) { | ||
374 | if (brcms_firmwares[i] == NULL) | ||
375 | break; | ||
376 | sprintf(fw_name, "%s-%d.fw", brcms_firmwares[i], | ||
377 | UCODE_LOADER_API_VER); | ||
378 | status = request_firmware(&wl->fw.fw_bin[i], fw_name, device); | ||
379 | if (status) { | ||
380 | wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n", | ||
381 | KBUILD_MODNAME, fw_name); | ||
382 | return status; | ||
383 | } | ||
384 | sprintf(fw_name, "%s_hdr-%d.fw", brcms_firmwares[i], | ||
385 | UCODE_LOADER_API_VER); | ||
386 | status = request_firmware(&wl->fw.fw_hdr[i], fw_name, device); | ||
387 | if (status) { | ||
388 | wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n", | ||
389 | KBUILD_MODNAME, fw_name); | ||
390 | return status; | ||
391 | } | ||
392 | wl->fw.hdr_num_entries[i] = | ||
393 | wl->fw.fw_hdr[i]->size / (sizeof(struct firmware_hdr)); | ||
394 | } | ||
395 | wl->fw.fw_cnt = i; | ||
396 | status = brcms_ucode_data_init(wl, &wl->ucode); | ||
397 | brcms_release_fw(wl); | ||
398 | return status; | ||
399 | } | ||
400 | |||
277 | static void brcms_ops_tx(struct ieee80211_hw *hw, | 401 | static void brcms_ops_tx(struct ieee80211_hw *hw, |
278 | struct ieee80211_tx_control *control, | 402 | struct ieee80211_tx_control *control, |
279 | struct sk_buff *skb) | 403 | struct sk_buff *skb) |
@@ -306,6 +430,14 @@ static int brcms_ops_start(struct ieee80211_hw *hw) | |||
306 | if (!blocked) | 430 | if (!blocked) |
307 | wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy); | 431 | wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy); |
308 | 432 | ||
433 | if (!wl->ucode.bcm43xx_bomminor) { | ||
434 | err = brcms_request_fw(wl, wl->wlc->hw->d11core); | ||
435 | if (err) { | ||
436 | brcms_remove(wl->wlc->hw->d11core); | ||
437 | return -ENOENT; | ||
438 | } | ||
439 | } | ||
440 | |||
309 | spin_lock_bh(&wl->lock); | 441 | spin_lock_bh(&wl->lock); |
310 | /* avoid acknowledging frames before a non-monitor device is added */ | 442 | /* avoid acknowledging frames before a non-monitor device is added */ |
311 | wl->mute_tx = true; | 443 | wl->mute_tx = true; |
@@ -793,128 +925,6 @@ void brcms_dpc(unsigned long data) | |||
793 | wake_up(&wl->tx_flush_wq); | 925 | wake_up(&wl->tx_flush_wq); |
794 | } | 926 | } |
795 | 927 | ||
796 | /* | ||
797 | * Precondition: Since this function is called in brcms_pci_probe() context, | ||
798 | * no locking is required. | ||
799 | */ | ||
800 | static int brcms_request_fw(struct brcms_info *wl, struct bcma_device *pdev) | ||
801 | { | ||
802 | int status; | ||
803 | struct device *device = &pdev->dev; | ||
804 | char fw_name[100]; | ||
805 | int i; | ||
806 | |||
807 | memset(&wl->fw, 0, sizeof(struct brcms_firmware)); | ||
808 | for (i = 0; i < MAX_FW_IMAGES; i++) { | ||
809 | if (brcms_firmwares[i] == NULL) | ||
810 | break; | ||
811 | sprintf(fw_name, "%s-%d.fw", brcms_firmwares[i], | ||
812 | UCODE_LOADER_API_VER); | ||
813 | status = request_firmware(&wl->fw.fw_bin[i], fw_name, device); | ||
814 | if (status) { | ||
815 | wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n", | ||
816 | KBUILD_MODNAME, fw_name); | ||
817 | return status; | ||
818 | } | ||
819 | sprintf(fw_name, "%s_hdr-%d.fw", brcms_firmwares[i], | ||
820 | UCODE_LOADER_API_VER); | ||
821 | status = request_firmware(&wl->fw.fw_hdr[i], fw_name, device); | ||
822 | if (status) { | ||
823 | wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n", | ||
824 | KBUILD_MODNAME, fw_name); | ||
825 | return status; | ||
826 | } | ||
827 | wl->fw.hdr_num_entries[i] = | ||
828 | wl->fw.fw_hdr[i]->size / (sizeof(struct firmware_hdr)); | ||
829 | } | ||
830 | wl->fw.fw_cnt = i; | ||
831 | return brcms_ucode_data_init(wl, &wl->ucode); | ||
832 | } | ||
833 | |||
834 | /* | ||
835 | * Precondition: Since this function is called in brcms_pci_probe() context, | ||
836 | * no locking is required. | ||
837 | */ | ||
838 | static void brcms_release_fw(struct brcms_info *wl) | ||
839 | { | ||
840 | int i; | ||
841 | for (i = 0; i < MAX_FW_IMAGES; i++) { | ||
842 | release_firmware(wl->fw.fw_bin[i]); | ||
843 | release_firmware(wl->fw.fw_hdr[i]); | ||
844 | } | ||
845 | } | ||
846 | |||
847 | /** | ||
848 | * This function frees the WL per-device resources. | ||
849 | * | ||
850 | * This function frees resources owned by the WL device pointed to | ||
851 | * by the wl parameter. | ||
852 | * | ||
853 | * precondition: can both be called locked and unlocked | ||
854 | * | ||
855 | */ | ||
856 | static void brcms_free(struct brcms_info *wl) | ||
857 | { | ||
858 | struct brcms_timer *t, *next; | ||
859 | |||
860 | /* free ucode data */ | ||
861 | if (wl->fw.fw_cnt) | ||
862 | brcms_ucode_data_free(&wl->ucode); | ||
863 | if (wl->irq) | ||
864 | free_irq(wl->irq, wl); | ||
865 | |||
866 | /* kill dpc */ | ||
867 | tasklet_kill(&wl->tasklet); | ||
868 | |||
869 | if (wl->pub) { | ||
870 | brcms_debugfs_detach(wl->pub); | ||
871 | brcms_c_module_unregister(wl->pub, "linux", wl); | ||
872 | } | ||
873 | |||
874 | /* free common resources */ | ||
875 | if (wl->wlc) { | ||
876 | brcms_c_detach(wl->wlc); | ||
877 | wl->wlc = NULL; | ||
878 | wl->pub = NULL; | ||
879 | } | ||
880 | |||
881 | /* virtual interface deletion is deferred so we cannot spinwait */ | ||
882 | |||
883 | /* wait for all pending callbacks to complete */ | ||
884 | while (atomic_read(&wl->callbacks) > 0) | ||
885 | schedule(); | ||
886 | |||
887 | /* free timers */ | ||
888 | for (t = wl->timers; t; t = next) { | ||
889 | next = t->next; | ||
890 | #ifdef DEBUG | ||
891 | kfree(t->name); | ||
892 | #endif | ||
893 | kfree(t); | ||
894 | } | ||
895 | } | ||
896 | |||
897 | /* | ||
898 | * called from both kernel as from this kernel module (error flow on attach) | ||
899 | * precondition: perimeter lock is not acquired. | ||
900 | */ | ||
901 | static void brcms_remove(struct bcma_device *pdev) | ||
902 | { | ||
903 | struct ieee80211_hw *hw = bcma_get_drvdata(pdev); | ||
904 | struct brcms_info *wl = hw->priv; | ||
905 | |||
906 | if (wl->wlc) { | ||
907 | wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, false); | ||
908 | wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy); | ||
909 | ieee80211_unregister_hw(hw); | ||
910 | } | ||
911 | |||
912 | brcms_free(wl); | ||
913 | |||
914 | bcma_set_drvdata(pdev, NULL); | ||
915 | ieee80211_free_hw(hw); | ||
916 | } | ||
917 | |||
918 | static irqreturn_t brcms_isr(int irq, void *dev_id) | 928 | static irqreturn_t brcms_isr(int irq, void *dev_id) |
919 | { | 929 | { |
920 | struct brcms_info *wl; | 930 | struct brcms_info *wl; |
@@ -1047,18 +1057,8 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev) | |||
1047 | spin_lock_init(&wl->lock); | 1057 | spin_lock_init(&wl->lock); |
1048 | spin_lock_init(&wl->isr_lock); | 1058 | spin_lock_init(&wl->isr_lock); |
1049 | 1059 | ||
1050 | /* prepare ucode */ | ||
1051 | if (brcms_request_fw(wl, pdev) < 0) { | ||
1052 | wiphy_err(wl->wiphy, "%s: Failed to find firmware usually in " | ||
1053 | "%s\n", KBUILD_MODNAME, "/lib/firmware/brcm"); | ||
1054 | brcms_release_fw(wl); | ||
1055 | brcms_remove(pdev); | ||
1056 | return NULL; | ||
1057 | } | ||
1058 | |||
1059 | /* common load-time initialization */ | 1060 | /* common load-time initialization */ |
1060 | wl->wlc = brcms_c_attach((void *)wl, pdev, unit, false, &err); | 1061 | wl->wlc = brcms_c_attach((void *)wl, pdev, unit, false, &err); |
1061 | brcms_release_fw(wl); | ||
1062 | if (!wl->wlc) { | 1062 | if (!wl->wlc) { |
1063 | wiphy_err(wl->wiphy, "%s: attach() failed with code %d\n", | 1063 | wiphy_err(wl->wiphy, "%s: attach() failed with code %d\n", |
1064 | KBUILD_MODNAME, err); | 1064 | KBUILD_MODNAME, err); |
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c index 21a824232478..18d37645e2cd 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c | |||
@@ -1137,9 +1137,8 @@ wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi, | |||
1137 | gain0_15 = ((biq1 & 0xf) << 12) | | 1137 | gain0_15 = ((biq1 & 0xf) << 12) | |
1138 | ((tia & 0xf) << 8) | | 1138 | ((tia & 0xf) << 8) | |
1139 | ((lna2 & 0x3) << 6) | | 1139 | ((lna2 & 0x3) << 6) | |
1140 | ((lna2 & 0x3) << 4) | | 1140 | ((lna2 & |
1141 | ((lna1 & 0x3) << 2) | | 1141 | 0x3) << 4) | ((lna1 & 0x3) << 2) | ((lna1 & 0x3) << 0); |
1142 | ((lna1 & 0x3) << 0); | ||
1143 | 1142 | ||
1144 | mod_phy_reg(pi, 0x4b6, (0xffff << 0), gain0_15 << 0); | 1143 | mod_phy_reg(pi, 0x4b6, (0xffff << 0), gain0_15 << 0); |
1145 | mod_phy_reg(pi, 0x4b7, (0xf << 0), gain16_19 << 0); | 1144 | mod_phy_reg(pi, 0x4b7, (0xf << 0), gain16_19 << 0); |
@@ -1157,8 +1156,6 @@ wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi, | |||
1157 | } | 1156 | } |
1158 | 1157 | ||
1159 | mod_phy_reg(pi, 0x44d, (0x1 << 0), (!trsw) << 0); | 1158 | mod_phy_reg(pi, 0x44d, (0x1 << 0), (!trsw) << 0); |
1160 | mod_phy_reg(pi, 0x4b1, (0x3 << 11), lna1 << 11); | ||
1161 | mod_phy_reg(pi, 0x4e6, (0x3 << 3), lna1 << 3); | ||
1162 | 1159 | ||
1163 | } | 1160 | } |
1164 | 1161 | ||
@@ -1331,43 +1328,6 @@ static u32 wlc_lcnphy_measure_digital_power(struct brcms_phy *pi, u16 nsamples) | |||
1331 | return (iq_est.i_pwr + iq_est.q_pwr) / nsamples; | 1328 | return (iq_est.i_pwr + iq_est.q_pwr) / nsamples; |
1332 | } | 1329 | } |
1333 | 1330 | ||
1334 | static bool wlc_lcnphy_rx_iq_cal_gain(struct brcms_phy *pi, u16 biq1_gain, | ||
1335 | u16 tia_gain, u16 lna2_gain) | ||
1336 | { | ||
1337 | u32 i_thresh_l, q_thresh_l; | ||
1338 | u32 i_thresh_h, q_thresh_h; | ||
1339 | struct lcnphy_iq_est iq_est_h, iq_est_l; | ||
1340 | |||
1341 | wlc_lcnphy_set_rx_gain_by_distribution(pi, 0, 0, 0, biq1_gain, tia_gain, | ||
1342 | lna2_gain, 0); | ||
1343 | |||
1344 | wlc_lcnphy_rx_gain_override_enable(pi, true); | ||
1345 | wlc_lcnphy_start_tx_tone(pi, 2000, (40 >> 1), 0); | ||
1346 | udelay(500); | ||
1347 | write_radio_reg(pi, RADIO_2064_REG112, 0); | ||
1348 | if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_l)) | ||
1349 | return false; | ||
1350 | |||
1351 | wlc_lcnphy_start_tx_tone(pi, 2000, 40, 0); | ||
1352 | udelay(500); | ||
1353 | write_radio_reg(pi, RADIO_2064_REG112, 0); | ||
1354 | if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_h)) | ||
1355 | return false; | ||
1356 | |||
1357 | i_thresh_l = (iq_est_l.i_pwr << 1); | ||
1358 | i_thresh_h = (iq_est_l.i_pwr << 2) + iq_est_l.i_pwr; | ||
1359 | |||
1360 | q_thresh_l = (iq_est_l.q_pwr << 1); | ||
1361 | q_thresh_h = (iq_est_l.q_pwr << 2) + iq_est_l.q_pwr; | ||
1362 | if ((iq_est_h.i_pwr > i_thresh_l) && | ||
1363 | (iq_est_h.i_pwr < i_thresh_h) && | ||
1364 | (iq_est_h.q_pwr > q_thresh_l) && | ||
1365 | (iq_est_h.q_pwr < q_thresh_h)) | ||
1366 | return true; | ||
1367 | |||
1368 | return false; | ||
1369 | } | ||
1370 | |||
1371 | static bool | 1331 | static bool |
1372 | wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi, | 1332 | wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi, |
1373 | const struct lcnphy_rx_iqcomp *iqcomp, | 1333 | const struct lcnphy_rx_iqcomp *iqcomp, |
@@ -1382,8 +1342,8 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi, | |||
1382 | RFOverrideVal0_old, rfoverride2_old, rfoverride2val_old, | 1342 | RFOverrideVal0_old, rfoverride2_old, rfoverride2val_old, |
1383 | rfoverride3_old, rfoverride3val_old, rfoverride4_old, | 1343 | rfoverride3_old, rfoverride3val_old, rfoverride4_old, |
1384 | rfoverride4val_old, afectrlovr_old, afectrlovrval_old; | 1344 | rfoverride4val_old, afectrlovr_old, afectrlovrval_old; |
1385 | int tia_gain, lna2_gain, biq1_gain; | 1345 | int tia_gain; |
1386 | bool set_gain; | 1346 | u32 received_power, rx_pwr_threshold; |
1387 | u16 old_sslpnCalibClkEnCtrl, old_sslpnRxFeClkEnCtrl; | 1347 | u16 old_sslpnCalibClkEnCtrl, old_sslpnRxFeClkEnCtrl; |
1388 | u16 values_to_save[11]; | 1348 | u16 values_to_save[11]; |
1389 | s16 *ptr; | 1349 | s16 *ptr; |
@@ -1408,134 +1368,126 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi, | |||
1408 | goto cal_done; | 1368 | goto cal_done; |
1409 | } | 1369 | } |
1410 | 1370 | ||
1411 | WARN_ON(module != 1); | 1371 | if (module == 1) { |
1412 | tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi); | ||
1413 | wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF); | ||
1414 | |||
1415 | for (i = 0; i < 11; i++) | ||
1416 | values_to_save[i] = | ||
1417 | read_radio_reg(pi, rxiq_cal_rf_reg[i]); | ||
1418 | Core1TxControl_old = read_phy_reg(pi, 0x631); | ||
1419 | |||
1420 | or_phy_reg(pi, 0x631, 0x0015); | ||
1421 | |||
1422 | RFOverride0_old = read_phy_reg(pi, 0x44c); | ||
1423 | RFOverrideVal0_old = read_phy_reg(pi, 0x44d); | ||
1424 | rfoverride2_old = read_phy_reg(pi, 0x4b0); | ||
1425 | rfoverride2val_old = read_phy_reg(pi, 0x4b1); | ||
1426 | rfoverride3_old = read_phy_reg(pi, 0x4f9); | ||
1427 | rfoverride3val_old = read_phy_reg(pi, 0x4fa); | ||
1428 | rfoverride4_old = read_phy_reg(pi, 0x938); | ||
1429 | rfoverride4val_old = read_phy_reg(pi, 0x939); | ||
1430 | afectrlovr_old = read_phy_reg(pi, 0x43b); | ||
1431 | afectrlovrval_old = read_phy_reg(pi, 0x43c); | ||
1432 | old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da); | ||
1433 | old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db); | ||
1434 | |||
1435 | tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi); | ||
1436 | if (tx_gain_override_old) { | ||
1437 | wlc_lcnphy_get_tx_gain(pi, &old_gains); | ||
1438 | tx_gain_index_old = pi_lcn->lcnphy_current_index; | ||
1439 | } | ||
1440 | |||
1441 | wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx); | ||
1442 | 1372 | ||
1443 | mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0); | 1373 | tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi); |
1444 | mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0); | 1374 | wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF); |
1445 | 1375 | ||
1446 | mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1); | 1376 | for (i = 0; i < 11; i++) |
1447 | mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1); | 1377 | values_to_save[i] = |
1378 | read_radio_reg(pi, rxiq_cal_rf_reg[i]); | ||
1379 | Core1TxControl_old = read_phy_reg(pi, 0x631); | ||
1380 | |||
1381 | or_phy_reg(pi, 0x631, 0x0015); | ||
1382 | |||
1383 | RFOverride0_old = read_phy_reg(pi, 0x44c); | ||
1384 | RFOverrideVal0_old = read_phy_reg(pi, 0x44d); | ||
1385 | rfoverride2_old = read_phy_reg(pi, 0x4b0); | ||
1386 | rfoverride2val_old = read_phy_reg(pi, 0x4b1); | ||
1387 | rfoverride3_old = read_phy_reg(pi, 0x4f9); | ||
1388 | rfoverride3val_old = read_phy_reg(pi, 0x4fa); | ||
1389 | rfoverride4_old = read_phy_reg(pi, 0x938); | ||
1390 | rfoverride4val_old = read_phy_reg(pi, 0x939); | ||
1391 | afectrlovr_old = read_phy_reg(pi, 0x43b); | ||
1392 | afectrlovrval_old = read_phy_reg(pi, 0x43c); | ||
1393 | old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da); | ||
1394 | old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db); | ||
1395 | |||
1396 | tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi); | ||
1397 | if (tx_gain_override_old) { | ||
1398 | wlc_lcnphy_get_tx_gain(pi, &old_gains); | ||
1399 | tx_gain_index_old = pi_lcn->lcnphy_current_index; | ||
1400 | } | ||
1448 | 1401 | ||
1449 | write_radio_reg(pi, RADIO_2064_REG116, 0x06); | 1402 | wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx); |
1450 | write_radio_reg(pi, RADIO_2064_REG12C, 0x07); | ||
1451 | write_radio_reg(pi, RADIO_2064_REG06A, 0xd3); | ||
1452 | write_radio_reg(pi, RADIO_2064_REG098, 0x03); | ||
1453 | write_radio_reg(pi, RADIO_2064_REG00B, 0x7); | ||
1454 | mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4); | ||
1455 | write_radio_reg(pi, RADIO_2064_REG01D, 0x01); | ||
1456 | write_radio_reg(pi, RADIO_2064_REG114, 0x01); | ||
1457 | write_radio_reg(pi, RADIO_2064_REG02E, 0x10); | ||
1458 | write_radio_reg(pi, RADIO_2064_REG12A, 0x08); | ||
1459 | |||
1460 | mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0); | ||
1461 | mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0); | ||
1462 | mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1); | ||
1463 | mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1); | ||
1464 | mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2); | ||
1465 | mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2); | ||
1466 | mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3); | ||
1467 | mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3); | ||
1468 | mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5); | ||
1469 | mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5); | ||
1470 | 1403 | ||
1471 | mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0); | 1404 | mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0); |
1472 | mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0); | 1405 | mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0); |
1473 | 1406 | ||
1474 | write_phy_reg(pi, 0x6da, 0xffff); | 1407 | mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1); |
1475 | or_phy_reg(pi, 0x6db, 0x3); | 1408 | mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1); |
1476 | 1409 | ||
1477 | wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch); | 1410 | write_radio_reg(pi, RADIO_2064_REG116, 0x06); |
1478 | set_gain = false; | 1411 | write_radio_reg(pi, RADIO_2064_REG12C, 0x07); |
1479 | 1412 | write_radio_reg(pi, RADIO_2064_REG06A, 0xd3); | |
1480 | lna2_gain = 3; | 1413 | write_radio_reg(pi, RADIO_2064_REG098, 0x03); |
1481 | while ((lna2_gain >= 0) && !set_gain) { | 1414 | write_radio_reg(pi, RADIO_2064_REG00B, 0x7); |
1482 | tia_gain = 4; | 1415 | mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4); |
1483 | 1416 | write_radio_reg(pi, RADIO_2064_REG01D, 0x01); | |
1484 | while ((tia_gain >= 0) && !set_gain) { | 1417 | write_radio_reg(pi, RADIO_2064_REG114, 0x01); |
1485 | biq1_gain = 6; | 1418 | write_radio_reg(pi, RADIO_2064_REG02E, 0x10); |
1486 | 1419 | write_radio_reg(pi, RADIO_2064_REG12A, 0x08); | |
1487 | while ((biq1_gain >= 0) && !set_gain) { | 1420 | |
1488 | set_gain = wlc_lcnphy_rx_iq_cal_gain(pi, | 1421 | mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0); |
1489 | (u16) | 1422 | mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0); |
1490 | biq1_gain, | 1423 | mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1); |
1491 | (u16) | 1424 | mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1); |
1492 | tia_gain, | 1425 | mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2); |
1493 | (u16) | 1426 | mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2); |
1494 | lna2_gain); | 1427 | mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3); |
1495 | biq1_gain -= 1; | 1428 | mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3); |
1496 | } | 1429 | mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5); |
1430 | mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5); | ||
1431 | |||
1432 | mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0); | ||
1433 | mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0); | ||
1434 | |||
1435 | wlc_lcnphy_start_tx_tone(pi, 2000, 120, 0); | ||
1436 | write_phy_reg(pi, 0x6da, 0xffff); | ||
1437 | or_phy_reg(pi, 0x6db, 0x3); | ||
1438 | wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch); | ||
1439 | wlc_lcnphy_rx_gain_override_enable(pi, true); | ||
1440 | |||
1441 | tia_gain = 8; | ||
1442 | rx_pwr_threshold = 950; | ||
1443 | while (tia_gain > 0) { | ||
1497 | tia_gain -= 1; | 1444 | tia_gain -= 1; |
1445 | wlc_lcnphy_set_rx_gain_by_distribution(pi, | ||
1446 | 0, 0, 2, 2, | ||
1447 | (u16) | ||
1448 | tia_gain, 1, 0); | ||
1449 | udelay(500); | ||
1450 | |||
1451 | received_power = | ||
1452 | wlc_lcnphy_measure_digital_power(pi, 2000); | ||
1453 | if (received_power < rx_pwr_threshold) | ||
1454 | break; | ||
1498 | } | 1455 | } |
1499 | lna2_gain -= 1; | 1456 | result = wlc_lcnphy_calc_rx_iq_comp(pi, 0xffff); |
1500 | } | ||
1501 | 1457 | ||
1502 | if (set_gain) | 1458 | wlc_lcnphy_stop_tx_tone(pi); |
1503 | result = wlc_lcnphy_calc_rx_iq_comp(pi, 1024); | ||
1504 | else | ||
1505 | result = false; | ||
1506 | 1459 | ||
1507 | wlc_lcnphy_stop_tx_tone(pi); | 1460 | write_phy_reg(pi, 0x631, Core1TxControl_old); |
1508 | 1461 | ||
1509 | write_phy_reg(pi, 0x631, Core1TxControl_old); | 1462 | write_phy_reg(pi, 0x44c, RFOverrideVal0_old); |
1510 | 1463 | write_phy_reg(pi, 0x44d, RFOverrideVal0_old); | |
1511 | write_phy_reg(pi, 0x44c, RFOverrideVal0_old); | 1464 | write_phy_reg(pi, 0x4b0, rfoverride2_old); |
1512 | write_phy_reg(pi, 0x44d, RFOverrideVal0_old); | 1465 | write_phy_reg(pi, 0x4b1, rfoverride2val_old); |
1513 | write_phy_reg(pi, 0x4b0, rfoverride2_old); | 1466 | write_phy_reg(pi, 0x4f9, rfoverride3_old); |
1514 | write_phy_reg(pi, 0x4b1, rfoverride2val_old); | 1467 | write_phy_reg(pi, 0x4fa, rfoverride3val_old); |
1515 | write_phy_reg(pi, 0x4f9, rfoverride3_old); | 1468 | write_phy_reg(pi, 0x938, rfoverride4_old); |
1516 | write_phy_reg(pi, 0x4fa, rfoverride3val_old); | 1469 | write_phy_reg(pi, 0x939, rfoverride4val_old); |
1517 | write_phy_reg(pi, 0x938, rfoverride4_old); | 1470 | write_phy_reg(pi, 0x43b, afectrlovr_old); |
1518 | write_phy_reg(pi, 0x939, rfoverride4val_old); | 1471 | write_phy_reg(pi, 0x43c, afectrlovrval_old); |
1519 | write_phy_reg(pi, 0x43b, afectrlovr_old); | 1472 | write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl); |
1520 | write_phy_reg(pi, 0x43c, afectrlovrval_old); | 1473 | write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl); |
1521 | write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl); | ||
1522 | write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl); | ||
1523 | 1474 | ||
1524 | wlc_lcnphy_clear_trsw_override(pi); | 1475 | wlc_lcnphy_clear_trsw_override(pi); |
1525 | 1476 | ||
1526 | mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2); | 1477 | mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2); |
1527 | 1478 | ||
1528 | for (i = 0; i < 11; i++) | 1479 | for (i = 0; i < 11; i++) |
1529 | write_radio_reg(pi, rxiq_cal_rf_reg[i], | 1480 | write_radio_reg(pi, rxiq_cal_rf_reg[i], |
1530 | values_to_save[i]); | 1481 | values_to_save[i]); |
1531 | 1482 | ||
1532 | if (tx_gain_override_old) | 1483 | if (tx_gain_override_old) |
1533 | wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old); | 1484 | wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old); |
1534 | else | 1485 | else |
1535 | wlc_lcnphy_disable_tx_gain_override(pi); | 1486 | wlc_lcnphy_disable_tx_gain_override(pi); |
1536 | 1487 | ||
1537 | wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl); | 1488 | wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl); |
1538 | wlc_lcnphy_rx_gain_override_enable(pi, false); | 1489 | wlc_lcnphy_rx_gain_override_enable(pi, false); |
1490 | } | ||
1539 | 1491 | ||
1540 | cal_done: | 1492 | cal_done: |
1541 | kfree(ptr); | 1493 | kfree(ptr); |
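Note on the hunk above: the new side replaces the old three-level nested gain search (lna2/tia/biq1) with a single descending TIA-gain sweep that stops as soon as the measured digital power falls below a fixed threshold. Below is a minimal user-space C sketch of that search shape only; measure_power() and its values are illustrative stand-ins for wlc_lcnphy_measure_digital_power(), not the driver's code.

#include <stdio.h>

/* Stand-in for the driver's power measurement: pretend the digitized
 * power drops as the TIA gain step is reduced.  Values are made up. */
static unsigned int measure_power(int tia_gain)
{
	return 400 + 100 * (unsigned int)tia_gain;
}

int main(void)
{
	int tia_gain = 8;                    /* start at the highest gain step */
	const unsigned int threshold = 950;  /* rx_pwr_threshold in the patch  */
	unsigned int power = 0;

	/* Sweep downward until the measured power is below threshold,
	 * mirroring the loop the patch introduces. */
	while (tia_gain > 0) {
		tia_gain -= 1;
		power = measure_power(tia_gain);  /* driver programs the gain,
		                                   * waits 500us, then measures */
		if (power < threshold)
			break;
	}

	printf("settled at tia_gain=%d, power=%u\n", tia_gain, power);
	return 0;
}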
@@ -1829,17 +1781,6 @@ wlc_lcnphy_radio_2064_channel_tune_4313(struct brcms_phy *pi, u8 channel) | |||
1829 | write_radio_reg(pi, RADIO_2064_REG038, 3); | 1781 | write_radio_reg(pi, RADIO_2064_REG038, 3); |
1830 | write_radio_reg(pi, RADIO_2064_REG091, 7); | 1782 | write_radio_reg(pi, RADIO_2064_REG091, 7); |
1831 | } | 1783 | } |
1832 | |||
1833 | if (!(pi->sh->boardflags & BFL_FEM)) { | ||
1834 | u8 reg038[14] = {0xd, 0xe, 0xd, 0xd, 0xd, 0xc, | ||
1835 | 0xa, 0xb, 0xb, 0x3, 0x3, 0x2, 0x0, 0x0}; | ||
1836 | |||
1837 | write_radio_reg(pi, RADIO_2064_REG02A, 0xf); | ||
1838 | write_radio_reg(pi, RADIO_2064_REG091, 0x3); | ||
1839 | write_radio_reg(pi, RADIO_2064_REG038, 0x3); | ||
1840 | |||
1841 | write_radio_reg(pi, RADIO_2064_REG038, reg038[channel - 1]); | ||
1842 | } | ||
1843 | } | 1784 | } |
1844 | 1785 | ||
1845 | static int | 1786 | static int |
@@ -2034,16 +1975,6 @@ wlc_lcnphy_set_tssi_mux(struct brcms_phy *pi, enum lcnphy_tssi_mode pos) | |||
2034 | } else { | 1975 | } else { |
2035 | mod_radio_reg(pi, RADIO_2064_REG03A, 1, 0x1); | 1976 | mod_radio_reg(pi, RADIO_2064_REG03A, 1, 0x1); |
2036 | mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8); | 1977 | mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8); |
2037 | mod_radio_reg(pi, RADIO_2064_REG028, 0x1, 0x0); | ||
2038 | mod_radio_reg(pi, RADIO_2064_REG11A, 0x4, 1<<2); | ||
2039 | mod_radio_reg(pi, RADIO_2064_REG036, 0x10, 0x0); | ||
2040 | mod_radio_reg(pi, RADIO_2064_REG11A, 0x10, 1<<4); | ||
2041 | mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0); | ||
2042 | mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x77); | ||
2043 | mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, 0xe<<1); | ||
2044 | mod_radio_reg(pi, RADIO_2064_REG112, 0x80, 1<<7); | ||
2045 | mod_radio_reg(pi, RADIO_2064_REG005, 0x7, 1<<1); | ||
2046 | mod_radio_reg(pi, RADIO_2064_REG029, 0xf0, 0<<4); | ||
2047 | } | 1978 | } |
2048 | } else { | 1979 | } else { |
2049 | mod_phy_reg(pi, 0x4d9, (0x1 << 2), (0x1) << 2); | 1980 | mod_phy_reg(pi, 0x4d9, (0x1 << 2), (0x1) << 2); |
@@ -2130,14 +2061,12 @@ static void wlc_lcnphy_pwrctrl_rssiparams(struct brcms_phy *pi) | |||
2130 | (auxpga_vmid_temp << 0) | (auxpga_gain_temp << 12)); | 2061 | (auxpga_vmid_temp << 0) | (auxpga_gain_temp << 12)); |
2131 | 2062 | ||
2132 | mod_radio_reg(pi, RADIO_2064_REG082, (1 << 5), (1 << 5)); | 2063 | mod_radio_reg(pi, RADIO_2064_REG082, (1 << 5), (1 << 5)); |
2133 | mod_radio_reg(pi, RADIO_2064_REG07C, (1 << 0), (1 << 0)); | ||
2134 | } | 2064 | } |
2135 | 2065 | ||
2136 | static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi) | 2066 | static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi) |
2137 | { | 2067 | { |
2138 | struct phytbl_info tab; | 2068 | struct phytbl_info tab; |
2139 | u32 rfseq, ind; | 2069 | u32 rfseq, ind; |
2140 | u8 tssi_sel; | ||
2141 | 2070 | ||
2142 | tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL; | 2071 | tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL; |
2143 | tab.tbl_width = 32; | 2072 | tab.tbl_width = 32; |
@@ -2159,13 +2088,7 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi) | |||
2159 | 2088 | ||
2160 | mod_phy_reg(pi, 0x503, (0x1 << 4), (1) << 4); | 2089 | mod_phy_reg(pi, 0x503, (0x1 << 4), (1) << 4); |
2161 | 2090 | ||
2162 | if (pi->sh->boardflags & BFL_FEM) { | 2091 | wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT); |
2163 | tssi_sel = 0x1; | ||
2164 | wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT); | ||
2165 | } else { | ||
2166 | tssi_sel = 0xe; | ||
2167 | wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_POST_PA); | ||
2168 | } | ||
2169 | mod_phy_reg(pi, 0x4a4, (0x1 << 14), (0) << 14); | 2092 | mod_phy_reg(pi, 0x4a4, (0x1 << 14), (0) << 14); |
2170 | 2093 | ||
2171 | mod_phy_reg(pi, 0x4a4, (0x1 << 15), (1) << 15); | 2094 | mod_phy_reg(pi, 0x4a4, (0x1 << 15), (1) << 15); |
@@ -2201,10 +2124,9 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi) | |||
2201 | mod_phy_reg(pi, 0x49a, (0x1ff << 0), (0xff) << 0); | 2124 | mod_phy_reg(pi, 0x49a, (0x1ff << 0), (0xff) << 0); |
2202 | 2125 | ||
2203 | if (LCNREV_IS(pi->pubpi.phy_rev, 2)) { | 2126 | if (LCNREV_IS(pi->pubpi.phy_rev, 2)) { |
2204 | mod_radio_reg(pi, RADIO_2064_REG028, 0xf, tssi_sel); | 2127 | mod_radio_reg(pi, RADIO_2064_REG028, 0xf, 0xe); |
2205 | mod_radio_reg(pi, RADIO_2064_REG086, 0x4, 0x4); | 2128 | mod_radio_reg(pi, RADIO_2064_REG086, 0x4, 0x4); |
2206 | } else { | 2129 | } else { |
2207 | mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, tssi_sel << 1); | ||
2208 | mod_radio_reg(pi, RADIO_2064_REG03A, 0x1, 1); | 2130 | mod_radio_reg(pi, RADIO_2064_REG03A, 0x1, 1); |
2209 | mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 1 << 3); | 2131 | mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 1 << 3); |
2210 | } | 2132 | } |
@@ -2251,10 +2173,6 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi) | |||
2251 | 2173 | ||
2252 | mod_phy_reg(pi, 0x4d7, (0xf << 8), (0) << 8); | 2174 | mod_phy_reg(pi, 0x4d7, (0xf << 8), (0) << 8); |
2253 | 2175 | ||
2254 | mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x0); | ||
2255 | mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0); | ||
2256 | mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8); | ||
2257 | |||
2258 | wlc_lcnphy_pwrctrl_rssiparams(pi); | 2176 | wlc_lcnphy_pwrctrl_rssiparams(pi); |
2259 | } | 2177 | } |
2260 | 2178 | ||
@@ -2873,8 +2791,6 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi) | |||
2873 | read_radio_reg(pi, RADIO_2064_REG007) & 1; | 2791 | read_radio_reg(pi, RADIO_2064_REG007) & 1; |
2874 | u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10; | 2792 | u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10; |
2875 | u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4; | 2793 | u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4; |
2876 | u8 SAVE_bbmult = wlc_lcnphy_get_bbmult(pi); | ||
2877 | |||
2878 | idleTssi = read_phy_reg(pi, 0x4ab); | 2794 | idleTssi = read_phy_reg(pi, 0x4ab); |
2879 | suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & | 2795 | suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & |
2880 | MCTL_EN_MAC)); | 2796 | MCTL_EN_MAC)); |
@@ -2892,12 +2808,6 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi) | |||
2892 | mod_radio_reg(pi, RADIO_2064_REG0FF, 0x10, 1 << 4); | 2808 | mod_radio_reg(pi, RADIO_2064_REG0FF, 0x10, 1 << 4); |
2893 | mod_radio_reg(pi, RADIO_2064_REG11F, 0x4, 1 << 2); | 2809 | mod_radio_reg(pi, RADIO_2064_REG11F, 0x4, 1 << 2); |
2894 | wlc_lcnphy_tssi_setup(pi); | 2810 | wlc_lcnphy_tssi_setup(pi); |
2895 | |||
2896 | mod_phy_reg(pi, 0x4d7, (0x1 << 0), (1 << 0)); | ||
2897 | mod_phy_reg(pi, 0x4d7, (0x1 << 6), (1 << 6)); | ||
2898 | |||
2899 | wlc_lcnphy_set_bbmult(pi, 0x0); | ||
2900 | |||
2901 | wlc_phy_do_dummy_tx(pi, true, OFF); | 2811 | wlc_phy_do_dummy_tx(pi, true, OFF); |
2902 | idleTssi = ((read_phy_reg(pi, 0x4ab) & (0x1ff << 0)) | 2812 | idleTssi = ((read_phy_reg(pi, 0x4ab) & (0x1ff << 0)) |
2903 | >> 0); | 2813 | >> 0); |
@@ -2919,7 +2829,6 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi) | |||
2919 | 2829 | ||
2920 | mod_phy_reg(pi, 0x44c, (0x1 << 12), (0) << 12); | 2830 | mod_phy_reg(pi, 0x44c, (0x1 << 12), (0) << 12); |
2921 | 2831 | ||
2922 | wlc_lcnphy_set_bbmult(pi, SAVE_bbmult); | ||
2923 | wlc_lcnphy_set_tx_gain_override(pi, tx_gain_override_old); | 2832 | wlc_lcnphy_set_tx_gain_override(pi, tx_gain_override_old); |
2924 | wlc_lcnphy_set_tx_gain(pi, &old_gains); | 2833 | wlc_lcnphy_set_tx_gain(pi, &old_gains); |
2925 | wlc_lcnphy_set_tx_pwr_ctrl(pi, SAVE_txpwrctrl); | 2834 | wlc_lcnphy_set_tx_pwr_ctrl(pi, SAVE_txpwrctrl); |
@@ -3133,11 +3042,6 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi) | |||
3133 | wlc_lcnphy_write_table(pi, &tab); | 3042 | wlc_lcnphy_write_table(pi, &tab); |
3134 | tab.tbl_offset++; | 3043 | tab.tbl_offset++; |
3135 | } | 3044 | } |
3136 | mod_phy_reg(pi, 0x4d0, (0x1 << 0), (0) << 0); | ||
3137 | mod_phy_reg(pi, 0x4d3, (0xff << 0), (0) << 0); | ||
3138 | mod_phy_reg(pi, 0x4d3, (0xff << 8), (0) << 8); | ||
3139 | mod_phy_reg(pi, 0x4d0, (0x1 << 4), (0) << 4); | ||
3140 | mod_phy_reg(pi, 0x4d0, (0x1 << 2), (0) << 2); | ||
3141 | 3045 | ||
3142 | mod_phy_reg(pi, 0x410, (0x1 << 7), (0) << 7); | 3046 | mod_phy_reg(pi, 0x410, (0x1 << 7), (0) << 7); |
3143 | 3047 | ||
@@ -3939,6 +3843,7 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi) | |||
3939 | target_gains.pad_gain = 21; | 3843 | target_gains.pad_gain = 21; |
3940 | target_gains.dac_gain = 0; | 3844 | target_gains.dac_gain = 0; |
3941 | wlc_lcnphy_set_tx_gain(pi, &target_gains); | 3845 | wlc_lcnphy_set_tx_gain(pi, &target_gains); |
3846 | wlc_lcnphy_set_tx_pwr_by_index(pi, 16); | ||
3942 | 3847 | ||
3943 | if (LCNREV_IS(pi->pubpi.phy_rev, 1) || pi_lcn->lcnphy_hw_iqcal_en) { | 3848 | if (LCNREV_IS(pi->pubpi.phy_rev, 1) || pi_lcn->lcnphy_hw_iqcal_en) { |
3944 | 3849 | ||
@@ -3949,7 +3854,6 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi) | |||
3949 | lcnphy_recal ? LCNPHY_CAL_RECAL : | 3854 | lcnphy_recal ? LCNPHY_CAL_RECAL : |
3950 | LCNPHY_CAL_FULL), false); | 3855 | LCNPHY_CAL_FULL), false); |
3951 | } else { | 3856 | } else { |
3952 | wlc_lcnphy_set_tx_pwr_by_index(pi, 16); | ||
3953 | wlc_lcnphy_tx_iqlo_soft_cal_full(pi); | 3857 | wlc_lcnphy_tx_iqlo_soft_cal_full(pi); |
3954 | } | 3858 | } |
3955 | 3859 | ||
@@ -4374,22 +4278,17 @@ wlc_lcnphy_load_tx_gain_table(struct brcms_phy *pi, | |||
4374 | if (CHSPEC_IS5G(pi->radio_chanspec)) | 4278 | if (CHSPEC_IS5G(pi->radio_chanspec)) |
4375 | pa_gain = 0x70; | 4279 | pa_gain = 0x70; |
4376 | else | 4280 | else |
4377 | pa_gain = 0x60; | 4281 | pa_gain = 0x70; |
4378 | 4282 | ||
4379 | if (pi->sh->boardflags & BFL_FEM) | 4283 | if (pi->sh->boardflags & BFL_FEM) |
4380 | pa_gain = 0x10; | 4284 | pa_gain = 0x10; |
4381 | |||
4382 | tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL; | 4285 | tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL; |
4383 | tab.tbl_width = 32; | 4286 | tab.tbl_width = 32; |
4384 | tab.tbl_len = 1; | 4287 | tab.tbl_len = 1; |
4385 | tab.tbl_ptr = &val; | 4288 | tab.tbl_ptr = &val; |
4386 | 4289 | ||
4387 | for (j = 0; j < 128; j++) { | 4290 | for (j = 0; j < 128; j++) { |
4388 | if (pi->sh->boardflags & BFL_FEM) | 4291 | gm_gain = gain_table[j].gm; |
4389 | gm_gain = gain_table[j].gm; | ||
4390 | else | ||
4391 | gm_gain = 15; | ||
4392 | |||
4393 | val = (((u32) pa_gain << 24) | | 4292 | val = (((u32) pa_gain << 24) | |
4394 | (gain_table[j].pad << 16) | | 4293 | (gain_table[j].pad << 16) | |
4395 | (gain_table[j].pga << 8) | gm_gain); | 4294 | (gain_table[j].pga << 8) | gm_gain); |
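Note on the hunk above: each TX power table entry packs four gain fields into one 32-bit word (PA in bits 31:24, PAD in 23:16, PGA in 15:8, GM in 7:0); after this change the GM field always comes straight from gain_table[j]. A small sketch of the packing and unpacking, using made-up field values rather than real table entries:

#include <stdio.h>

int main(void)
{
	/* Illustrative values; the driver takes pad/pga/gm from gain_table[j]
	 * and pa_gain is 0x70 (or 0x10 on BFL_FEM boards) after this patch. */
	unsigned int pa = 0x70, pad = 21, pga = 12, gm = 7;

	unsigned int val = (pa << 24) | (pad << 16) | (pga << 8) | gm;

	/* Unpack to show the layout of the 32-bit table word. */
	printf("val=0x%08x pa=0x%02x pad=%u pga=%u gm=%u\n",
	       val,
	       (val >> 24) & 0xff,
	       (val >> 16) & 0xff,
	       (val >> 8) & 0xff,
	       val & 0xff);
	return 0;
}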
@@ -4600,10 +4499,7 @@ static void wlc_radio_2064_init(struct brcms_phy *pi) | |||
4600 | 4499 | ||
4601 | write_phy_reg(pi, 0x4ea, 0x4688); | 4500 | write_phy_reg(pi, 0x4ea, 0x4688); |
4602 | 4501 | ||
4603 | if (pi->sh->boardflags & BFL_FEM) | 4502 | mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0); |
4604 | mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0); | ||
4605 | else | ||
4606 | mod_phy_reg(pi, 0x4eb, (0x7 << 0), 3 << 0); | ||
4607 | 4503 | ||
4608 | mod_phy_reg(pi, 0x4eb, (0x7 << 6), 0 << 6); | 4504 | mod_phy_reg(pi, 0x4eb, (0x7 << 6), 0 << 6); |
4609 | 4505 | ||
@@ -4614,13 +4510,6 @@ static void wlc_radio_2064_init(struct brcms_phy *pi) | |||
4614 | wlc_lcnphy_rcal(pi); | 4510 | wlc_lcnphy_rcal(pi); |
4615 | 4511 | ||
4616 | wlc_lcnphy_rc_cal(pi); | 4512 | wlc_lcnphy_rc_cal(pi); |
4617 | |||
4618 | if (!(pi->sh->boardflags & BFL_FEM)) { | ||
4619 | write_radio_reg(pi, RADIO_2064_REG032, 0x6f); | ||
4620 | write_radio_reg(pi, RADIO_2064_REG033, 0x19); | ||
4621 | write_radio_reg(pi, RADIO_2064_REG039, 0xe); | ||
4622 | } | ||
4623 | |||
4624 | } | 4513 | } |
4625 | 4514 | ||
4626 | static void wlc_lcnphy_radio_init(struct brcms_phy *pi) | 4515 | static void wlc_lcnphy_radio_init(struct brcms_phy *pi) |
@@ -4650,20 +4539,22 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi) | |||
4650 | wlc_lcnphy_write_table(pi, &tab); | 4539 | wlc_lcnphy_write_table(pi, &tab); |
4651 | } | 4540 | } |
4652 | 4541 | ||
4653 | if (!(pi->sh->boardflags & BFL_FEM)) { | 4542 | tab.tbl_id = LCNPHY_TBL_ID_RFSEQ; |
4654 | tab.tbl_id = LCNPHY_TBL_ID_RFSEQ; | 4543 | tab.tbl_width = 16; |
4655 | tab.tbl_width = 16; | 4544 | tab.tbl_ptr = &val; |
4656 | tab.tbl_ptr = &val; | 4545 | tab.tbl_len = 1; |
4657 | tab.tbl_len = 1; | ||
4658 | 4546 | ||
4659 | val = 150; | 4547 | val = 114; |
4660 | tab.tbl_offset = 0; | 4548 | tab.tbl_offset = 0; |
4661 | wlc_lcnphy_write_table(pi, &tab); | 4549 | wlc_lcnphy_write_table(pi, &tab); |
4662 | 4550 | ||
4663 | val = 220; | 4551 | val = 130; |
4664 | tab.tbl_offset = 1; | 4552 | tab.tbl_offset = 1; |
4665 | wlc_lcnphy_write_table(pi, &tab); | 4553 | wlc_lcnphy_write_table(pi, &tab); |
4666 | } | 4554 | |
4555 | val = 6; | ||
4556 | tab.tbl_offset = 8; | ||
4557 | wlc_lcnphy_write_table(pi, &tab); | ||
4667 | 4558 | ||
4668 | if (CHSPEC_IS2G(pi->radio_chanspec)) { | 4559 | if (CHSPEC_IS2G(pi->radio_chanspec)) { |
4669 | if (pi->sh->boardflags & BFL_FEM) | 4560 | if (pi->sh->boardflags & BFL_FEM) |
@@ -5055,7 +4946,6 @@ void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi, u16 chanspec) | |||
5055 | wlc_lcnphy_load_tx_iir_filter(pi, true, 3); | 4946 | wlc_lcnphy_load_tx_iir_filter(pi, true, 3); |
5056 | 4947 | ||
5057 | mod_phy_reg(pi, 0x4eb, (0x7 << 3), (1) << 3); | 4948 | mod_phy_reg(pi, 0x4eb, (0x7 << 3), (1) << 3); |
5058 | wlc_lcnphy_tssi_setup(pi); | ||
5059 | } | 4949 | } |
5060 | 4950 | ||
5061 | void wlc_phy_detach_lcnphy(struct brcms_phy *pi) | 4951 | void wlc_phy_detach_lcnphy(struct brcms_phy *pi) |
@@ -5094,7 +4984,8 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi) | |||
5094 | if (!wlc_phy_txpwr_srom_read_lcnphy(pi)) | 4984 | if (!wlc_phy_txpwr_srom_read_lcnphy(pi)) |
5095 | return false; | 4985 | return false; |
5096 | 4986 | ||
5097 | if (LCNREV_IS(pi->pubpi.phy_rev, 1)) { | 4987 | if ((pi->sh->boardflags & BFL_FEM) && |
4988 | (LCNREV_IS(pi->pubpi.phy_rev, 1))) { | ||
5098 | if (pi_lcn->lcnphy_tempsense_option == 3) { | 4989 | if (pi_lcn->lcnphy_tempsense_option == 3) { |
5099 | pi->hwpwrctrl = true; | 4990 | pi->hwpwrctrl = true; |
5100 | pi->hwpwrctrl_capable = true; | 4991 | pi->hwpwrctrl_capable = true; |
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c index b7e95acc2084..622c01ca72c5 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c | |||
@@ -1992,70 +1992,70 @@ static const u16 dot11lcn_sw_ctrl_tbl_4313_epa_rev0[] = { | |||
1992 | }; | 1992 | }; |
1993 | 1993 | ||
1994 | static const u16 dot11lcn_sw_ctrl_tbl_4313_rev0[] = { | 1994 | static const u16 dot11lcn_sw_ctrl_tbl_4313_rev0[] = { |
1995 | 0x0009, | ||
1996 | 0x000a, | 1995 | 0x000a, |
1997 | 0x0005, | ||
1998 | 0x0006, | ||
1999 | 0x0009, | 1996 | 0x0009, |
2000 | 0x000a, | ||
2001 | 0x0005, | ||
2002 | 0x0006, | 1997 | 0x0006, |
2003 | 0x0009, | ||
2004 | 0x000a, | ||
2005 | 0x0005, | 1998 | 0x0005, |
2006 | 0x0006, | ||
2007 | 0x0009, | ||
2008 | 0x000a, | 1999 | 0x000a, |
2009 | 0x0005, | ||
2010 | 0x0006, | ||
2011 | 0x0009, | 2000 | 0x0009, |
2012 | 0x000a, | ||
2013 | 0x0005, | ||
2014 | 0x0006, | 2001 | 0x0006, |
2015 | 0x0009, | ||
2016 | 0x000a, | ||
2017 | 0x0005, | 2002 | 0x0005, |
2018 | 0x0006, | ||
2019 | 0x0009, | ||
2020 | 0x000a, | 2003 | 0x000a, |
2021 | 0x0005, | ||
2022 | 0x0006, | ||
2023 | 0x0009, | 2004 | 0x0009, |
2024 | 0x000a, | ||
2025 | 0x0005, | ||
2026 | 0x0006, | 2005 | 0x0006, |
2027 | 0x0009, | ||
2028 | 0x000a, | ||
2029 | 0x0005, | 2006 | 0x0005, |
2030 | 0x0006, | ||
2031 | 0x0009, | ||
2032 | 0x000a, | 2007 | 0x000a, |
2033 | 0x0005, | ||
2034 | 0x0006, | ||
2035 | 0x0009, | 2008 | 0x0009, |
2036 | 0x000a, | ||
2037 | 0x0005, | ||
2038 | 0x0006, | 2009 | 0x0006, |
2039 | 0x0009, | ||
2040 | 0x000a, | ||
2041 | 0x0005, | 2010 | 0x0005, |
2042 | 0x0006, | 2011 | 0x000a, |
2043 | 0x0009, | 2012 | 0x0009, |
2013 | 0x0006, | ||
2014 | 0x0005, | ||
2044 | 0x000a, | 2015 | 0x000a, |
2016 | 0x0009, | ||
2017 | 0x0006, | ||
2045 | 0x0005, | 2018 | 0x0005, |
2019 | 0x000a, | ||
2020 | 0x0009, | ||
2046 | 0x0006, | 2021 | 0x0006, |
2022 | 0x0005, | ||
2023 | 0x000a, | ||
2047 | 0x0009, | 2024 | 0x0009, |
2025 | 0x0006, | ||
2026 | 0x0005, | ||
2048 | 0x000a, | 2027 | 0x000a, |
2028 | 0x0009, | ||
2029 | 0x0006, | ||
2049 | 0x0005, | 2030 | 0x0005, |
2031 | 0x000a, | ||
2032 | 0x0009, | ||
2050 | 0x0006, | 2033 | 0x0006, |
2034 | 0x0005, | ||
2035 | 0x000a, | ||
2051 | 0x0009, | 2036 | 0x0009, |
2037 | 0x0006, | ||
2038 | 0x0005, | ||
2052 | 0x000a, | 2039 | 0x000a, |
2040 | 0x0009, | ||
2041 | 0x0006, | ||
2053 | 0x0005, | 2042 | 0x0005, |
2043 | 0x000a, | ||
2044 | 0x0009, | ||
2054 | 0x0006, | 2045 | 0x0006, |
2046 | 0x0005, | ||
2047 | 0x000a, | ||
2055 | 0x0009, | 2048 | 0x0009, |
2049 | 0x0006, | ||
2050 | 0x0005, | ||
2056 | 0x000a, | 2051 | 0x000a, |
2052 | 0x0009, | ||
2053 | 0x0006, | ||
2057 | 0x0005, | 2054 | 0x0005, |
2055 | 0x000a, | ||
2056 | 0x0009, | ||
2058 | 0x0006, | 2057 | 0x0006, |
2058 | 0x0005, | ||
2059 | }; | 2059 | }; |
2060 | 2060 | ||
2061 | static const u16 dot11lcn_sw_ctrl_tbl_rev0[] = { | 2061 | static const u16 dot11lcn_sw_ctrl_tbl_rev0[] = { |
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c index e8324b5e5bfe..6c7493c2d698 100644 --- a/drivers/net/wireless/iwlegacy/4965-rs.c +++ b/drivers/net/wireless/iwlegacy/4965-rs.c | |||
@@ -2152,7 +2152,7 @@ il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf, | |||
2152 | int rate_idx; | 2152 | int rate_idx; |
2153 | int i; | 2153 | int i; |
2154 | u32 rate; | 2154 | u32 rate; |
2155 | u8 use_green = il4965_rs_use_green(il, sta); | 2155 | u8 use_green; |
2156 | u8 active_tbl = 0; | 2156 | u8 active_tbl = 0; |
2157 | u8 valid_tx_ant; | 2157 | u8 valid_tx_ant; |
2158 | struct il_station_priv *sta_priv; | 2158 | struct il_station_priv *sta_priv; |
@@ -2160,6 +2160,7 @@ il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf, | |||
2160 | if (!sta || !lq_sta) | 2160 | if (!sta || !lq_sta) |
2161 | return; | 2161 | return; |
2162 | 2162 | ||
2163 | use_green = il4965_rs_use_green(il, sta); | ||
2163 | sta_priv = (void *)sta->drv_priv; | 2164 | sta_priv = (void *)sta->drv_priv; |
2164 | 2165 | ||
2165 | i = lq_sta->last_txrate_idx; | 2166 | i = lq_sta->last_txrate_idx; |
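Note on the hunk above: the fix defers il4965_rs_use_green(), which dereferences sta, until after the NULL checks. A minimal sketch of that ordering; the struct and helper names below are placeholders, not the driver's types:

#include <stdio.h>
#include <stddef.h>

struct sta { int ht_green_field; };

/* Stand-in for il4965_rs_use_green(): touches the station pointer. */
static int use_green(const struct sta *s)
{
	return s->ht_green_field;
}

static void init_lq(struct sta *s, void *lq)
{
	int green;

	/* Validate the pointers first ... */
	if (!s || !lq)
		return;

	/* ... and only then dereference them, as the patch does. */
	green = use_green(s);
	printf("green=%d\n", green);
}

int main(void)
{
	struct sta s = { .ht_green_field = 1 };
	int dummy_lq = 0;

	init_lq(NULL, &dummy_lq);  /* safely ignored */
	init_lq(&s, &dummy_lq);    /* prints green=1 */
	return 0;
}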
diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c index 86ea5f4c3939..44ca0e57f9f7 100644 --- a/drivers/net/wireless/iwlwifi/dvm/lib.c +++ b/drivers/net/wireless/iwlwifi/dvm/lib.c | |||
@@ -1262,6 +1262,15 @@ int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) | |||
1262 | } | 1262 | } |
1263 | 1263 | ||
1264 | /* | 1264 | /* |
1265 | * This can happen upon FW ASSERT: we clear the STATUS_FW_ERROR flag | ||
1266 | * in iwl_down but cancel the workers only later. | ||
1267 | */ | ||
1268 | if (!priv->ucode_loaded) { | ||
1269 | IWL_ERR(priv, "Fw not loaded - dropping CMD: %x\n", cmd->id); | ||
1270 | return -EIO; | ||
1271 | } | ||
1272 | |||
1273 | /* | ||
1265 | * Synchronous commands from this op-mode must hold | 1274 | * Synchronous commands from this op-mode must hold |
1266 | * the mutex, this ensures we don't try to send two | 1275 | * the mutex, this ensures we don't try to send two |
1267 | * (or more) synchronous commands at a time. | 1276 | * (or more) synchronous commands at a time. |
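Note on the hunk above: the added check rejects host commands with -EIO in the window where a firmware assert has torn the ucode down but the workers have not yet been cancelled. A user-space sketch of the guard pattern; the struct and function names are placeholders:

#include <errno.h>
#include <stdio.h>

struct priv { int ucode_loaded; };

static int send_cmd(struct priv *p, unsigned int cmd_id)
{
	/* Refuse to queue commands when no firmware is loaded, mirroring the
	 * guard added to iwl_dvm_send_cmd(). */
	if (!p->ucode_loaded) {
		fprintf(stderr, "Fw not loaded - dropping CMD: %x\n", cmd_id);
		return -EIO;
	}
	/* ... the real function hands the command to the transport here ... */
	return 0;
}

int main(void)
{
	struct priv p = { .ucode_loaded = 0 };

	printf("send_cmd -> %d\n", send_cmd(&p, 0x1c));  /* -EIO   */
	p.ucode_loaded = 1;
	printf("send_cmd -> %d\n", send_cmd(&p, 0x1c));  /* 0 (ok) */
	return 0;
}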
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c index 23be948cf162..a82b6b39d4ff 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rxon.c +++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c | |||
@@ -1419,6 +1419,14 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw, | |||
1419 | 1419 | ||
1420 | mutex_lock(&priv->mutex); | 1420 | mutex_lock(&priv->mutex); |
1421 | 1421 | ||
1422 | if (changes & BSS_CHANGED_IDLE && bss_conf->idle) { | ||
1423 | /* | ||
1424 | * If we go idle, then clearly no "passive-no-rx" | ||
1425 | * workaround is needed any more, this is a reset. | ||
1426 | */ | ||
1427 | iwlagn_lift_passive_no_rx(priv); | ||
1428 | } | ||
1429 | |||
1422 | if (unlikely(!iwl_is_ready(priv))) { | 1430 | if (unlikely(!iwl_is_ready(priv))) { |
1423 | IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); | 1431 | IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); |
1424 | mutex_unlock(&priv->mutex); | 1432 | mutex_unlock(&priv->mutex); |
@@ -1450,16 +1458,6 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw, | |||
1450 | priv->timestamp = bss_conf->sync_tsf; | 1458 | priv->timestamp = bss_conf->sync_tsf; |
1451 | ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; | 1459 | ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; |
1452 | } else { | 1460 | } else { |
1453 | /* | ||
1454 | * If we disassociate while there are pending | ||
1455 | * frames, just wake up the queues and let the | ||
1456 | * frames "escape" ... This shouldn't really | ||
1457 | * be happening to start with, but we should | ||
1458 | * not get stuck in this case either since it | ||
1459 | * can happen if userspace gets confused. | ||
1460 | */ | ||
1461 | iwlagn_lift_passive_no_rx(priv); | ||
1462 | |||
1463 | ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; | 1461 | ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; |
1464 | 1462 | ||
1465 | if (ctx->ctxid == IWL_RXON_CTX_BSS) | 1463 | if (ctx->ctxid == IWL_RXON_CTX_BSS) |
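Note on the hunks above: the passive-no-rx workaround is now reset when the interface goes idle rather than on the disassociation path. A small sketch of testing a "changes" bitmask together with the new state; the flag names below are stand-ins for the mac80211 BSS_CHANGED_* constants:

#include <stdio.h>

#define CHANGED_IDLE  (1u << 0)   /* stand-in for BSS_CHANGED_IDLE  */
#define CHANGED_ASSOC (1u << 1)   /* stand-in for BSS_CHANGED_ASSOC */

struct bss_conf { int idle; int assoc; };

static void bss_info_changed(unsigned int changes, const struct bss_conf *conf)
{
	/* Reset the passive-no-rx workaround only on the idle transition,
	 * as the patch does, instead of on disassociation. */
	if ((changes & CHANGED_IDLE) && conf->idle)
		printf("lift passive-no-rx workaround\n");

	if (changes & CHANGED_ASSOC)
		printf("handle association change (assoc=%d)\n", conf->assoc);
}

int main(void)
{
	struct bss_conf conf = { .idle = 1, .assoc = 0 };

	bss_info_changed(CHANGED_IDLE, &conf);
	return 0;
}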
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c index 6aec2df3bb27..d1a670d7b10c 100644 --- a/drivers/net/wireless/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/iwlwifi/dvm/tx.c | |||
@@ -1192,7 +1192,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, | |||
1192 | memset(&info->status, 0, sizeof(info->status)); | 1192 | memset(&info->status, 0, sizeof(info->status)); |
1193 | 1193 | ||
1194 | if (status == TX_STATUS_FAIL_PASSIVE_NO_RX && | 1194 | if (status == TX_STATUS_FAIL_PASSIVE_NO_RX && |
1195 | iwl_is_associated_ctx(ctx) && ctx->vif && | 1195 | ctx->vif && |
1196 | ctx->vif->type == NL80211_IFTYPE_STATION) { | 1196 | ctx->vif->type == NL80211_IFTYPE_STATION) { |
1197 | /* block and stop all queues */ | 1197 | /* block and stop all queues */ |
1198 | priv->passive_no_rx = true; | 1198 | priv->passive_no_rx = true; |
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c index 736fe9bb140e..1a4ac9236a44 100644 --- a/drivers/net/wireless/iwlwifi/dvm/ucode.c +++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c | |||
@@ -367,6 +367,8 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv, | |||
367 | return -EIO; | 367 | return -EIO; |
368 | } | 368 | } |
369 | 369 | ||
370 | priv->ucode_loaded = true; | ||
371 | |||
370 | if (ucode_type != IWL_UCODE_WOWLAN) { | 372 | if (ucode_type != IWL_UCODE_WOWLAN) { |
371 | /* delay a bit to give rfkill time to run */ | 373 | /* delay a bit to give rfkill time to run */ |
372 | msleep(5); | 374 | msleep(5); |
@@ -380,8 +382,6 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv, | |||
380 | return ret; | 382 | return ret; |
381 | } | 383 | } |
382 | 384 | ||
383 | priv->ucode_loaded = true; | ||
384 | |||
385 | return 0; | 385 | return 0; |
386 | } | 386 | } |
387 | 387 | ||
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 17bedc50e753..12c4f31ca8fb 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c | |||
@@ -475,6 +475,10 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, | |||
475 | 475 | ||
476 | /* If platform's RF_KILL switch is NOT set to KILL */ | 476 | /* If platform's RF_KILL switch is NOT set to KILL */ |
477 | hw_rfkill = iwl_is_rfkill_set(trans); | 477 | hw_rfkill = iwl_is_rfkill_set(trans); |
478 | if (hw_rfkill) | ||
479 | set_bit(STATUS_RFKILL, &trans_pcie->status); | ||
480 | else | ||
481 | clear_bit(STATUS_RFKILL, &trans_pcie->status); | ||
478 | iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); | 482 | iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); |
479 | if (hw_rfkill && !run_in_rfkill) | 483 | if (hw_rfkill && !run_in_rfkill) |
480 | return -ERFKILL; | 484 | return -ERFKILL; |
@@ -641,6 +645,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, | |||
641 | 645 | ||
642 | static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) | 646 | static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) |
643 | { | 647 | { |
648 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
644 | bool hw_rfkill; | 649 | bool hw_rfkill; |
645 | int err; | 650 | int err; |
646 | 651 | ||
@@ -656,6 +661,10 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) | |||
656 | iwl_enable_rfkill_int(trans); | 661 | iwl_enable_rfkill_int(trans); |
657 | 662 | ||
658 | hw_rfkill = iwl_is_rfkill_set(trans); | 663 | hw_rfkill = iwl_is_rfkill_set(trans); |
664 | if (hw_rfkill) | ||
665 | set_bit(STATUS_RFKILL, &trans_pcie->status); | ||
666 | else | ||
667 | clear_bit(STATUS_RFKILL, &trans_pcie->status); | ||
659 | iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); | 668 | iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); |
660 | 669 | ||
661 | return 0; | 670 | return 0; |
@@ -694,6 +703,10 @@ static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans, | |||
694 | * op_mode. | 703 | * op_mode. |
695 | */ | 704 | */ |
696 | hw_rfkill = iwl_is_rfkill_set(trans); | 705 | hw_rfkill = iwl_is_rfkill_set(trans); |
706 | if (hw_rfkill) | ||
707 | set_bit(STATUS_RFKILL, &trans_pcie->status); | ||
708 | else | ||
709 | clear_bit(STATUS_RFKILL, &trans_pcie->status); | ||
697 | iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); | 710 | iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); |
698 | } | 711 | } |
699 | } | 712 | } |
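Note on the hunks above: all three entry points now mirror the hardware rfkill line into the transport's status bitmap before notifying the op-mode. A user-space sketch of the set/clear pattern on a plain bitmask (the kernel uses atomic set_bit/clear_bit on an unsigned long; the bit index below is arbitrary):

#include <stdio.h>

#define STATUS_RFKILL 3   /* bit index; the real value lives in the driver */

static unsigned long status;

static void update_rfkill(int hw_rfkill)
{
	/* Keep the cached status bit in sync with the hardware switch;
	 * the op-mode is then notified with the same value. */
	if (hw_rfkill)
		status |= 1UL << STATUS_RFKILL;
	else
		status &= ~(1UL << STATUS_RFKILL);
}

int main(void)
{
	update_rfkill(1);
	printf("status=0x%lx\n", status);   /* bit set   */
	update_rfkill(0);
	printf("status=0x%lx\n", status);   /* bit clear */
	return 0;
}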
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index 8595c16f74de..cb5c6792e3a8 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c | |||
@@ -1264,7 +1264,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, | |||
1264 | for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { | 1264 | for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { |
1265 | int copy = 0; | 1265 | int copy = 0; |
1266 | 1266 | ||
1267 | if (!cmd->len) | 1267 | if (!cmd->len[i]) |
1268 | continue; | 1268 | continue; |
1269 | 1269 | ||
1270 | /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */ | 1270 | /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */ |
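Note on the hunk above: the fix tests the per-fragment length, cmd->len[i], instead of the array name cmd->len, which is never "false", so zero-length TBs were never skipped. A small sketch of the per-element check with made-up lengths:

#include <stdio.h>

#define MAX_TBS 2

struct host_cmd { unsigned int len[MAX_TBS]; };

int main(void)
{
	struct host_cmd cmd = { .len = { 16, 0 } };  /* second fragment is empty */
	int i;

	for (i = 0; i < MAX_TBS; i++) {
		/* Skip empty fragments; checking "!cmd.len" instead of
		 * "!cmd.len[i]" never skips anything, which was the bug. */
		if (!cmd.len[i])
			continue;
		printf("map fragment %d, %u bytes\n", i, cmd.len[i]);
	}
	return 0;
}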
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index a44023a7bd57..8aaf56ade4d9 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c | |||
@@ -1892,7 +1892,8 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, | |||
1892 | } | 1892 | } |
1893 | } | 1893 | } |
1894 | 1894 | ||
1895 | for (i = 0; i < request->n_channels; i++) { | 1895 | for (i = 0; i < min_t(u32, request->n_channels, |
1896 | MWIFIEX_USER_SCAN_CHAN_MAX); i++) { | ||
1896 | chan = request->channels[i]; | 1897 | chan = request->channels[i]; |
1897 | priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value; | 1898 | priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value; |
1898 | priv->user_scan_cfg->chan_list[i].radio_type = chan->band; | 1899 | priv->user_scan_cfg->chan_list[i].radio_type = chan->band; |
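Note on the hunk above: the loop bound is now clamped to the driver's channel-list capacity, so an oversized cfg80211 scan request cannot overrun chan_list. A sketch of the clamp; the capacity constant below is a placeholder, not the real MWIFIEX_USER_SCAN_CHAN_MAX value:

#include <stdio.h>

#define USER_SCAN_CHAN_MAX 4   /* placeholder capacity */

static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;      /* what min_t(u32, a, b) evaluates to */
}

int main(void)
{
	unsigned int n_channels = 11;          /* e.g. a full 2.4 GHz scan request */
	unsigned int chan_list[USER_SCAN_CHAN_MAX];
	unsigned int i, n;

	/* Never copy more entries than the destination array can hold. */
	n = min_u32(n_channels, USER_SCAN_CHAN_MAX);
	for (i = 0; i < n; i++)
		chan_list[i] = i + 1;

	printf("copied %u of %u requested channels, first=%u\n",
	       n, n_channels, chan_list[0]);
	return 0;
}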
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c index 5c395e2e6a2b..feb204613397 100644 --- a/drivers/net/wireless/mwifiex/pcie.c +++ b/drivers/net/wireless/mwifiex/pcie.c | |||
@@ -1508,6 +1508,7 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) | |||
1508 | } | 1508 | } |
1509 | memcpy(adapter->upld_buf, skb->data, | 1509 | memcpy(adapter->upld_buf, skb->data, |
1510 | min_t(u32, MWIFIEX_SIZE_OF_CMD_BUFFER, skb->len)); | 1510 | min_t(u32, MWIFIEX_SIZE_OF_CMD_BUFFER, skb->len)); |
1511 | skb_push(skb, INTF_HEADER_LEN); | ||
1511 | if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE, | 1512 | if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE, |
1512 | PCI_DMA_FROMDEVICE)) | 1513 | PCI_DMA_FROMDEVICE)) |
1513 | return -1; | 1514 | return -1; |
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c index d215b4d3c51b..e7f6deaf715e 100644 --- a/drivers/net/wireless/mwifiex/scan.c +++ b/drivers/net/wireless/mwifiex/scan.c | |||
@@ -1393,8 +1393,10 @@ int mwifiex_scan_networks(struct mwifiex_private *priv, | |||
1393 | queue_work(adapter->workqueue, &adapter->main_work); | 1393 | queue_work(adapter->workqueue, &adapter->main_work); |
1394 | 1394 | ||
1395 | /* Perform internal scan synchronously */ | 1395 | /* Perform internal scan synchronously */ |
1396 | if (!priv->scan_request) | 1396 | if (!priv->scan_request) { |
1397 | dev_dbg(adapter->dev, "wait internal scan\n"); | ||
1397 | mwifiex_wait_queue_complete(adapter, cmd_node); | 1398 | mwifiex_wait_queue_complete(adapter, cmd_node); |
1399 | } | ||
1398 | } else { | 1400 | } else { |
1399 | spin_unlock_irqrestore(&adapter->scan_pending_q_lock, | 1401 | spin_unlock_irqrestore(&adapter->scan_pending_q_lock, |
1400 | flags); | 1402 | flags); |
@@ -1793,7 +1795,12 @@ check_next_scan: | |||
1793 | /* Need to indicate IOCTL complete */ | 1795 | /* Need to indicate IOCTL complete */ |
1794 | if (adapter->curr_cmd->wait_q_enabled) { | 1796 | if (adapter->curr_cmd->wait_q_enabled) { |
1795 | adapter->cmd_wait_q.status = 0; | 1797 | adapter->cmd_wait_q.status = 0; |
1796 | mwifiex_complete_cmd(adapter, adapter->curr_cmd); | 1798 | if (!priv->scan_request) { |
1799 | dev_dbg(adapter->dev, | ||
1800 | "complete internal scan\n"); | ||
1801 | mwifiex_complete_cmd(adapter, | ||
1802 | adapter->curr_cmd); | ||
1803 | } | ||
1797 | } | 1804 | } |
1798 | if (priv->report_scan_result) | 1805 | if (priv->report_scan_result) |
1799 | priv->report_scan_result = false; | 1806 | priv->report_scan_result = false; |
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig index 2bf4efa33186..76cd47eb901e 100644 --- a/drivers/net/wireless/rt2x00/Kconfig +++ b/drivers/net/wireless/rt2x00/Kconfig | |||
@@ -20,6 +20,7 @@ if RT2X00 | |||
20 | config RT2400PCI | 20 | config RT2400PCI |
21 | tristate "Ralink rt2400 (PCI/PCMCIA) support" | 21 | tristate "Ralink rt2400 (PCI/PCMCIA) support" |
22 | depends on PCI | 22 | depends on PCI |
23 | select RT2X00_LIB_MMIO | ||
23 | select RT2X00_LIB_PCI | 24 | select RT2X00_LIB_PCI |
24 | select EEPROM_93CX6 | 25 | select EEPROM_93CX6 |
25 | ---help--- | 26 | ---help--- |
@@ -31,6 +32,7 @@ config RT2400PCI | |||
31 | config RT2500PCI | 32 | config RT2500PCI |
32 | tristate "Ralink rt2500 (PCI/PCMCIA) support" | 33 | tristate "Ralink rt2500 (PCI/PCMCIA) support" |
33 | depends on PCI | 34 | depends on PCI |
35 | select RT2X00_LIB_MMIO | ||
34 | select RT2X00_LIB_PCI | 36 | select RT2X00_LIB_PCI |
35 | select EEPROM_93CX6 | 37 | select EEPROM_93CX6 |
36 | ---help--- | 38 | ---help--- |
@@ -43,6 +45,7 @@ config RT61PCI | |||
43 | tristate "Ralink rt2501/rt61 (PCI/PCMCIA) support" | 45 | tristate "Ralink rt2501/rt61 (PCI/PCMCIA) support" |
44 | depends on PCI | 46 | depends on PCI |
45 | select RT2X00_LIB_PCI | 47 | select RT2X00_LIB_PCI |
48 | select RT2X00_LIB_MMIO | ||
46 | select RT2X00_LIB_FIRMWARE | 49 | select RT2X00_LIB_FIRMWARE |
47 | select RT2X00_LIB_CRYPTO | 50 | select RT2X00_LIB_CRYPTO |
48 | select CRC_ITU_T | 51 | select CRC_ITU_T |
@@ -57,6 +60,7 @@ config RT2800PCI | |||
57 | tristate "Ralink rt27xx/rt28xx/rt30xx (PCI/PCIe/PCMCIA) support" | 60 | tristate "Ralink rt27xx/rt28xx/rt30xx (PCI/PCIe/PCMCIA) support" |
58 | depends on PCI || SOC_RT288X || SOC_RT305X | 61 | depends on PCI || SOC_RT288X || SOC_RT305X |
59 | select RT2800_LIB | 62 | select RT2800_LIB |
63 | select RT2X00_LIB_MMIO | ||
60 | select RT2X00_LIB_PCI if PCI | 64 | select RT2X00_LIB_PCI if PCI |
61 | select RT2X00_LIB_SOC if SOC_RT288X || SOC_RT305X | 65 | select RT2X00_LIB_SOC if SOC_RT288X || SOC_RT305X |
62 | select RT2X00_LIB_FIRMWARE | 66 | select RT2X00_LIB_FIRMWARE |
@@ -185,6 +189,9 @@ endif | |||
185 | config RT2800_LIB | 189 | config RT2800_LIB |
186 | tristate | 190 | tristate |
187 | 191 | ||
192 | config RT2X00_LIB_MMIO | ||
193 | tristate | ||
194 | |||
188 | config RT2X00_LIB_PCI | 195 | config RT2X00_LIB_PCI |
189 | tristate | 196 | tristate |
190 | select RT2X00_LIB | 197 | select RT2X00_LIB |
diff --git a/drivers/net/wireless/rt2x00/Makefile b/drivers/net/wireless/rt2x00/Makefile index 349d5b8284a4..f069d8bc5b67 100644 --- a/drivers/net/wireless/rt2x00/Makefile +++ b/drivers/net/wireless/rt2x00/Makefile | |||
@@ -9,6 +9,7 @@ rt2x00lib-$(CONFIG_RT2X00_LIB_FIRMWARE) += rt2x00firmware.o | |||
9 | rt2x00lib-$(CONFIG_RT2X00_LIB_LEDS) += rt2x00leds.o | 9 | rt2x00lib-$(CONFIG_RT2X00_LIB_LEDS) += rt2x00leds.o |
10 | 10 | ||
11 | obj-$(CONFIG_RT2X00_LIB) += rt2x00lib.o | 11 | obj-$(CONFIG_RT2X00_LIB) += rt2x00lib.o |
12 | obj-$(CONFIG_RT2X00_LIB_MMIO) += rt2x00mmio.o | ||
12 | obj-$(CONFIG_RT2X00_LIB_PCI) += rt2x00pci.o | 13 | obj-$(CONFIG_RT2X00_LIB_PCI) += rt2x00pci.o |
13 | obj-$(CONFIG_RT2X00_LIB_SOC) += rt2x00soc.o | 14 | obj-$(CONFIG_RT2X00_LIB_SOC) += rt2x00soc.o |
14 | obj-$(CONFIG_RT2X00_LIB_USB) += rt2x00usb.o | 15 | obj-$(CONFIG_RT2X00_LIB_USB) += rt2x00usb.o |
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c index 221beaaa83f1..dcfb54e0c516 100644 --- a/drivers/net/wireless/rt2x00/rt2400pci.c +++ b/drivers/net/wireless/rt2x00/rt2400pci.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/slab.h> | 34 | #include <linux/slab.h> |
35 | 35 | ||
36 | #include "rt2x00.h" | 36 | #include "rt2x00.h" |
37 | #include "rt2x00mmio.h" | ||
37 | #include "rt2x00pci.h" | 38 | #include "rt2x00pci.h" |
38 | #include "rt2400pci.h" | 39 | #include "rt2400pci.h" |
39 | 40 | ||
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c index 39edc59e8d03..e1d2dc9ed28a 100644 --- a/drivers/net/wireless/rt2x00/rt2500pci.c +++ b/drivers/net/wireless/rt2x00/rt2500pci.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/slab.h> | 34 | #include <linux/slab.h> |
35 | 35 | ||
36 | #include "rt2x00.h" | 36 | #include "rt2x00.h" |
37 | #include "rt2x00mmio.h" | ||
37 | #include "rt2x00pci.h" | 38 | #include "rt2x00pci.h" |
38 | #include "rt2500pci.h" | 39 | #include "rt2500pci.h" |
39 | 40 | ||
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c index ded73da4de0b..ba5a05625aaa 100644 --- a/drivers/net/wireless/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/rt2x00/rt2800pci.c | |||
@@ -41,6 +41,7 @@ | |||
41 | #include <linux/eeprom_93cx6.h> | 41 | #include <linux/eeprom_93cx6.h> |
42 | 42 | ||
43 | #include "rt2x00.h" | 43 | #include "rt2x00.h" |
44 | #include "rt2x00mmio.h" | ||
44 | #include "rt2x00pci.h" | 45 | #include "rt2x00pci.h" |
45 | #include "rt2x00soc.h" | 46 | #include "rt2x00soc.h" |
46 | #include "rt2800lib.h" | 47 | #include "rt2800lib.h" |
diff --git a/drivers/net/wireless/rt2x00/rt2x00mmio.c b/drivers/net/wireless/rt2x00/rt2x00mmio.c new file mode 100644 index 000000000000..d84a680ba0c9 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00mmio.c | |||
@@ -0,0 +1,216 @@ | |||
1 | /* | ||
2 | Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com> | ||
3 | <http://rt2x00.serialmonkey.com> | ||
4 | |||
5 | This program is free software; you can redistribute it and/or modify | ||
6 | it under the terms of the GNU General Public License as published by | ||
7 | the Free Software Foundation; either version 2 of the License, or | ||
8 | (at your option) any later version. | ||
9 | |||
10 | This program is distributed in the hope that it will be useful, | ||
11 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | GNU General Public License for more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License | ||
16 | along with this program; if not, write to the | ||
17 | Free Software Foundation, Inc., | ||
18 | 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
19 | */ | ||
20 | |||
21 | /* | ||
22 | Module: rt2x00mmio | ||
23 | Abstract: rt2x00 generic mmio device routines. | ||
24 | */ | ||
25 | |||
26 | #include <linux/dma-mapping.h> | ||
27 | #include <linux/kernel.h> | ||
28 | #include <linux/module.h> | ||
29 | #include <linux/slab.h> | ||
30 | |||
31 | #include "rt2x00.h" | ||
32 | #include "rt2x00mmio.h" | ||
33 | |||
34 | /* | ||
35 | * Register access. | ||
36 | */ | ||
37 | int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev, | ||
38 | const unsigned int offset, | ||
39 | const struct rt2x00_field32 field, | ||
40 | u32 *reg) | ||
41 | { | ||
42 | unsigned int i; | ||
43 | |||
44 | if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) | ||
45 | return 0; | ||
46 | |||
47 | for (i = 0; i < REGISTER_BUSY_COUNT; i++) { | ||
48 | rt2x00pci_register_read(rt2x00dev, offset, reg); | ||
49 | if (!rt2x00_get_field32(*reg, field)) | ||
50 | return 1; | ||
51 | udelay(REGISTER_BUSY_DELAY); | ||
52 | } | ||
53 | |||
54 | printk_once(KERN_ERR "%s() Indirect register access failed: " | ||
55 | "offset=0x%.08x, value=0x%.08x\n", __func__, offset, *reg); | ||
56 | *reg = ~0; | ||
57 | |||
58 | return 0; | ||
59 | } | ||
60 | EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read); | ||
61 | |||
62 | bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev) | ||
63 | { | ||
64 | struct data_queue *queue = rt2x00dev->rx; | ||
65 | struct queue_entry *entry; | ||
66 | struct queue_entry_priv_pci *entry_priv; | ||
67 | struct skb_frame_desc *skbdesc; | ||
68 | int max_rx = 16; | ||
69 | |||
70 | while (--max_rx) { | ||
71 | entry = rt2x00queue_get_entry(queue, Q_INDEX); | ||
72 | entry_priv = entry->priv_data; | ||
73 | |||
74 | if (rt2x00dev->ops->lib->get_entry_state(entry)) | ||
75 | break; | ||
76 | |||
77 | /* | ||
78 | * Fill in desc fields of the skb descriptor | ||
79 | */ | ||
80 | skbdesc = get_skb_frame_desc(entry->skb); | ||
81 | skbdesc->desc = entry_priv->desc; | ||
82 | skbdesc->desc_len = entry->queue->desc_size; | ||
83 | |||
84 | /* | ||
85 | * DMA is already done, notify rt2x00lib that | ||
86 | * it finished successfully. | ||
87 | */ | ||
88 | rt2x00lib_dmastart(entry); | ||
89 | rt2x00lib_dmadone(entry); | ||
90 | |||
91 | /* | ||
92 | * Send the frame to rt2x00lib for further processing. | ||
93 | */ | ||
94 | rt2x00lib_rxdone(entry, GFP_ATOMIC); | ||
95 | } | ||
96 | |||
97 | return !max_rx; | ||
98 | } | ||
99 | EXPORT_SYMBOL_GPL(rt2x00pci_rxdone); | ||
100 | |||
101 | void rt2x00pci_flush_queue(struct data_queue *queue, bool drop) | ||
102 | { | ||
103 | unsigned int i; | ||
104 | |||
105 | for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++) | ||
106 | msleep(10); | ||
107 | } | ||
108 | EXPORT_SYMBOL_GPL(rt2x00pci_flush_queue); | ||
109 | |||
110 | /* | ||
111 | * Device initialization handlers. | ||
112 | */ | ||
113 | static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev, | ||
114 | struct data_queue *queue) | ||
115 | { | ||
116 | struct queue_entry_priv_pci *entry_priv; | ||
117 | void *addr; | ||
118 | dma_addr_t dma; | ||
119 | unsigned int i; | ||
120 | |||
121 | /* | ||
122 | * Allocate DMA memory for descriptor and buffer. | ||
123 | */ | ||
124 | addr = dma_alloc_coherent(rt2x00dev->dev, | ||
125 | queue->limit * queue->desc_size, | ||
126 | &dma, GFP_KERNEL); | ||
127 | if (!addr) | ||
128 | return -ENOMEM; | ||
129 | |||
130 | memset(addr, 0, queue->limit * queue->desc_size); | ||
131 | |||
132 | /* | ||
133 | * Initialize all queue entries to contain valid addresses. | ||
134 | */ | ||
135 | for (i = 0; i < queue->limit; i++) { | ||
136 | entry_priv = queue->entries[i].priv_data; | ||
137 | entry_priv->desc = addr + i * queue->desc_size; | ||
138 | entry_priv->desc_dma = dma + i * queue->desc_size; | ||
139 | } | ||
140 | |||
141 | return 0; | ||
142 | } | ||
143 | |||
144 | static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev, | ||
145 | struct data_queue *queue) | ||
146 | { | ||
147 | struct queue_entry_priv_pci *entry_priv = | ||
148 | queue->entries[0].priv_data; | ||
149 | |||
150 | if (entry_priv->desc) | ||
151 | dma_free_coherent(rt2x00dev->dev, | ||
152 | queue->limit * queue->desc_size, | ||
153 | entry_priv->desc, entry_priv->desc_dma); | ||
154 | entry_priv->desc = NULL; | ||
155 | } | ||
156 | |||
157 | int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev) | ||
158 | { | ||
159 | struct data_queue *queue; | ||
160 | int status; | ||
161 | |||
162 | /* | ||
163 | * Allocate DMA | ||
164 | */ | ||
165 | queue_for_each(rt2x00dev, queue) { | ||
166 | status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue); | ||
167 | if (status) | ||
168 | goto exit; | ||
169 | } | ||
170 | |||
171 | /* | ||
172 | * Register interrupt handler. | ||
173 | */ | ||
174 | status = request_irq(rt2x00dev->irq, | ||
175 | rt2x00dev->ops->lib->irq_handler, | ||
176 | IRQF_SHARED, rt2x00dev->name, rt2x00dev); | ||
177 | if (status) { | ||
178 | ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n", | ||
179 | rt2x00dev->irq, status); | ||
180 | goto exit; | ||
181 | } | ||
182 | |||
183 | return 0; | ||
184 | |||
185 | exit: | ||
186 | queue_for_each(rt2x00dev, queue) | ||
187 | rt2x00pci_free_queue_dma(rt2x00dev, queue); | ||
188 | |||
189 | return status; | ||
190 | } | ||
191 | EXPORT_SYMBOL_GPL(rt2x00pci_initialize); | ||
192 | |||
193 | void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev) | ||
194 | { | ||
195 | struct data_queue *queue; | ||
196 | |||
197 | /* | ||
198 | * Free irq line. | ||
199 | */ | ||
200 | free_irq(rt2x00dev->irq, rt2x00dev); | ||
201 | |||
202 | /* | ||
203 | * Free DMA | ||
204 | */ | ||
205 | queue_for_each(rt2x00dev, queue) | ||
206 | rt2x00pci_free_queue_dma(rt2x00dev, queue); | ||
207 | } | ||
208 | EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize); | ||
209 | |||
210 | /* | ||
211 | * rt2x00mmio module information. | ||
212 | */ | ||
213 | MODULE_AUTHOR(DRV_PROJECT); | ||
214 | MODULE_VERSION(DRV_VERSION); | ||
215 | MODULE_DESCRIPTION("rt2x00 mmio library"); | ||
216 | MODULE_LICENSE("GPL"); | ||
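Note on the new file above: rt2x00pci_regbusy_read() polls an indirect-access register until its busy field clears, giving up after a fixed number of delayed retries. A user-space sketch of that poll-with-timeout shape, with a simulated register read standing in for the MMIO access; the constants and helper are illustrative:

#include <stdio.h>

#define REGISTER_BUSY_COUNT 100   /* retry budget, as in rt2x00 */
#define BUSY_BIT            (1u << 31)

/* Simulated hardware: the busy bit clears after a few reads. */
static unsigned int fake_register_read(void)
{
	static int reads;
	return (++reads < 5) ? BUSY_BIT : 0x1234;
}

/* Returns 1 on success (busy cleared), 0 on timeout, like the driver helper. */
static int regbusy_read(unsigned int *reg)
{
	int i;

	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
		*reg = fake_register_read();
		if (!(*reg & BUSY_BIT))
			return 1;
		/* the driver would udelay(REGISTER_BUSY_DELAY) here */
	}
	*reg = ~0u;
	return 0;
}

int main(void)
{
	unsigned int reg;

	if (regbusy_read(&reg))
		printf("register value 0x%x\n", reg);
	else
		printf("indirect register access timed out\n");
	return 0;
}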
diff --git a/drivers/net/wireless/rt2x00/rt2x00mmio.h b/drivers/net/wireless/rt2x00/rt2x00mmio.h new file mode 100644 index 000000000000..4ecaf60175bf --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00mmio.h | |||
@@ -0,0 +1,119 @@ | |||
1 | /* | ||
2 | Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com> | ||
3 | <http://rt2x00.serialmonkey.com> | ||
4 | |||
5 | This program is free software; you can redistribute it and/or modify | ||
6 | it under the terms of the GNU General Public License as published by | ||
7 | the Free Software Foundation; either version 2 of the License, or | ||
8 | (at your option) any later version. | ||
9 | |||
10 | This program is distributed in the hope that it will be useful, | ||
11 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | GNU General Public License for more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License | ||
16 | along with this program; if not, write to the | ||
17 | Free Software Foundation, Inc., | ||
18 | 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
19 | */ | ||
20 | |||
21 | /* | ||
22 | Module: rt2x00mmio | ||
23 | Abstract: Data structures for the rt2x00mmio module. | ||
24 | */ | ||
25 | |||
26 | #ifndef RT2X00MMIO_H | ||
27 | #define RT2X00MMIO_H | ||
28 | |||
29 | #include <linux/io.h> | ||
30 | |||
31 | /* | ||
32 | * Register access. | ||
33 | */ | ||
34 | static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev, | ||
35 | const unsigned int offset, | ||
36 | u32 *value) | ||
37 | { | ||
38 | *value = readl(rt2x00dev->csr.base + offset); | ||
39 | } | ||
40 | |||
41 | static inline void rt2x00pci_register_multiread(struct rt2x00_dev *rt2x00dev, | ||
42 | const unsigned int offset, | ||
43 | void *value, const u32 length) | ||
44 | { | ||
45 | memcpy_fromio(value, rt2x00dev->csr.base + offset, length); | ||
46 | } | ||
47 | |||
48 | static inline void rt2x00pci_register_write(struct rt2x00_dev *rt2x00dev, | ||
49 | const unsigned int offset, | ||
50 | u32 value) | ||
51 | { | ||
52 | writel(value, rt2x00dev->csr.base + offset); | ||
53 | } | ||
54 | |||
55 | static inline void rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev, | ||
56 | const unsigned int offset, | ||
57 | const void *value, | ||
58 | const u32 length) | ||
59 | { | ||
60 | __iowrite32_copy(rt2x00dev->csr.base + offset, value, length >> 2); | ||
61 | } | ||
62 | |||
63 | /** | ||
64 | * rt2x00pci_regbusy_read - Read from register with busy check | ||
65 | * @rt2x00dev: Device pointer, see &struct rt2x00_dev. | ||
66 | * @offset: Register offset | ||
67 | * @field: Field to check if register is busy | ||
68 | * @reg: Pointer to where register contents should be stored | ||
69 | * | ||
70 | * This function will read the given register, and checks if the | ||
71 | * register is busy. If it is, it will sleep for a couple of | ||
72 | * microseconds before reading the register again. If the register | ||
73 | * is not read after a certain timeout, this function will return | ||
74 | * FALSE. | ||
75 | */ | ||
76 | int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev, | ||
77 | const unsigned int offset, | ||
78 | const struct rt2x00_field32 field, | ||
79 | u32 *reg); | ||
80 | |||
81 | /** | ||
82 | * struct queue_entry_priv_pci: Per entry PCI specific information | ||
83 | * | ||
84 | * @desc: Pointer to device descriptor | ||
85 | * @desc_dma: DMA pointer to &desc. | ||
86 | * @data: Pointer to device's entry memory. | ||
87 | * @data_dma: DMA pointer to &data. | ||
88 | */ | ||
89 | struct queue_entry_priv_pci { | ||
90 | __le32 *desc; | ||
91 | dma_addr_t desc_dma; | ||
92 | }; | ||
93 | |||
94 | /** | ||
95 | * rt2x00pci_rxdone - Handle RX done events | ||
96 | * @rt2x00dev: Device pointer, see &struct rt2x00_dev. | ||
97 | * | ||
98 | * Returns true if there are still rx frames pending and false if all | ||
99 | * pending rx frames were processed. | ||
100 | */ | ||
101 | bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev); | ||
102 | |||
103 | /** | ||
104 | * rt2x00pci_flush_queue - Flush data queue | ||
105 | * @queue: Data queue to stop | ||
106 | * @drop: True to drop all pending frames. | ||
107 | * | ||
108 | * This will wait for a maximum of 100ms, waiting for the queues | ||
109 | * to become empty. | ||
110 | */ | ||
111 | void rt2x00pci_flush_queue(struct data_queue *queue, bool drop); | ||
112 | |||
113 | /* | ||
114 | * Device initialization handlers. | ||
115 | */ | ||
116 | int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev); | ||
117 | void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev); | ||
118 | |||
119 | #endif /* RT2X00MMIO_H */ | ||
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c index a0c8caef3b0a..e87865e33113 100644 --- a/drivers/net/wireless/rt2x00/rt2x00pci.c +++ b/drivers/net/wireless/rt2x00/rt2x00pci.c | |||
@@ -33,182 +33,6 @@ | |||
33 | #include "rt2x00pci.h" | 33 | #include "rt2x00pci.h" |
34 | 34 | ||
35 | /* | 35 | /* |
36 | * Register access. | ||
37 | */ | ||
38 | int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev, | ||
39 | const unsigned int offset, | ||
40 | const struct rt2x00_field32 field, | ||
41 | u32 *reg) | ||
42 | { | ||
43 | unsigned int i; | ||
44 | |||
45 | if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) | ||
46 | return 0; | ||
47 | |||
48 | for (i = 0; i < REGISTER_BUSY_COUNT; i++) { | ||
49 | rt2x00pci_register_read(rt2x00dev, offset, reg); | ||
50 | if (!rt2x00_get_field32(*reg, field)) | ||
51 | return 1; | ||
52 | udelay(REGISTER_BUSY_DELAY); | ||
53 | } | ||
54 | |||
55 | ERROR(rt2x00dev, "Indirect register access failed: " | ||
56 | "offset=0x%.08x, value=0x%.08x\n", offset, *reg); | ||
57 | *reg = ~0; | ||
58 | |||
59 | return 0; | ||
60 | } | ||
61 | EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read); | ||
62 | |||
63 | bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev) | ||
64 | { | ||
65 | struct data_queue *queue = rt2x00dev->rx; | ||
66 | struct queue_entry *entry; | ||
67 | struct queue_entry_priv_pci *entry_priv; | ||
68 | struct skb_frame_desc *skbdesc; | ||
69 | int max_rx = 16; | ||
70 | |||
71 | while (--max_rx) { | ||
72 | entry = rt2x00queue_get_entry(queue, Q_INDEX); | ||
73 | entry_priv = entry->priv_data; | ||
74 | |||
75 | if (rt2x00dev->ops->lib->get_entry_state(entry)) | ||
76 | break; | ||
77 | |||
78 | /* | ||
79 | * Fill in desc fields of the skb descriptor | ||
80 | */ | ||
81 | skbdesc = get_skb_frame_desc(entry->skb); | ||
82 | skbdesc->desc = entry_priv->desc; | ||
83 | skbdesc->desc_len = entry->queue->desc_size; | ||
84 | |||
85 | /* | ||
86 | * DMA is already done, notify rt2x00lib that | ||
87 | * it finished successfully. | ||
88 | */ | ||
89 | rt2x00lib_dmastart(entry); | ||
90 | rt2x00lib_dmadone(entry); | ||
91 | |||
92 | /* | ||
93 | * Send the frame to rt2x00lib for further processing. | ||
94 | */ | ||
95 | rt2x00lib_rxdone(entry, GFP_ATOMIC); | ||
96 | } | ||
97 | |||
98 | return !max_rx; | ||
99 | } | ||
100 | EXPORT_SYMBOL_GPL(rt2x00pci_rxdone); | ||
101 | |||
102 | void rt2x00pci_flush_queue(struct data_queue *queue, bool drop) | ||
103 | { | ||
104 | unsigned int i; | ||
105 | |||
106 | for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++) | ||
107 | msleep(10); | ||
108 | } | ||
109 | EXPORT_SYMBOL_GPL(rt2x00pci_flush_queue); | ||
110 | |||
111 | /* | ||
112 | * Device initialization handlers. | ||
113 | */ | ||
114 | static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev, | ||
115 | struct data_queue *queue) | ||
116 | { | ||
117 | struct queue_entry_priv_pci *entry_priv; | ||
118 | void *addr; | ||
119 | dma_addr_t dma; | ||
120 | unsigned int i; | ||
121 | |||
122 | /* | ||
123 | * Allocate DMA memory for descriptor and buffer. | ||
124 | */ | ||
125 | addr = dma_alloc_coherent(rt2x00dev->dev, | ||
126 | queue->limit * queue->desc_size, | ||
127 | &dma, GFP_KERNEL); | ||
128 | if (!addr) | ||
129 | return -ENOMEM; | ||
130 | |||
131 | memset(addr, 0, queue->limit * queue->desc_size); | ||
132 | |||
133 | /* | ||
134 | * Initialize all queue entries to contain valid addresses. | ||
135 | */ | ||
136 | for (i = 0; i < queue->limit; i++) { | ||
137 | entry_priv = queue->entries[i].priv_data; | ||
138 | entry_priv->desc = addr + i * queue->desc_size; | ||
139 | entry_priv->desc_dma = dma + i * queue->desc_size; | ||
140 | } | ||
141 | |||
142 | return 0; | ||
143 | } | ||
144 | |||
145 | static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev, | ||
146 | struct data_queue *queue) | ||
147 | { | ||
148 | struct queue_entry_priv_pci *entry_priv = | ||
149 | queue->entries[0].priv_data; | ||
150 | |||
151 | if (entry_priv->desc) | ||
152 | dma_free_coherent(rt2x00dev->dev, | ||
153 | queue->limit * queue->desc_size, | ||
154 | entry_priv->desc, entry_priv->desc_dma); | ||
155 | entry_priv->desc = NULL; | ||
156 | } | ||
157 | |||
158 | int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev) | ||
159 | { | ||
160 | struct data_queue *queue; | ||
161 | int status; | ||
162 | |||
163 | /* | ||
164 | * Allocate DMA | ||
165 | */ | ||
166 | queue_for_each(rt2x00dev, queue) { | ||
167 | status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue); | ||
168 | if (status) | ||
169 | goto exit; | ||
170 | } | ||
171 | |||
172 | /* | ||
173 | * Register interrupt handler. | ||
174 | */ | ||
175 | status = request_irq(rt2x00dev->irq, | ||
176 | rt2x00dev->ops->lib->irq_handler, | ||
177 | IRQF_SHARED, rt2x00dev->name, rt2x00dev); | ||
178 | if (status) { | ||
179 | ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n", | ||
180 | rt2x00dev->irq, status); | ||
181 | goto exit; | ||
182 | } | ||
183 | |||
184 | return 0; | ||
185 | |||
186 | exit: | ||
187 | queue_for_each(rt2x00dev, queue) | ||
188 | rt2x00pci_free_queue_dma(rt2x00dev, queue); | ||
189 | |||
190 | return status; | ||
191 | } | ||
192 | EXPORT_SYMBOL_GPL(rt2x00pci_initialize); | ||
193 | |||
194 | void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev) | ||
195 | { | ||
196 | struct data_queue *queue; | ||
197 | |||
198 | /* | ||
199 | * Free irq line. | ||
200 | */ | ||
201 | free_irq(rt2x00dev->irq, rt2x00dev); | ||
202 | |||
203 | /* | ||
204 | * Free DMA | ||
205 | */ | ||
206 | queue_for_each(rt2x00dev, queue) | ||
207 | rt2x00pci_free_queue_dma(rt2x00dev, queue); | ||
208 | } | ||
209 | EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize); | ||
210 | |||
211 | /* | ||
212 | * PCI driver handlers. | 36 | * PCI driver handlers. |
213 | */ | 37 | */ |
214 | static void rt2x00pci_free_reg(struct rt2x00_dev *rt2x00dev) | 38 | static void rt2x00pci_free_reg(struct rt2x00_dev *rt2x00dev) |
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.h b/drivers/net/wireless/rt2x00/rt2x00pci.h index e2c99f2b9a14..60d90b20f8b9 100644 --- a/drivers/net/wireless/rt2x00/rt2x00pci.h +++ b/drivers/net/wireless/rt2x00/rt2x00pci.h | |||
@@ -36,94 +36,6 @@ | |||
36 | #define PCI_DEVICE_DATA(__ops) .driver_data = (kernel_ulong_t)(__ops) | 36 | #define PCI_DEVICE_DATA(__ops) .driver_data = (kernel_ulong_t)(__ops) |
37 | 37 | ||
38 | /* | 38 | /* |
39 | * Register access. | ||
40 | */ | ||
41 | static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev, | ||
42 | const unsigned int offset, | ||
43 | u32 *value) | ||
44 | { | ||
45 | *value = readl(rt2x00dev->csr.base + offset); | ||
46 | } | ||
47 | |||
48 | static inline void rt2x00pci_register_multiread(struct rt2x00_dev *rt2x00dev, | ||
49 | const unsigned int offset, | ||
50 | void *value, const u32 length) | ||
51 | { | ||
52 | memcpy_fromio(value, rt2x00dev->csr.base + offset, length); | ||
53 | } | ||
54 | |||
55 | static inline void rt2x00pci_register_write(struct rt2x00_dev *rt2x00dev, | ||
56 | const unsigned int offset, | ||
57 | u32 value) | ||
58 | { | ||
59 | writel(value, rt2x00dev->csr.base + offset); | ||
60 | } | ||
61 | |||
62 | static inline void rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev, | ||
63 | const unsigned int offset, | ||
64 | const void *value, | ||
65 | const u32 length) | ||
66 | { | ||
67 | __iowrite32_copy(rt2x00dev->csr.base + offset, value, length >> 2); | ||
68 | } | ||
69 | |||
70 | /** | ||
71 | * rt2x00pci_regbusy_read - Read from register with busy check | ||
72 | * @rt2x00dev: Device pointer, see &struct rt2x00_dev. | ||
73 | * @offset: Register offset | ||
74 | * @field: Field to check if register is busy | ||
75 | * @reg: Pointer to where register contents should be stored | ||
76 | * | ||
77 | * This function will read the given register, and checks if the | ||
78 | * register is busy. If it is, it will sleep for a couple of | ||
79 | * microseconds before reading the register again. If the register | ||
80 | * is not read after a certain timeout, this function will return | ||
81 | * FALSE. | ||
82 | */ | ||
83 | int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev, | ||
84 | const unsigned int offset, | ||
85 | const struct rt2x00_field32 field, | ||
86 | u32 *reg); | ||
87 | |||
88 | /** | ||
89 | * struct queue_entry_priv_pci: Per entry PCI specific information | ||
90 | * | ||
91 | * @desc: Pointer to device descriptor | ||
92 | * @desc_dma: DMA pointer to &desc. | ||
93 | * @data: Pointer to device's entry memory. | ||
94 | * @data_dma: DMA pointer to &data. | ||
95 | */ | ||
96 | struct queue_entry_priv_pci { | ||
97 | __le32 *desc; | ||
98 | dma_addr_t desc_dma; | ||
99 | }; | ||
100 | |||
101 | /** | ||
102 | * rt2x00pci_rxdone - Handle RX done events | ||
103 | * @rt2x00dev: Device pointer, see &struct rt2x00_dev. | ||
104 | * | ||
105 | * Returns true if there are still rx frames pending and false if all | ||
106 | * pending rx frames were processed. | ||
107 | */ | ||
108 | bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev); | ||
109 | |||
110 | /** | ||
111 | * rt2x00pci_flush_queue - Flush data queue | ||
112 | * @queue: Data queue to stop | ||
113 | * @drop: True to drop all pending frames. | ||
114 | * | ||
115 | * This will wait for a maximum of 100ms, waiting for the queues | ||
116 | * to become empty. | ||
117 | */ | ||
118 | void rt2x00pci_flush_queue(struct data_queue *queue, bool drop); | ||
119 | |||
120 | /* | ||
121 | * Device initialization handlers. | ||
122 | */ | ||
123 | int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev); | ||
124 | void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev); | ||
125 | |||
126 | /* | ||
127 | * PCI driver handlers. | 39 | * PCI driver handlers. |
128 | */ | 40 | */ |
129 | int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops); | 41 | int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops); |
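The header hunk above moves the register accessors (readl/writel wrappers plus the documented busy-poll read) out of rt2x00pci.h. As a rough illustration of what the removed rt2x00pci_regbusy_read() kernel-doc describes, here is a hedged sketch of a busy-poll register read; MY_REG_BUSY, the retry count and the delay are illustrative, not the driver's real values:

/* Hedged sketch of a busy-poll register read with timeout. */
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/bitops.h>

#define MY_REG_BUSY	BIT(31)	/* illustrative busy bit */

static int my_regbusy_read(void __iomem *my_csr_base, unsigned int offset,
			   u32 *reg)
{
	unsigned int i;

	for (i = 0; i < 100; i++) {
		*reg = readl(my_csr_base + offset);
		if (!(*reg & MY_REG_BUSY))
			return 1;	/* register settled */
		udelay(10);		/* wait a few microseconds, then retry */
	}
	return 0;			/* still busy after the timeout */
}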
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c index f95792cfcf89..9e3c8ff53e3f 100644 --- a/drivers/net/wireless/rt2x00/rt61pci.c +++ b/drivers/net/wireless/rt2x00/rt61pci.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/eeprom_93cx6.h> | 35 | #include <linux/eeprom_93cx6.h> |
36 | 36 | ||
37 | #include "rt2x00.h" | 37 | #include "rt2x00.h" |
38 | #include "rt2x00mmio.h" | ||
38 | #include "rt2x00pci.h" | 39 | #include "rt2x00pci.h" |
39 | #include "rt61pci.h" | 40 | #include "rt61pci.h" |
40 | 41 | ||
diff --git a/drivers/nfc/microread/mei.c b/drivers/nfc/microread/mei.c index eef38cfd812e..ca33ae193935 100644 --- a/drivers/nfc/microread/mei.c +++ b/drivers/nfc/microread/mei.c | |||
@@ -22,7 +22,7 @@ | |||
22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
23 | #include <linux/interrupt.h> | 23 | #include <linux/interrupt.h> |
24 | #include <linux/gpio.h> | 24 | #include <linux/gpio.h> |
25 | #include <linux/mei_bus.h> | 25 | #include <linux/mei_cl_bus.h> |
26 | 26 | ||
27 | #include <linux/nfc.h> | 27 | #include <linux/nfc.h> |
28 | #include <net/nfc/hci.h> | 28 | #include <net/nfc/hci.h> |
@@ -32,9 +32,6 @@ | |||
32 | 32 | ||
33 | #define MICROREAD_DRIVER_NAME "microread" | 33 | #define MICROREAD_DRIVER_NAME "microread" |
34 | 34 | ||
35 | #define MICROREAD_UUID UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50, 0x94, \ | ||
36 | 0xd4, 0x50, 0x26, 0x67, 0x23, 0x77, 0x5c) | ||
37 | |||
38 | struct mei_nfc_hdr { | 35 | struct mei_nfc_hdr { |
39 | u8 cmd; | 36 | u8 cmd; |
40 | u8 status; | 37 | u8 status; |
@@ -48,7 +45,7 @@ struct mei_nfc_hdr { | |||
48 | #define MEI_NFC_MAX_READ (MEI_NFC_HEADER_SIZE + MEI_NFC_MAX_HCI_PAYLOAD) | 45 | #define MEI_NFC_MAX_READ (MEI_NFC_HEADER_SIZE + MEI_NFC_MAX_HCI_PAYLOAD) |
49 | 46 | ||
50 | struct microread_mei_phy { | 47 | struct microread_mei_phy { |
51 | struct mei_device *mei_device; | 48 | struct mei_cl_device *device; |
52 | struct nfc_hci_dev *hdev; | 49 | struct nfc_hci_dev *hdev; |
53 | 50 | ||
54 | int powered; | 51 | int powered; |
@@ -105,14 +102,14 @@ static int microread_mei_write(void *phy_id, struct sk_buff *skb) | |||
105 | 102 | ||
106 | MEI_DUMP_SKB_OUT("mei frame sent", skb); | 103 | MEI_DUMP_SKB_OUT("mei frame sent", skb); |
107 | 104 | ||
108 | r = mei_send(phy->device, skb->data, skb->len); | 105 | r = mei_cl_send(phy->device, skb->data, skb->len); |
109 | if (r > 0) | 106 | if (r > 0) |
110 | r = 0; | 107 | r = 0; |
111 | 108 | ||
112 | return r; | 109 | return r; |
113 | } | 110 | } |
114 | 111 | ||
115 | static void microread_event_cb(struct mei_device *device, u32 events, | 112 | static void microread_event_cb(struct mei_cl_device *device, u32 events, |
116 | void *context) | 113 | void *context) |
117 | { | 114 | { |
118 | struct microread_mei_phy *phy = context; | 115 | struct microread_mei_phy *phy = context; |
@@ -120,7 +117,7 @@ static void microread_event_cb(struct mei_device *device, u32 events, | |||
120 | if (phy->hard_fault != 0) | 117 | if (phy->hard_fault != 0) |
121 | return; | 118 | return; |
122 | 119 | ||
123 | if (events & BIT(MEI_EVENT_RX)) { | 120 | if (events & BIT(MEI_CL_EVENT_RX)) { |
124 | struct sk_buff *skb; | 121 | struct sk_buff *skb; |
125 | int reply_size; | 122 | int reply_size; |
126 | 123 | ||
@@ -128,7 +125,7 @@ static void microread_event_cb(struct mei_device *device, u32 events, | |||
128 | if (!skb) | 125 | if (!skb) |
129 | return; | 126 | return; |
130 | 127 | ||
131 | reply_size = mei_recv(device, skb->data, MEI_NFC_MAX_READ); | 128 | reply_size = mei_cl_recv(device, skb->data, MEI_NFC_MAX_READ); |
132 | if (reply_size < MEI_NFC_HEADER_SIZE) { | 129 | if (reply_size < MEI_NFC_HEADER_SIZE) { |
133 | kfree(skb); | 130 | kfree(skb); |
134 | return; | 131 | return; |
@@ -149,8 +146,8 @@ static struct nfc_phy_ops mei_phy_ops = { | |||
149 | .disable = microread_mei_disable, | 146 | .disable = microread_mei_disable, |
150 | }; | 147 | }; |
151 | 148 | ||
152 | static int microread_mei_probe(struct mei_device *device, | 149 | static int microread_mei_probe(struct mei_cl_device *device, |
153 | const struct mei_id *id) | 150 | const struct mei_cl_device_id *id) |
154 | { | 151 | { |
155 | struct microread_mei_phy *phy; | 152 | struct microread_mei_phy *phy; |
156 | int r; | 153 | int r; |
@@ -164,9 +161,9 @@ static int microread_mei_probe(struct mei_device *device, | |||
164 | } | 161 | } |
165 | 162 | ||
166 | phy->device = device; | 163 | phy->device = device; |
167 | mei_set_clientdata(device, phy); | 164 | mei_cl_set_drvdata(device, phy); |
168 | 165 | ||
169 | r = mei_register_event_cb(device, microread_event_cb, phy); | 166 | r = mei_cl_register_event_cb(device, microread_event_cb, phy); |
170 | if (r) { | 167 | if (r) { |
171 | pr_err(MICROREAD_DRIVER_NAME ": event cb registration failed\n"); | 168 | pr_err(MICROREAD_DRIVER_NAME ": event cb registration failed\n"); |
172 | goto err_out; | 169 | goto err_out; |
@@ -186,9 +183,9 @@ err_out: | |||
186 | return r; | 183 | return r; |
187 | } | 184 | } |
188 | 185 | ||
189 | static int microread_mei_remove(struct mei_device *device) | 186 | static int microread_mei_remove(struct mei_cl_device *device) |
190 | { | 187 | { |
191 | struct microread_mei_phy *phy = mei_get_clientdata(device); | 188 | struct microread_mei_phy *phy = mei_cl_get_drvdata(device); |
192 | 189 | ||
193 | pr_info("Removing microread\n"); | 190 | pr_info("Removing microread\n"); |
194 | 191 | ||
@@ -202,16 +199,15 @@ static int microread_mei_remove(struct mei_device *device) | |||
202 | return 0; | 199 | return 0; |
203 | } | 200 | } |
204 | 201 | ||
205 | static struct mei_id microread_mei_tbl[] = { | 202 | static struct mei_cl_device_id microread_mei_tbl[] = { |
206 | { MICROREAD_DRIVER_NAME, MICROREAD_UUID }, | 203 | { MICROREAD_DRIVER_NAME }, |
207 | 204 | ||
208 | /* required last entry */ | 205 | /* required last entry */ |
209 | { } | 206 | { } |
210 | }; | 207 | }; |
211 | |||
212 | MODULE_DEVICE_TABLE(mei, microread_mei_tbl); | 208 | MODULE_DEVICE_TABLE(mei, microread_mei_tbl); |
213 | 209 | ||
214 | static struct mei_driver microread_driver = { | 210 | static struct mei_cl_driver microread_driver = { |
215 | .id_table = microread_mei_tbl, | 211 | .id_table = microread_mei_tbl, |
216 | .name = MICROREAD_DRIVER_NAME, | 212 | .name = MICROREAD_DRIVER_NAME, |
217 | 213 | ||
@@ -225,7 +221,7 @@ static int microread_mei_init(void) | |||
225 | 221 | ||
226 | pr_debug(DRIVER_DESC ": %s\n", __func__); | 222 | pr_debug(DRIVER_DESC ": %s\n", __func__); |
227 | 223 | ||
228 | r = mei_driver_register(&microread_driver); | 224 | r = mei_cl_driver_register(&microread_driver); |

229 | if (r) { | 225 | if (r) { |
230 | pr_err(MICROREAD_DRIVER_NAME ": driver registration failed\n"); | 226 | pr_err(MICROREAD_DRIVER_NAME ": driver registration failed\n"); |
231 | return r; | 227 | return r; |
@@ -236,7 +232,7 @@ static int microread_mei_init(void) | |||
236 | 232 | ||
237 | static void microread_mei_exit(void) | 233 | static void microread_mei_exit(void) |
238 | { | 234 | { |
239 | mei_driver_unregister(&microread_driver); | 235 | mei_cl_driver_unregister(&microread_driver); |
240 | } | 236 | } |
241 | 237 | ||
242 | module_init(microread_mei_init); | 238 | module_init(microread_mei_init); |
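The microread changes above are a mechanical move to the renamed MEI client-bus API (mei_cl_device, mei_cl_send/mei_cl_recv, mei_cl_driver_register, ...). For orientation, a stripped-down, hedged skeleton of a driver on that bus, using only the mei_cl_* calls visible in the hunks; the "my_nfc" name and the empty bodies are placeholders, not the microread driver:

#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/mei_cl_bus.h>

static void my_event_cb(struct mei_cl_device *device, u32 events, void *context)
{
	u8 buf[64];

	/* Only react to receive events, as the microread callback above does. */
	if (events & BIT(MEI_CL_EVENT_RX))
		mei_cl_recv(device, buf, sizeof(buf));
}

static int my_probe(struct mei_cl_device *device,
		    const struct mei_cl_device_id *id)
{
	/* Per-device state would normally be allocated and stored here. */
	mei_cl_set_drvdata(device, NULL);
	return mei_cl_register_event_cb(device, my_event_cb, NULL);
}

static int my_remove(struct mei_cl_device *device)
{
	return 0;
}

static struct mei_cl_device_id my_tbl[] = {
	{ "my_nfc" },
	{ }	/* required last entry */
};
MODULE_DEVICE_TABLE(mei, my_tbl);

static struct mei_cl_driver my_driver = {
	.id_table = my_tbl,
	.name = "my_nfc",
	.probe = my_probe,
	.remove = my_remove,
};

static int __init my_init(void)
{
	return mei_cl_driver_register(&my_driver);
}

static void __exit my_exit(void)
{
	mei_cl_driver_unregister(&my_driver);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");

Note that the id table in the hunks no longer carries a UUID: on the mei_cl bus the client is matched by name alone, which is why MICROREAD_UUID is deleted above.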
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index dee5dddaa292..5147c210df52 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c | |||
@@ -53,14 +53,15 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context) | |||
53 | return; | 53 | return; |
54 | } | 54 | } |
55 | 55 | ||
56 | if (!pci_dev->pm_cap || !pci_dev->pme_support | 56 | /* Clear PME Status if set. */ |
57 | || pci_check_pme_status(pci_dev)) { | 57 | if (pci_dev->pme_support) |
58 | if (pci_dev->pme_poll) | 58 | pci_check_pme_status(pci_dev); |
59 | pci_dev->pme_poll = false; | ||
60 | 59 | ||
61 | pci_wakeup_event(pci_dev); | 60 | if (pci_dev->pme_poll) |
62 | pm_runtime_resume(&pci_dev->dev); | 61 | pci_dev->pme_poll = false; |
63 | } | 62 | |
63 | pci_wakeup_event(pci_dev); | ||
64 | pm_runtime_resume(&pci_dev->dev); | ||
64 | 65 | ||
65 | if (pci_dev->subordinate) | 66 | if (pci_dev->subordinate) |
66 | pci_pme_wakeup_bus(pci_dev->subordinate); | 67 | pci_pme_wakeup_bus(pci_dev->subordinate); |
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 1fa1e482a999..79277fb36c6b 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c | |||
@@ -390,9 +390,10 @@ static void pci_device_shutdown(struct device *dev) | |||
390 | 390 | ||
391 | /* | 391 | /* |
392 | * Turn off Bus Master bit on the device to tell it to not | 392 | * Turn off Bus Master bit on the device to tell it to not |
393 | * continue to do DMA | 393 | * continue to do DMA. Don't touch devices in D3cold or unknown states. |
394 | */ | 394 | */ |
395 | pci_clear_master(pci_dev); | 395 | if (pci_dev->current_state <= PCI_D3hot) |
396 | pci_clear_master(pci_dev); | ||
396 | } | 397 | } |
397 | 398 | ||
398 | #ifdef CONFIG_PM | 399 | #ifdef CONFIG_PM |
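The shutdown hunk above stops poking the config space of devices that may already be in D3cold (or an unknown power state), where the Bus Master bit is not reachable. A one-function hedged sketch of that guard; my_quiesce_dma() is an illustrative name:

/* Hedged sketch of the D3cold guard added above. */
#include <linux/pci.h>

static void my_quiesce_dma(struct pci_dev *pdev)
{
	/* Devices in D3cold (or an unknown state) are not accessible over
	 * config space, so only clear Bus Master up to D3hot. */
	if (pdev->current_state <= PCI_D3hot)
		pci_clear_master(pdev);
}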
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 08c243ab034e..ed4d09498337 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c | |||
@@ -185,14 +185,6 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = { | |||
185 | #endif /* !PM */ | 185 | #endif /* !PM */ |
186 | 186 | ||
187 | /* | 187 | /* |
188 | * PCIe port runtime suspend is broken for some chipsets, so use a | ||
189 | * black list to disable runtime PM for these chipsets. | ||
190 | */ | ||
191 | static const struct pci_device_id port_runtime_pm_black_list[] = { | ||
192 | { /* end: all zeroes */ } | ||
193 | }; | ||
194 | |||
195 | /* | ||
196 | * pcie_portdrv_probe - Probe PCI-Express port devices | 188 | * pcie_portdrv_probe - Probe PCI-Express port devices |
197 | * @dev: PCI-Express port device being probed | 189 | * @dev: PCI-Express port device being probed |
198 | * | 190 | * |
@@ -225,16 +217,11 @@ static int pcie_portdrv_probe(struct pci_dev *dev, | |||
225 | * it by default. | 217 | * it by default. |
226 | */ | 218 | */ |
227 | dev->d3cold_allowed = false; | 219 | dev->d3cold_allowed = false; |
228 | if (!pci_match_id(port_runtime_pm_black_list, dev)) | ||
229 | pm_runtime_put_noidle(&dev->dev); | ||
230 | |||
231 | return 0; | 220 | return 0; |
232 | } | 221 | } |
233 | 222 | ||
234 | static void pcie_portdrv_remove(struct pci_dev *dev) | 223 | static void pcie_portdrv_remove(struct pci_dev *dev) |
235 | { | 224 | { |
236 | if (!pci_match_id(port_runtime_pm_black_list, dev)) | ||
237 | pm_runtime_get_noresume(&dev->dev); | ||
238 | pcie_port_device_remove(dev); | 225 | pcie_port_device_remove(dev); |
239 | pci_disable_device(dev); | 226 | pci_disable_device(dev); |
240 | } | 227 | } |
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c index b41ac7756a4b..c5d0a08a8747 100644 --- a/drivers/pci/rom.c +++ b/drivers/pci/rom.c | |||
@@ -100,27 +100,6 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size) | |||
100 | return min((size_t)(image - rom), size); | 100 | return min((size_t)(image - rom), size); |
101 | } | 101 | } |
102 | 102 | ||
103 | static loff_t pci_find_rom(struct pci_dev *pdev, size_t *size) | ||
104 | { | ||
105 | struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; | ||
106 | loff_t start; | ||
107 | |||
108 | /* assign the ROM an address if it doesn't have one */ | ||
109 | if (res->parent == NULL && pci_assign_resource(pdev, PCI_ROM_RESOURCE)) | ||
110 | return 0; | ||
111 | start = pci_resource_start(pdev, PCI_ROM_RESOURCE); | ||
112 | *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); | ||
113 | |||
114 | if (*size == 0) | ||
115 | return 0; | ||
116 | |||
117 | /* Enable ROM space decodes */ | ||
118 | if (pci_enable_rom(pdev)) | ||
119 | return 0; | ||
120 | |||
121 | return start; | ||
122 | } | ||
123 | |||
124 | /** | 103 | /** |
125 | * pci_map_rom - map a PCI ROM to kernel space | 104 | * pci_map_rom - map a PCI ROM to kernel space |
126 | * @pdev: pointer to pci device struct | 105 | * @pdev: pointer to pci device struct |
@@ -135,7 +114,7 @@ static loff_t pci_find_rom(struct pci_dev *pdev, size_t *size) | |||
135 | void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size) | 114 | void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size) |
136 | { | 115 | { |
137 | struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; | 116 | struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; |
138 | loff_t start = 0; | 117 | loff_t start; |
139 | void __iomem *rom; | 118 | void __iomem *rom; |
140 | 119 | ||
141 | /* | 120 | /* |
@@ -154,21 +133,21 @@ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size) | |||
154 | return (void __iomem *)(unsigned long) | 133 | return (void __iomem *)(unsigned long) |
155 | pci_resource_start(pdev, PCI_ROM_RESOURCE); | 134 | pci_resource_start(pdev, PCI_ROM_RESOURCE); |
156 | } else { | 135 | } else { |
157 | start = pci_find_rom(pdev, size); | 136 | /* assign the ROM an address if it doesn't have one */ |
158 | } | 137 | if (res->parent == NULL && |
159 | } | 138 | pci_assign_resource(pdev,PCI_ROM_RESOURCE)) |
139 | return NULL; | ||
140 | start = pci_resource_start(pdev, PCI_ROM_RESOURCE); | ||
141 | *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); | ||
142 | if (*size == 0) | ||
143 | return NULL; | ||
160 | 144 | ||
161 | /* | 145 | /* Enable ROM space decodes */ |
162 | * Some devices may provide ROMs via a source other than the BAR | 146 | if (pci_enable_rom(pdev)) |
163 | */ | 147 | return NULL; |
164 | if (!start && pdev->rom && pdev->romlen) { | 148 | } |
165 | *size = pdev->romlen; | ||
166 | return phys_to_virt(pdev->rom); | ||
167 | } | 149 | } |
168 | 150 | ||
169 | if (!start) | ||
170 | return NULL; | ||
171 | |||
172 | rom = ioremap(start, *size); | 151 | rom = ioremap(start, *size); |
173 | if (!rom) { | 152 | if (!rom) { |
174 | /* restore enable if ioremap fails */ | 153 | /* restore enable if ioremap fails */ |
@@ -202,8 +181,7 @@ void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom) | |||
202 | if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) | 181 | if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) |
203 | return; | 182 | return; |
204 | 183 | ||
205 | if (!pdev->rom || !pdev->romlen) | 184 | iounmap(rom); |
206 | iounmap(rom); | ||
207 | 185 | ||
208 | /* Disable again before continuing, leave enabled if pci=rom */ | 186 | /* Disable again before continuing, leave enabled if pci=rom */ |
209 | if (!(res->flags & (IORESOURCE_ROM_ENABLE | IORESOURCE_ROM_SHADOW))) | 187 | if (!(res->flags & (IORESOURCE_ROM_ENABLE | IORESOURCE_ROM_SHADOW))) |
@@ -227,7 +205,24 @@ void pci_cleanup_rom(struct pci_dev *pdev) | |||
227 | } | 205 | } |
228 | } | 206 | } |
229 | 207 | ||
208 | /** | ||
209 | * pci_platform_rom - provides a pointer to any ROM image provided by the | ||
210 | * platform | ||
211 | * @pdev: pointer to pci device struct | ||
212 | * @size: pointer to receive size of pci window over ROM | ||
213 | */ | ||
214 | void __iomem *pci_platform_rom(struct pci_dev *pdev, size_t *size) | ||
215 | { | ||
216 | if (pdev->rom && pdev->romlen) { | ||
217 | *size = pdev->romlen; | ||
218 | return phys_to_virt((phys_addr_t)pdev->rom); | ||
219 | } | ||
220 | |||
221 | return NULL; | ||
222 | } | ||
223 | |||
230 | EXPORT_SYMBOL(pci_map_rom); | 224 | EXPORT_SYMBOL(pci_map_rom); |
231 | EXPORT_SYMBOL(pci_unmap_rom); | 225 | EXPORT_SYMBOL(pci_unmap_rom); |
232 | EXPORT_SYMBOL_GPL(pci_enable_rom); | 226 | EXPORT_SYMBOL_GPL(pci_enable_rom); |
233 | EXPORT_SYMBOL_GPL(pci_disable_rom); | 227 | EXPORT_SYMBOL_GPL(pci_disable_rom); |
228 | EXPORT_SYMBOL(pci_platform_rom); | ||
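The rom.c rework above folds the old pci_find_rom() logic back into pci_map_rom() and splits the firmware-provided image out into the new pci_platform_rom() helper, so the two ROM sources are no longer mixed in one lookup. A hedged sketch of how a caller might try both, BAR mapping first and platform copy as a fallback; the byte read is purely illustrative:

/* Hedged sketch of consuming the two ROM sources shown above. */
#include <linux/pci.h>
#include <linux/io.h>

static int my_read_rom_byte(struct pci_dev *pdev, u8 *first_byte)
{
	void __iomem *rom;
	size_t size;

	rom = pci_map_rom(pdev, &size);
	if (rom) {
		*first_byte = readb(rom);	/* ROM mapped from the BAR (or a shadow copy) */
		pci_unmap_rom(pdev, rom);
		return 0;
	}

	/* Fall back to an image the platform handed over, if any. */
	rom = pci_platform_rom(pdev, &size);
	if (rom) {
		*first_byte = readb(rom);
		return 0;			/* no unmap needed for the platform copy */
	}

	return -ENODEV;
}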
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 45cacf79f3a7..1a779bbfb87d 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c | |||
@@ -134,7 +134,6 @@ static const struct key_entry hp_wmi_keymap[] = { | |||
134 | { KE_KEY, 0x2142, { KEY_MEDIA } }, | 134 | { KE_KEY, 0x2142, { KEY_MEDIA } }, |
135 | { KE_KEY, 0x213b, { KEY_INFO } }, | 135 | { KE_KEY, 0x213b, { KEY_INFO } }, |
136 | { KE_KEY, 0x2169, { KEY_DIRECTION } }, | 136 | { KE_KEY, 0x2169, { KEY_DIRECTION } }, |
137 | { KE_KEY, 0x216a, { KEY_SETUP } }, | ||
138 | { KE_KEY, 0x231b, { KEY_HELP } }, | 137 | { KE_KEY, 0x231b, { KEY_HELP } }, |
139 | { KE_END, 0 } | 138 | { KE_END, 0 } |
140 | }; | 139 | }; |
@@ -925,9 +924,6 @@ static int __init hp_wmi_init(void) | |||
925 | err = hp_wmi_input_setup(); | 924 | err = hp_wmi_input_setup(); |
926 | if (err) | 925 | if (err) |
927 | return err; | 926 | return err; |
928 | |||
929 | //Enable magic for hotkeys that run on the SMBus | ||
930 | ec_write(0xe6,0x6e); | ||
931 | } | 927 | } |
932 | 928 | ||
933 | if (bios_capable) { | 929 | if (bios_capable) { |
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 9a907567f41e..edec135b1685 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c | |||
@@ -1964,9 +1964,6 @@ struct tp_nvram_state { | |||
1964 | /* kthread for the hotkey poller */ | 1964 | /* kthread for the hotkey poller */ |
1965 | static struct task_struct *tpacpi_hotkey_task; | 1965 | static struct task_struct *tpacpi_hotkey_task; |
1966 | 1966 | ||
1967 | /* Acquired while the poller kthread is running, use to sync start/stop */ | ||
1968 | static struct mutex hotkey_thread_mutex; | ||
1969 | |||
1970 | /* | 1967 | /* |
1971 | * Acquire mutex to write poller control variables as an | 1968 | * Acquire mutex to write poller control variables as an |
1972 | * atomic block. | 1969 | * atomic block. |
@@ -2462,8 +2459,6 @@ static int hotkey_kthread(void *data) | |||
2462 | unsigned int poll_freq; | 2459 | unsigned int poll_freq; |
2463 | bool was_frozen; | 2460 | bool was_frozen; |
2464 | 2461 | ||
2465 | mutex_lock(&hotkey_thread_mutex); | ||
2466 | |||
2467 | if (tpacpi_lifecycle == TPACPI_LIFE_EXITING) | 2462 | if (tpacpi_lifecycle == TPACPI_LIFE_EXITING) |
2468 | goto exit; | 2463 | goto exit; |
2469 | 2464 | ||
@@ -2523,7 +2518,6 @@ static int hotkey_kthread(void *data) | |||
2523 | } | 2518 | } |
2524 | 2519 | ||
2525 | exit: | 2520 | exit: |
2526 | mutex_unlock(&hotkey_thread_mutex); | ||
2527 | return 0; | 2521 | return 0; |
2528 | } | 2522 | } |
2529 | 2523 | ||
@@ -2533,9 +2527,6 @@ static void hotkey_poll_stop_sync(void) | |||
2533 | if (tpacpi_hotkey_task) { | 2527 | if (tpacpi_hotkey_task) { |
2534 | kthread_stop(tpacpi_hotkey_task); | 2528 | kthread_stop(tpacpi_hotkey_task); |
2535 | tpacpi_hotkey_task = NULL; | 2529 | tpacpi_hotkey_task = NULL; |
2536 | mutex_lock(&hotkey_thread_mutex); | ||
2537 | /* at this point, the thread did exit */ | ||
2538 | mutex_unlock(&hotkey_thread_mutex); | ||
2539 | } | 2530 | } |
2540 | } | 2531 | } |
2541 | 2532 | ||
@@ -3234,7 +3225,6 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) | |||
3234 | mutex_init(&hotkey_mutex); | 3225 | mutex_init(&hotkey_mutex); |
3235 | 3226 | ||
3236 | #ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL | 3227 | #ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL |
3237 | mutex_init(&hotkey_thread_mutex); | ||
3238 | mutex_init(&hotkey_thread_data_mutex); | 3228 | mutex_init(&hotkey_thread_data_mutex); |
3239 | #endif | 3229 | #endif |
3240 | 3230 | ||
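The thinkpad_acpi hunks above drop hotkey_thread_mutex entirely: kthread_stop() already blocks until the poller function returns, so an extra mutex to "sync start/stop" adds nothing but lock-ordering risk. A hedged sketch of the kthread start/stop pattern being relied on, with illustrative my_* names:

/* Hedged sketch of a poller kthread whose stop path needs no extra mutex. */
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

static struct task_struct *my_task;

static int my_poll_fn(void *data)
{
	while (!kthread_should_stop()) {
		/* poll hardware here */
		schedule_timeout_interruptible(HZ);
	}
	return 0;	/* kthread_stop() returns only after this function exits */
}

static int my_poll_start(void)
{
	my_task = kthread_run(my_poll_fn, NULL, "my_poller");
	if (IS_ERR(my_task)) {
		int err = PTR_ERR(my_task);

		my_task = NULL;
		return err;
	}
	return 0;
}

static void my_poll_stop(void)
{
	if (my_task) {
		kthread_stop(my_task);	/* synchronous: waits for my_poll_fn() to return */
		my_task = NULL;
	}
}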
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig index cc1f7bf53fd0..c6d77e20622c 100644 --- a/drivers/remoteproc/Kconfig +++ b/drivers/remoteproc/Kconfig | |||
@@ -4,7 +4,7 @@ menu "Remoteproc drivers" | |||
4 | config REMOTEPROC | 4 | config REMOTEPROC |
5 | tristate | 5 | tristate |
6 | depends on HAS_DMA | 6 | depends on HAS_DMA |
7 | select FW_CONFIG | 7 | select FW_LOADER |
8 | select VIRTIO | 8 | select VIRTIO |
9 | 9 | ||
10 | config OMAP_REMOTEPROC | 10 | config OMAP_REMOTEPROC |
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 29387df4bfc9..8edb4aed5d36 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c | |||
@@ -217,7 +217,7 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i) | |||
217 | * TODO: support predefined notifyids (via resource table) | 217 | * TODO: support predefined notifyids (via resource table) |
218 | */ | 218 | */ |
219 | ret = idr_alloc(&rproc->notifyids, rvring, 0, 0, GFP_KERNEL); | 219 | ret = idr_alloc(&rproc->notifyids, rvring, 0, 0, GFP_KERNEL); |
220 | if (ret) { | 220 | if (ret < 0) { |
221 | dev_err(dev, "idr_alloc failed: %d\n", ret); | 221 | dev_err(dev, "idr_alloc failed: %d\n", ret); |
222 | dma_free_coherent(dev->parent, size, va, dma); | 222 | dma_free_coherent(dev->parent, size, va, dma); |
223 | return ret; | 223 | return ret; |
@@ -366,10 +366,12 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc, | |||
366 | /* it is now safe to add the virtio device */ | 366 | /* it is now safe to add the virtio device */ |
367 | ret = rproc_add_virtio_dev(rvdev, rsc->id); | 367 | ret = rproc_add_virtio_dev(rvdev, rsc->id); |
368 | if (ret) | 368 | if (ret) |
369 | goto free_rvdev; | 369 | goto remove_rvdev; |
370 | 370 | ||
371 | return 0; | 371 | return 0; |
372 | 372 | ||
373 | remove_rvdev: | ||
374 | list_del(&rvdev->node); | ||
373 | free_rvdev: | 375 | free_rvdev: |
374 | kfree(rvdev); | 376 | kfree(rvdev); |
375 | return ret; | 377 | return ret; |
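The one-character remoteproc fix above is about idr_alloc()'s return convention: it returns the allocated ID, which is zero or positive, and a negative errno only on failure, so `if (ret)` would have treated every ID other than 0 as an error. A hedged sketch of the intended check, with an illustrative IDR:

/* Hedged sketch of checking idr_alloc() correctly; my_idr is illustrative. */
#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDR(my_idr);

static int my_store(void *obj)
{
	int id;

	id = idr_alloc(&my_idr, obj, 0, 0, GFP_KERNEL);
	if (id < 0)
		return id;	/* negative errno: allocation failed */

	/* id >= 0 is the handle used later to look the object up again. */
	return id;
}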
diff --git a/drivers/remoteproc/ste_modem_rproc.c b/drivers/remoteproc/ste_modem_rproc.c index a7743c069339..fb95c4220052 100644 --- a/drivers/remoteproc/ste_modem_rproc.c +++ b/drivers/remoteproc/ste_modem_rproc.c | |||
@@ -240,6 +240,8 @@ static int sproc_drv_remove(struct platform_device *pdev) | |||
240 | 240 | ||
241 | /* Unregister as remoteproc device */ | 241 | /* Unregister as remoteproc device */ |
242 | rproc_del(sproc->rproc); | 242 | rproc_del(sproc->rproc); |
243 | dma_free_coherent(sproc->rproc->dev.parent, SPROC_FW_SIZE, | ||
244 | sproc->fw_addr, sproc->fw_dma_addr); | ||
243 | rproc_put(sproc->rproc); | 245 | rproc_put(sproc->rproc); |
244 | 246 | ||
245 | mdev->drv_data = NULL; | 247 | mdev->drv_data = NULL; |
@@ -297,10 +299,13 @@ static int sproc_probe(struct platform_device *pdev) | |||
297 | /* Register as a remoteproc device */ | 299 | /* Register as a remoteproc device */ |
298 | err = rproc_add(rproc); | 300 | err = rproc_add(rproc); |
299 | if (err) | 301 | if (err) |
300 | goto free_rproc; | 302 | goto free_mem; |
301 | 303 | ||
302 | return 0; | 304 | return 0; |
303 | 305 | ||
306 | free_mem: | ||
307 | dma_free_coherent(rproc->dev.parent, SPROC_FW_SIZE, | ||
308 | sproc->fw_addr, sproc->fw_dma_addr); | ||
304 | free_rproc: | 309 | free_rproc: |
305 | /* Reset device data upon error */ | 310 | /* Reset device data upon error */ |
306 | mdev->drv_data = NULL; | 311 | mdev->drv_data = NULL; |
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c index 0a9f27e094ea..434ebc3a99dc 100644 --- a/drivers/rtc/rtc-at91rm9200.c +++ b/drivers/rtc/rtc-at91rm9200.c | |||
@@ -44,7 +44,6 @@ static DECLARE_COMPLETION(at91_rtc_updated); | |||
44 | static unsigned int at91_alarm_year = AT91_RTC_EPOCH; | 44 | static unsigned int at91_alarm_year = AT91_RTC_EPOCH; |
45 | static void __iomem *at91_rtc_regs; | 45 | static void __iomem *at91_rtc_regs; |
46 | static int irq; | 46 | static int irq; |
47 | static u32 at91_rtc_imr; | ||
48 | 47 | ||
49 | /* | 48 | /* |
50 | * Decode time/date into rtc_time structure | 49 | * Decode time/date into rtc_time structure |
@@ -109,11 +108,9 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm) | |||
109 | cr = at91_rtc_read(AT91_RTC_CR); | 108 | cr = at91_rtc_read(AT91_RTC_CR); |
110 | at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM); | 109 | at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM); |
111 | 110 | ||
112 | at91_rtc_imr |= AT91_RTC_ACKUPD; | ||
113 | at91_rtc_write(AT91_RTC_IER, AT91_RTC_ACKUPD); | 111 | at91_rtc_write(AT91_RTC_IER, AT91_RTC_ACKUPD); |
114 | wait_for_completion(&at91_rtc_updated); /* wait for ACKUPD interrupt */ | 112 | wait_for_completion(&at91_rtc_updated); /* wait for ACKUPD interrupt */ |
115 | at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD); | 113 | at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD); |
116 | at91_rtc_imr &= ~AT91_RTC_ACKUPD; | ||
117 | 114 | ||
118 | at91_rtc_write(AT91_RTC_TIMR, | 115 | at91_rtc_write(AT91_RTC_TIMR, |
119 | bin2bcd(tm->tm_sec) << 0 | 116 | bin2bcd(tm->tm_sec) << 0 |
@@ -145,7 +142,7 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm) | |||
145 | tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year); | 142 | tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year); |
146 | tm->tm_year = at91_alarm_year - 1900; | 143 | tm->tm_year = at91_alarm_year - 1900; |
147 | 144 | ||
148 | alrm->enabled = (at91_rtc_imr & AT91_RTC_ALARM) | 145 | alrm->enabled = (at91_rtc_read(AT91_RTC_IMR) & AT91_RTC_ALARM) |
149 | ? 1 : 0; | 146 | ? 1 : 0; |
150 | 147 | ||
151 | dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__, | 148 | dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__, |
@@ -171,7 +168,6 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) | |||
171 | tm.tm_sec = alrm->time.tm_sec; | 168 | tm.tm_sec = alrm->time.tm_sec; |
172 | 169 | ||
173 | at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM); | 170 | at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM); |
174 | at91_rtc_imr &= ~AT91_RTC_ALARM; | ||
175 | at91_rtc_write(AT91_RTC_TIMALR, | 171 | at91_rtc_write(AT91_RTC_TIMALR, |
176 | bin2bcd(tm.tm_sec) << 0 | 172 | bin2bcd(tm.tm_sec) << 0 |
177 | | bin2bcd(tm.tm_min) << 8 | 173 | | bin2bcd(tm.tm_min) << 8 |
@@ -184,7 +180,6 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) | |||
184 | 180 | ||
185 | if (alrm->enabled) { | 181 | if (alrm->enabled) { |
186 | at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM); | 182 | at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM); |
187 | at91_rtc_imr |= AT91_RTC_ALARM; | ||
188 | at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM); | 183 | at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM); |
189 | } | 184 | } |
190 | 185 | ||
@@ -201,12 +196,9 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) | |||
201 | 196 | ||
202 | if (enabled) { | 197 | if (enabled) { |
203 | at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM); | 198 | at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM); |
204 | at91_rtc_imr |= AT91_RTC_ALARM; | ||
205 | at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM); | 199 | at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM); |
206 | } else { | 200 | } else |
207 | at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM); | 201 | at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM); |
208 | at91_rtc_imr &= ~AT91_RTC_ALARM; | ||
209 | } | ||
210 | 202 | ||
211 | return 0; | 203 | return 0; |
212 | } | 204 | } |
@@ -215,10 +207,12 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) | |||
215 | */ | 207 | */ |
216 | static int at91_rtc_proc(struct device *dev, struct seq_file *seq) | 208 | static int at91_rtc_proc(struct device *dev, struct seq_file *seq) |
217 | { | 209 | { |
210 | unsigned long imr = at91_rtc_read(AT91_RTC_IMR); | ||
211 | |||
218 | seq_printf(seq, "update_IRQ\t: %s\n", | 212 | seq_printf(seq, "update_IRQ\t: %s\n", |
219 | (at91_rtc_imr & AT91_RTC_ACKUPD) ? "yes" : "no"); | 213 | (imr & AT91_RTC_ACKUPD) ? "yes" : "no"); |
220 | seq_printf(seq, "periodic_IRQ\t: %s\n", | 214 | seq_printf(seq, "periodic_IRQ\t: %s\n", |
221 | (at91_rtc_imr & AT91_RTC_SECEV) ? "yes" : "no"); | 215 | (imr & AT91_RTC_SECEV) ? "yes" : "no"); |
222 | 216 | ||
223 | return 0; | 217 | return 0; |
224 | } | 218 | } |
@@ -233,7 +227,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id) | |||
233 | unsigned int rtsr; | 227 | unsigned int rtsr; |
234 | unsigned long events = 0; | 228 | unsigned long events = 0; |
235 | 229 | ||
236 | rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_imr; | 230 | rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read(AT91_RTC_IMR); |
237 | if (rtsr) { /* this interrupt is shared! Is it ours? */ | 231 | if (rtsr) { /* this interrupt is shared! Is it ours? */ |
238 | if (rtsr & AT91_RTC_ALARM) | 232 | if (rtsr & AT91_RTC_ALARM) |
239 | events |= (RTC_AF | RTC_IRQF); | 233 | events |= (RTC_AF | RTC_IRQF); |
@@ -297,7 +291,6 @@ static int __init at91_rtc_probe(struct platform_device *pdev) | |||
297 | at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM | | 291 | at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM | |
298 | AT91_RTC_SECEV | AT91_RTC_TIMEV | | 292 | AT91_RTC_SECEV | AT91_RTC_TIMEV | |
299 | AT91_RTC_CALEV); | 293 | AT91_RTC_CALEV); |
300 | at91_rtc_imr = 0; | ||
301 | 294 | ||
302 | ret = request_irq(irq, at91_rtc_interrupt, | 295 | ret = request_irq(irq, at91_rtc_interrupt, |
303 | IRQF_SHARED, | 296 | IRQF_SHARED, |
@@ -336,7 +329,6 @@ static int __exit at91_rtc_remove(struct platform_device *pdev) | |||
336 | at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM | | 329 | at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM | |
337 | AT91_RTC_SECEV | AT91_RTC_TIMEV | | 330 | AT91_RTC_SECEV | AT91_RTC_TIMEV | |
338 | AT91_RTC_CALEV); | 331 | AT91_RTC_CALEV); |
339 | at91_rtc_imr = 0; | ||
340 | free_irq(irq, pdev); | 332 | free_irq(irq, pdev); |
341 | 333 | ||
342 | rtc_device_unregister(rtc); | 334 | rtc_device_unregister(rtc); |
@@ -349,35 +341,31 @@ static int __exit at91_rtc_remove(struct platform_device *pdev) | |||
349 | 341 | ||
350 | /* AT91RM9200 RTC Power management control */ | 342 | /* AT91RM9200 RTC Power management control */ |
351 | 343 | ||
352 | static u32 at91_rtc_bkpimr; | 344 | static u32 at91_rtc_imr; |
353 | |||
354 | 345 | ||
355 | static int at91_rtc_suspend(struct device *dev) | 346 | static int at91_rtc_suspend(struct device *dev) |
356 | { | 347 | { |
357 | /* this IRQ is shared with DBGU and other hardware which isn't | 348 | /* this IRQ is shared with DBGU and other hardware which isn't |
358 | * necessarily doing PM like we are... | 349 | * necessarily doing PM like we are... |
359 | */ | 350 | */ |
360 | at91_rtc_bkpimr = at91_rtc_imr & (AT91_RTC_ALARM|AT91_RTC_SECEV); | 351 | at91_rtc_imr = at91_rtc_read(AT91_RTC_IMR) |
361 | if (at91_rtc_bkpimr) { | 352 | & (AT91_RTC_ALARM|AT91_RTC_SECEV); |
362 | if (device_may_wakeup(dev)) { | 353 | if (at91_rtc_imr) { |
354 | if (device_may_wakeup(dev)) | ||
363 | enable_irq_wake(irq); | 355 | enable_irq_wake(irq); |
364 | } else { | 356 | else |
365 | at91_rtc_write(AT91_RTC_IDR, at91_rtc_bkpimr); | 357 | at91_rtc_write(AT91_RTC_IDR, at91_rtc_imr); |
366 | at91_rtc_imr &= ~at91_rtc_bkpimr; | 358 | } |
367 | } | ||
368 | } | ||
369 | return 0; | 359 | return 0; |
370 | } | 360 | } |
371 | 361 | ||
372 | static int at91_rtc_resume(struct device *dev) | 362 | static int at91_rtc_resume(struct device *dev) |
373 | { | 363 | { |
374 | if (at91_rtc_bkpimr) { | 364 | if (at91_rtc_imr) { |
375 | if (device_may_wakeup(dev)) { | 365 | if (device_may_wakeup(dev)) |
376 | disable_irq_wake(irq); | 366 | disable_irq_wake(irq); |
377 | } else { | 367 | else |
378 | at91_rtc_imr |= at91_rtc_bkpimr; | 368 | at91_rtc_write(AT91_RTC_IER, at91_rtc_imr); |
379 | at91_rtc_write(AT91_RTC_IER, at91_rtc_bkpimr); | ||
380 | } | ||
381 | } | 369 | } |
382 | return 0; | 370 | return 0; |
383 | } | 371 | } |
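The at91rm9200 RTC rework above deletes the driver-maintained at91_rtc_imr shadow for normal operation and reads the controller's Interrupt Mask Register instead, so the reported state can never drift from the hardware (the small shadow that remains is used only across suspend/resume). A hedged sketch of the read-back, with illustrative MY_* definitions standing in for AT91_RTC_IMR and the alarm bit:

/* Hedged sketch: query the hardware interrupt mask instead of a shadow copy.
 * The offset and bit values are illustrative, not taken from the datasheet. */
#include <linux/io.h>

#define MY_RTC_IMR	0x28
#define MY_RTC_ALARM	(1 << 1)

static int my_alarm_irq_enabled(void __iomem *my_rtc_base)
{
	return (readl(my_rtc_base + MY_RTC_IMR) & MY_RTC_ALARM) ? 1 : 0;
}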
diff --git a/drivers/rtc/rtc-at91rm9200.h b/drivers/rtc/rtc-at91rm9200.h index 5f940b6844cb..da1945e5f714 100644 --- a/drivers/rtc/rtc-at91rm9200.h +++ b/drivers/rtc/rtc-at91rm9200.h | |||
@@ -64,6 +64,7 @@ | |||
64 | #define AT91_RTC_SCCR 0x1c /* Status Clear Command Register */ | 64 | #define AT91_RTC_SCCR 0x1c /* Status Clear Command Register */ |
65 | #define AT91_RTC_IER 0x20 /* Interrupt Enable Register */ | 65 | #define AT91_RTC_IER 0x20 /* Interrupt Enable Register */ |
66 | #define AT91_RTC_IDR 0x24 /* Interrupt Disable Register */ | 66 | #define AT91_RTC_IDR 0x24 /* Interrupt Disable Register */ |
67 | #define AT91_RTC_IMR 0x28 /* Interrupt Mask Register */ | ||
67 | 68 | ||
68 | #define AT91_RTC_VER 0x2c /* Valid Entry Register */ | 69 | #define AT91_RTC_VER 0x2c /* Valid Entry Register */ |
69 | #define AT91_RTC_NVTIM (1 << 0) /* Non valid Time */ | 70 | #define AT91_RTC_NVTIM (1 << 0) /* Non valid Time */ |
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index 5ac9c935c151..e9b9c8392832 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c | |||
@@ -307,7 +307,7 @@ static void scm_blk_handle_error(struct scm_request *scmrq) | |||
307 | case EQC_WR_PROHIBIT: | 307 | case EQC_WR_PROHIBIT: |
308 | spin_lock_irqsave(&bdev->lock, flags); | 308 | spin_lock_irqsave(&bdev->lock, flags); |
309 | if (bdev->state != SCM_WR_PROHIBIT) | 309 | if (bdev->state != SCM_WR_PROHIBIT) |
310 | pr_info("%lu: Write access to the SCM increment is suspended\n", | 310 | pr_info("%lx: Write access to the SCM increment is suspended\n", |
311 | (unsigned long) bdev->scmdev->address); | 311 | (unsigned long) bdev->scmdev->address); |
312 | bdev->state = SCM_WR_PROHIBIT; | 312 | bdev->state = SCM_WR_PROHIBIT; |
313 | spin_unlock_irqrestore(&bdev->lock, flags); | 313 | spin_unlock_irqrestore(&bdev->lock, flags); |
@@ -445,7 +445,7 @@ void scm_blk_set_available(struct scm_blk_dev *bdev) | |||
445 | 445 | ||
446 | spin_lock_irqsave(&bdev->lock, flags); | 446 | spin_lock_irqsave(&bdev->lock, flags); |
447 | if (bdev->state == SCM_WR_PROHIBIT) | 447 | if (bdev->state == SCM_WR_PROHIBIT) |
448 | pr_info("%lu: Write access to the SCM increment is restored\n", | 448 | pr_info("%lx: Write access to the SCM increment is restored\n", |
449 | (unsigned long) bdev->scmdev->address); | 449 | (unsigned long) bdev->scmdev->address); |
450 | bdev->state = SCM_OPER; | 450 | bdev->state = SCM_OPER; |
451 | spin_unlock_irqrestore(&bdev->lock, flags); | 451 | spin_unlock_irqrestore(&bdev->lock, flags); |
@@ -463,12 +463,15 @@ static int __init scm_blk_init(void) | |||
463 | goto out; | 463 | goto out; |
464 | 464 | ||
465 | scm_major = ret; | 465 | scm_major = ret; |
466 | if (scm_alloc_rqs(nr_requests)) | 466 | ret = scm_alloc_rqs(nr_requests); |
467 | if (ret) | ||
467 | goto out_unreg; | 468 | goto out_unreg; |
468 | 469 | ||
469 | scm_debug = debug_register("scm_log", 16, 1, 16); | 470 | scm_debug = debug_register("scm_log", 16, 1, 16); |
470 | if (!scm_debug) | 471 | if (!scm_debug) { |
472 | ret = -ENOMEM; | ||
471 | goto out_free; | 473 | goto out_free; |
474 | } | ||
472 | 475 | ||
473 | debug_register_view(scm_debug, &debug_hex_ascii_view); | 476 | debug_register_view(scm_debug, &debug_hex_ascii_view); |
474 | debug_set_level(scm_debug, 2); | 477 | debug_set_level(scm_debug, 2); |
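The scm_blk_init() hunk above makes every setup step store its own error into ret (and translates a NULL from debug_register() into -ENOMEM), so the function can no longer return success after a failed step. A generic, hedged sketch of that propagate-and-unwind init pattern; the my_step_*() helpers are stand-ins, not the s390 APIs:

/* Hedged sketch of an init path where each step feeds its error into ret. */
#include <linux/errno.h>

static int my_step_one(void)  { return 0; }
static int my_step_two(void)  { return 0; }
static void *my_step_three(void) { return (void *)1; }
static void my_undo_one(void) { }
static void my_undo_two(void) { }

static int my_init(void)
{
	int ret;

	ret = my_step_one();		/* returns 0 or a negative errno */
	if (ret)
		goto out;

	ret = my_step_two();
	if (ret)
		goto out_undo_one;

	if (!my_step_three()) {		/* returns a pointer-like resource */
		ret = -ENOMEM;		/* translate NULL into an errno explicitly */
		goto out_undo_two;
	}
	return 0;

out_undo_two:
	my_undo_two();
out_undo_one:
	my_undo_one();
out:
	return ret;
}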
diff --git a/drivers/s390/block/scm_drv.c b/drivers/s390/block/scm_drv.c index 5f6180d6ff08..c98cf52d78d1 100644 --- a/drivers/s390/block/scm_drv.c +++ b/drivers/s390/block/scm_drv.c | |||
@@ -19,7 +19,7 @@ static void scm_notify(struct scm_device *scmdev, enum scm_event event) | |||
19 | 19 | ||
20 | switch (event) { | 20 | switch (event) { |
21 | case SCM_CHANGE: | 21 | case SCM_CHANGE: |
22 | pr_info("%lu: The capabilities of the SCM increment changed\n", | 22 | pr_info("%lx: The capabilities of the SCM increment changed\n", |
23 | (unsigned long) scmdev->address); | 23 | (unsigned long) scmdev->address); |
24 | SCM_LOG(2, "State changed"); | 24 | SCM_LOG(2, "State changed"); |
25 | SCM_LOG_STATE(2, scmdev); | 25 | SCM_LOG_STATE(2, scmdev); |
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c index b907dba24025..cee69dac3e18 100644 --- a/drivers/s390/char/tty3270.c +++ b/drivers/s390/char/tty3270.c | |||
@@ -915,7 +915,7 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty) | |||
915 | int i, rc; | 915 | int i, rc; |
916 | 916 | ||
917 | /* Check if the tty3270 is already there. */ | 917 | /* Check if the tty3270 is already there. */ |
918 | view = raw3270_find_view(&tty3270_fn, tty->index); | 918 | view = raw3270_find_view(&tty3270_fn, tty->index + RAW3270_FIRSTMINOR); |
919 | if (!IS_ERR(view)) { | 919 | if (!IS_ERR(view)) { |
920 | tp = container_of(view, struct tty3270, view); | 920 | tp = container_of(view, struct tty3270, view); |
921 | tty->driver_data = tp; | 921 | tty->driver_data = tp; |
@@ -927,15 +927,16 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty) | |||
927 | tp->inattr = TF_INPUT; | 927 | tp->inattr = TF_INPUT; |
928 | return tty_port_install(&tp->port, driver, tty); | 928 | return tty_port_install(&tp->port, driver, tty); |
929 | } | 929 | } |
930 | if (tty3270_max_index < tty->index) | 930 | if (tty3270_max_index < tty->index + 1) |
931 | tty3270_max_index = tty->index; | 931 | tty3270_max_index = tty->index + 1; |
932 | 932 | ||
933 | /* Allocate tty3270 structure on first open. */ | 933 | /* Allocate tty3270 structure on first open. */ |
934 | tp = tty3270_alloc_view(); | 934 | tp = tty3270_alloc_view(); |
935 | if (IS_ERR(tp)) | 935 | if (IS_ERR(tp)) |
936 | return PTR_ERR(tp); | 936 | return PTR_ERR(tp); |
937 | 937 | ||
938 | rc = raw3270_add_view(&tp->view, &tty3270_fn, tty->index); | 938 | rc = raw3270_add_view(&tp->view, &tty3270_fn, |
939 | tty->index + RAW3270_FIRSTMINOR); | ||
939 | if (rc) { | 940 | if (rc) { |
940 | tty3270_free_view(tp); | 941 | tty3270_free_view(tp); |
941 | return rc; | 942 | return rc; |
@@ -1846,12 +1847,12 @@ static const struct tty_operations tty3270_ops = { | |||
1846 | 1847 | ||
1847 | void tty3270_create_cb(int minor) | 1848 | void tty3270_create_cb(int minor) |
1848 | { | 1849 | { |
1849 | tty_register_device(tty3270_driver, minor, NULL); | 1850 | tty_register_device(tty3270_driver, minor - RAW3270_FIRSTMINOR, NULL); |
1850 | } | 1851 | } |
1851 | 1852 | ||
1852 | void tty3270_destroy_cb(int minor) | 1853 | void tty3270_destroy_cb(int minor) |
1853 | { | 1854 | { |
1854 | tty_unregister_device(tty3270_driver, minor); | 1855 | tty_unregister_device(tty3270_driver, minor - RAW3270_FIRSTMINOR); |
1855 | } | 1856 | } |
1856 | 1857 | ||
1857 | struct raw3270_notifier tty3270_notifier = | 1858 | struct raw3270_notifier tty3270_notifier = |
@@ -1884,7 +1885,8 @@ static int __init tty3270_init(void) | |||
1884 | driver->driver_name = "tty3270"; | 1885 | driver->driver_name = "tty3270"; |
1885 | driver->name = "3270/tty"; | 1886 | driver->name = "3270/tty"; |
1886 | driver->major = IBM_TTY3270_MAJOR; | 1887 | driver->major = IBM_TTY3270_MAJOR; |
1887 | driver->minor_start = 0; | 1888 | driver->minor_start = RAW3270_FIRSTMINOR; |
1889 | driver->name_base = RAW3270_FIRSTMINOR; | ||
1888 | driver->type = TTY_DRIVER_TYPE_SYSTEM; | 1890 | driver->type = TTY_DRIVER_TYPE_SYSTEM; |
1889 | driver->subtype = SYSTEM_TYPE_TTY; | 1891 | driver->subtype = SYSTEM_TYPE_TTY; |
1890 | driver->init_termios = tty_std_termios; | 1892 | driver->init_termios = tty_std_termios; |
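The tty3270 hunks above consistently translate between the 0-based tty index and the raw3270 minor space, which starts at RAW3270_FIRSTMINOR, in both the register/unregister callbacks and the view lookups. A two-helper hedged sketch of that mapping; MY_FIRSTMINOR is an illustrative stand-in (assumed here to be 1):

/* Hedged sketch of the tty index <-> 3270 minor mapping applied above. */
#define MY_FIRSTMINOR	1	/* illustrative stand-in for RAW3270_FIRSTMINOR */

static inline int my_tty_index_to_minor(int index)
{
	return index + MY_FIRSTMINOR;	/* tty index 0 maps to the first 3270 minor */
}

static inline int my_minor_to_tty_index(int minor)
{
	return minor - MY_FIRSTMINOR;
}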
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 8c0622399fcd..6ccb7457746b 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h | |||
@@ -769,6 +769,7 @@ struct qeth_card { | |||
769 | unsigned long thread_start_mask; | 769 | unsigned long thread_start_mask; |
770 | unsigned long thread_allowed_mask; | 770 | unsigned long thread_allowed_mask; |
771 | unsigned long thread_running_mask; | 771 | unsigned long thread_running_mask; |
772 | struct task_struct *recovery_task; | ||
772 | spinlock_t ip_lock; | 773 | spinlock_t ip_lock; |
773 | struct list_head ip_list; | 774 | struct list_head ip_list; |
774 | struct list_head *ip_tbd_list; | 775 | struct list_head *ip_tbd_list; |
@@ -862,6 +863,8 @@ extern struct qeth_card_list_struct qeth_core_card_list; | |||
862 | extern struct kmem_cache *qeth_core_header_cache; | 863 | extern struct kmem_cache *qeth_core_header_cache; |
863 | extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS]; | 864 | extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS]; |
864 | 865 | ||
866 | void qeth_set_recovery_task(struct qeth_card *); | ||
867 | void qeth_clear_recovery_task(struct qeth_card *); | ||
865 | void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int); | 868 | void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int); |
866 | int qeth_threads_running(struct qeth_card *, unsigned long); | 869 | int qeth_threads_running(struct qeth_card *, unsigned long); |
867 | int qeth_wait_for_threads(struct qeth_card *, unsigned long); | 870 | int qeth_wait_for_threads(struct qeth_card *, unsigned long); |
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 0d73a999983d..451f92020599 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
@@ -177,6 +177,23 @@ const char *qeth_get_cardname_short(struct qeth_card *card) | |||
177 | return "n/a"; | 177 | return "n/a"; |
178 | } | 178 | } |
179 | 179 | ||
180 | void qeth_set_recovery_task(struct qeth_card *card) | ||
181 | { | ||
182 | card->recovery_task = current; | ||
183 | } | ||
184 | EXPORT_SYMBOL_GPL(qeth_set_recovery_task); | ||
185 | |||
186 | void qeth_clear_recovery_task(struct qeth_card *card) | ||
187 | { | ||
188 | card->recovery_task = NULL; | ||
189 | } | ||
190 | EXPORT_SYMBOL_GPL(qeth_clear_recovery_task); | ||
191 | |||
192 | static bool qeth_is_recovery_task(const struct qeth_card *card) | ||
193 | { | ||
194 | return card->recovery_task == current; | ||
195 | } | ||
196 | |||
180 | void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads, | 197 | void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads, |
181 | int clear_start_mask) | 198 | int clear_start_mask) |
182 | { | 199 | { |
@@ -205,6 +222,8 @@ EXPORT_SYMBOL_GPL(qeth_threads_running); | |||
205 | 222 | ||
206 | int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads) | 223 | int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads) |
207 | { | 224 | { |
225 | if (qeth_is_recovery_task(card)) | ||
226 | return 0; | ||
208 | return wait_event_interruptible(card->wait_q, | 227 | return wait_event_interruptible(card->wait_q, |
209 | qeth_threads_running(card, threads) == 0); | 228 | qeth_threads_running(card, threads) == 0); |
210 | } | 229 | } |
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index d690166efeaf..155b101bd730 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c | |||
@@ -1143,6 +1143,7 @@ static int qeth_l2_recover(void *ptr) | |||
1143 | QETH_CARD_TEXT(card, 2, "recover2"); | 1143 | QETH_CARD_TEXT(card, 2, "recover2"); |
1144 | dev_warn(&card->gdev->dev, | 1144 | dev_warn(&card->gdev->dev, |
1145 | "A recovery process has been started for the device\n"); | 1145 | "A recovery process has been started for the device\n"); |
1146 | qeth_set_recovery_task(card); | ||
1146 | __qeth_l2_set_offline(card->gdev, 1); | 1147 | __qeth_l2_set_offline(card->gdev, 1); |
1147 | rc = __qeth_l2_set_online(card->gdev, 1); | 1148 | rc = __qeth_l2_set_online(card->gdev, 1); |
1148 | if (!rc) | 1149 | if (!rc) |
@@ -1153,6 +1154,7 @@ static int qeth_l2_recover(void *ptr) | |||
1153 | dev_warn(&card->gdev->dev, "The qeth device driver " | 1154 | dev_warn(&card->gdev->dev, "The qeth device driver " |
1154 | "failed to recover an error on the device\n"); | 1155 | "failed to recover an error on the device\n"); |
1155 | } | 1156 | } |
1157 | qeth_clear_recovery_task(card); | ||
1156 | qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); | 1158 | qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); |
1157 | qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); | 1159 | qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); |
1158 | return 0; | 1160 | return 0; |
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 8710337dab3e..1f7edf1b26c3 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
@@ -3515,6 +3515,7 @@ static int qeth_l3_recover(void *ptr) | |||
3515 | QETH_CARD_TEXT(card, 2, "recover2"); | 3515 | QETH_CARD_TEXT(card, 2, "recover2"); |
3516 | dev_warn(&card->gdev->dev, | 3516 | dev_warn(&card->gdev->dev, |
3517 | "A recovery process has been started for the device\n"); | 3517 | "A recovery process has been started for the device\n"); |
3518 | qeth_set_recovery_task(card); | ||
3518 | __qeth_l3_set_offline(card->gdev, 1); | 3519 | __qeth_l3_set_offline(card->gdev, 1); |
3519 | rc = __qeth_l3_set_online(card->gdev, 1); | 3520 | rc = __qeth_l3_set_online(card->gdev, 1); |
3520 | if (!rc) | 3521 | if (!rc) |
@@ -3525,6 +3526,7 @@ static int qeth_l3_recover(void *ptr) | |||
3525 | dev_warn(&card->gdev->dev, "The qeth device driver " | 3526 | dev_warn(&card->gdev->dev, "The qeth device driver " |
3526 | "failed to recover an error on the device\n"); | 3527 | "failed to recover an error on the device\n"); |
3527 | } | 3528 | } |
3529 | qeth_clear_recovery_task(card); | ||
3528 | qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); | 3530 | qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); |
3529 | qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); | 3531 | qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); |
3530 | return 0; | 3532 | return 0; |
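The qeth hunks above record which task is running recovery so that qeth_wait_for_threads(), when called from the recovery thread itself, returns immediately instead of waiting for its own thread bit to clear. A hedged sketch of that "am I the recorded worker?" check, with illustrative my_* names in place of the qeth structures:

/* Hedged sketch of the recovery-task check added above. */
#include <linux/sched.h>

struct my_card {
	struct task_struct *recovery_task;
};

static void my_set_recovery_task(struct my_card *card)
{
	card->recovery_task = current;
}

static void my_clear_recovery_task(struct my_card *card)
{
	card->recovery_task = NULL;
}

static bool my_is_recovery_task(const struct my_card *card)
{
	return card->recovery_task == current;
}

static int my_wait_for_threads(struct my_card *card)
{
	/* The recovery thread must not wait for itself to finish. */
	if (my_is_recovery_task(card))
		return 0;
	/* ...otherwise sleep until the running thread bits clear, as the driver does. */
	return 0;
}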
diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c index 1a9d1e3ce64c..c1441ed282eb 100644 --- a/drivers/sbus/char/bbc_i2c.c +++ b/drivers/sbus/char/bbc_i2c.c | |||
@@ -282,7 +282,7 @@ static irqreturn_t bbc_i2c_interrupt(int irq, void *dev_id) | |||
282 | return IRQ_HANDLED; | 282 | return IRQ_HANDLED; |
283 | } | 283 | } |
284 | 284 | ||
285 | static void __init reset_one_i2c(struct bbc_i2c_bus *bp) | 285 | static void reset_one_i2c(struct bbc_i2c_bus *bp) |
286 | { | 286 | { |
287 | writeb(I2C_PCF_PIN, bp->i2c_control_regs + 0x0); | 287 | writeb(I2C_PCF_PIN, bp->i2c_control_regs + 0x0); |
288 | writeb(bp->own, bp->i2c_control_regs + 0x1); | 288 | writeb(bp->own, bp->i2c_control_regs + 0x1); |
@@ -291,7 +291,7 @@ static void __init reset_one_i2c(struct bbc_i2c_bus *bp) | |||
291 | writeb(I2C_PCF_IDLE, bp->i2c_control_regs + 0x0); | 291 | writeb(I2C_PCF_IDLE, bp->i2c_control_regs + 0x0); |
292 | } | 292 | } |
293 | 293 | ||
294 | static struct bbc_i2c_bus * __init attach_one_i2c(struct platform_device *op, int index) | 294 | static struct bbc_i2c_bus * attach_one_i2c(struct platform_device *op, int index) |
295 | { | 295 | { |
296 | struct bbc_i2c_bus *bp; | 296 | struct bbc_i2c_bus *bp; |
297 | struct device_node *dp; | 297 | struct device_node *dp; |
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 2daf4b0da434..90bc7bd00966 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c | |||
@@ -940,6 +940,7 @@ static int bnx2fc_libfc_config(struct fc_lport *lport) | |||
940 | fc_exch_init(lport); | 940 | fc_exch_init(lport); |
941 | fc_rport_init(lport); | 941 | fc_rport_init(lport); |
942 | fc_disc_init(lport); | 942 | fc_disc_init(lport); |
943 | fc_disc_config(lport, lport); | ||
943 | return 0; | 944 | return 0; |
944 | } | 945 | } |
945 | 946 | ||
@@ -2133,6 +2134,7 @@ static int _bnx2fc_create(struct net_device *netdev, | |||
2133 | } | 2134 | } |
2134 | 2135 | ||
2135 | ctlr = bnx2fc_to_ctlr(interface); | 2136 | ctlr = bnx2fc_to_ctlr(interface); |
2137 | cdev = fcoe_ctlr_to_ctlr_dev(ctlr); | ||
2136 | interface->vlan_id = vlan_id; | 2138 | interface->vlan_id = vlan_id; |
2137 | 2139 | ||
2138 | interface->timer_work_queue = | 2140 | interface->timer_work_queue = |
@@ -2143,7 +2145,7 @@ static int _bnx2fc_create(struct net_device *netdev, | |||
2143 | goto ifput_err; | 2145 | goto ifput_err; |
2144 | } | 2146 | } |
2145 | 2147 | ||
2146 | lport = bnx2fc_if_create(interface, &interface->hba->pcidev->dev, 0); | 2148 | lport = bnx2fc_if_create(interface, &cdev->dev, 0); |
2147 | if (!lport) { | 2149 | if (!lport) { |
2148 | printk(KERN_ERR PFX "Failed to create interface (%s)\n", | 2150 | printk(KERN_ERR PFX "Failed to create interface (%s)\n", |
2149 | netdev->name); | 2151 | netdev->name); |
@@ -2159,8 +2161,6 @@ static int _bnx2fc_create(struct net_device *netdev, | |||
2159 | /* Make this master N_port */ | 2161 | /* Make this master N_port */ |
2160 | ctlr->lp = lport; | 2162 | ctlr->lp = lport; |
2161 | 2163 | ||
2162 | cdev = fcoe_ctlr_to_ctlr_dev(ctlr); | ||
2163 | |||
2164 | if (link_state == BNX2FC_CREATE_LINK_UP) | 2164 | if (link_state == BNX2FC_CREATE_LINK_UP) |
2165 | cdev->enabled = FCOE_CTLR_ENABLED; | 2165 | cdev->enabled = FCOE_CTLR_ENABLED; |
2166 | else | 2166 | else |
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index b5d92fc93c70..9bfdc9a3f897 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c | |||
@@ -490,7 +490,6 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe) | |||
490 | { | 490 | { |
491 | struct net_device *netdev = fcoe->netdev; | 491 | struct net_device *netdev = fcoe->netdev; |
492 | struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe); | 492 | struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe); |
493 | struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip); | ||
494 | 493 | ||
495 | rtnl_lock(); | 494 | rtnl_lock(); |
496 | if (!fcoe->removed) | 495 | if (!fcoe->removed) |
@@ -501,7 +500,6 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe) | |||
501 | /* tear-down the FCoE controller */ | 500 | /* tear-down the FCoE controller */ |
502 | fcoe_ctlr_destroy(fip); | 501 | fcoe_ctlr_destroy(fip); |
503 | scsi_host_put(fip->lp->host); | 502 | scsi_host_put(fip->lp->host); |
504 | fcoe_ctlr_device_delete(ctlr_dev); | ||
505 | dev_put(netdev); | 503 | dev_put(netdev); |
506 | module_put(THIS_MODULE); | 504 | module_put(THIS_MODULE); |
507 | } | 505 | } |
@@ -2194,6 +2192,8 @@ out_nodev: | |||
2194 | */ | 2192 | */ |
2195 | static void fcoe_destroy_work(struct work_struct *work) | 2193 | static void fcoe_destroy_work(struct work_struct *work) |
2196 | { | 2194 | { |
2195 | struct fcoe_ctlr_device *cdev; | ||
2196 | struct fcoe_ctlr *ctlr; | ||
2197 | struct fcoe_port *port; | 2197 | struct fcoe_port *port; |
2198 | struct fcoe_interface *fcoe; | 2198 | struct fcoe_interface *fcoe; |
2199 | struct Scsi_Host *shost; | 2199 | struct Scsi_Host *shost; |
@@ -2224,10 +2224,15 @@ static void fcoe_destroy_work(struct work_struct *work) | |||
2224 | mutex_lock(&fcoe_config_mutex); | 2224 | mutex_lock(&fcoe_config_mutex); |
2225 | 2225 | ||
2226 | fcoe = port->priv; | 2226 | fcoe = port->priv; |
2227 | ctlr = fcoe_to_ctlr(fcoe); | ||
2228 | cdev = fcoe_ctlr_to_ctlr_dev(ctlr); | ||
2229 | |||
2227 | fcoe_if_destroy(port->lport); | 2230 | fcoe_if_destroy(port->lport); |
2228 | fcoe_interface_cleanup(fcoe); | 2231 | fcoe_interface_cleanup(fcoe); |
2229 | 2232 | ||
2230 | mutex_unlock(&fcoe_config_mutex); | 2233 | mutex_unlock(&fcoe_config_mutex); |
2234 | |||
2235 | fcoe_ctlr_device_delete(cdev); | ||
2231 | } | 2236 | } |
2232 | 2237 | ||
2233 | /** | 2238 | /** |
@@ -2335,7 +2340,9 @@ static int _fcoe_create(struct net_device *netdev, enum fip_state fip_mode, | |||
2335 | rc = -EIO; | 2340 | rc = -EIO; |
2336 | rtnl_unlock(); | 2341 | rtnl_unlock(); |
2337 | fcoe_interface_cleanup(fcoe); | 2342 | fcoe_interface_cleanup(fcoe); |
2338 | goto out_nortnl; | 2343 | mutex_unlock(&fcoe_config_mutex); |
2344 | fcoe_ctlr_device_delete(ctlr_dev); | ||
2345 | goto out; | ||
2339 | } | 2346 | } |
2340 | 2347 | ||
2341 | /* Make this the "master" N_Port */ | 2348 | /* Make this the "master" N_Port */ |
@@ -2375,8 +2382,8 @@ static int _fcoe_create(struct net_device *netdev, enum fip_state fip_mode, | |||
2375 | 2382 | ||
2376 | out_nodev: | 2383 | out_nodev: |
2377 | rtnl_unlock(); | 2384 | rtnl_unlock(); |
2378 | out_nortnl: | ||
2379 | mutex_unlock(&fcoe_config_mutex); | 2385 | mutex_unlock(&fcoe_config_mutex); |
2386 | out: | ||
2380 | return rc; | 2387 | return rc; |
2381 | } | 2388 | } |
2382 | 2389 | ||
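The fcoe_destroy_work() hunk above looks up the ctlr device while holding fcoe_config_mutex but only calls fcoe_ctlr_device_delete() after dropping it, since deleting the ctlr device can flush work that itself wants the mutex. A generic, hedged sketch of that unlock-before-delete ordering; all my_* names are illustrative:

/* Hedged sketch: drop the config lock before destroying an object whose
 * teardown may re-enter code that takes the same lock. */
#include <linux/mutex.h>
#include <linux/device.h>

struct my_object { struct device *child_dev; };

static DEFINE_MUTEX(my_config_mutex);

static void my_detach(struct my_object *obj) { /* work that needs my_config_mutex held */ }
static void my_device_delete(struct device *dev) { /* may flush work taking my_config_mutex */ }

static void my_destroy(struct my_object *obj)
{
	mutex_lock(&my_config_mutex);
	my_detach(obj);
	mutex_unlock(&my_config_mutex);

	/* Deleting the child device can synchronously run work that takes
	 * my_config_mutex, so it must happen after the unlock. */
	my_device_delete(obj->child_dev);
}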
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index 08c3bc398da2..a76247201be5 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c | |||
@@ -2815,6 +2815,47 @@ unlock: | |||
2815 | } | 2815 | } |
2816 | 2816 | ||
2817 | /** | 2817 | /** |
2818 | * fcoe_ctlr_mode_set() - Set or reset the ctlr's mode | ||
2819 | * @lport: The local port to be (re)configured | ||
2820 | * @fip: The FCoE controller whose mode is changing | ||
2821 | * @fip_mode: The new fip mode | ||
2822 | * | ||
2823 | * Note that we shouldn't be changing the libfc discovery settings | ||
2824 | * (fc_disc_config) while an lport is going through the libfc state | ||
2825 | * machine. The mode can only be changed when a fcoe_ctlr device is | ||
2826 | * disabled, so that should ensure that this routine is only called | ||
2827 | * when nothing is happening. | ||
2828 | */ | ||
2829 | void fcoe_ctlr_mode_set(struct fc_lport *lport, struct fcoe_ctlr *fip, | ||
2830 | enum fip_state fip_mode) | ||
2831 | { | ||
2832 | void *priv; | ||
2833 | |||
2834 | WARN_ON(lport->state != LPORT_ST_RESET && | ||
2835 | lport->state != LPORT_ST_DISABLED); | ||
2836 | |||
2837 | if (fip_mode == FIP_MODE_VN2VN) { | ||
2838 | lport->rport_priv_size = sizeof(struct fcoe_rport); | ||
2839 | lport->point_to_multipoint = 1; | ||
2840 | lport->tt.disc_recv_req = fcoe_ctlr_disc_recv; | ||
2841 | lport->tt.disc_start = fcoe_ctlr_disc_start; | ||
2842 | lport->tt.disc_stop = fcoe_ctlr_disc_stop; | ||
2843 | lport->tt.disc_stop_final = fcoe_ctlr_disc_stop_final; | ||
2844 | priv = fip; | ||
2845 | } else { | ||
2846 | lport->rport_priv_size = 0; | ||
2847 | lport->point_to_multipoint = 0; | ||
2848 | lport->tt.disc_recv_req = NULL; | ||
2849 | lport->tt.disc_start = NULL; | ||
2850 | lport->tt.disc_stop = NULL; | ||
2851 | lport->tt.disc_stop_final = NULL; | ||
2852 | priv = lport; | ||
2853 | } | ||
2854 | |||
2855 | fc_disc_config(lport, priv); | ||
2856 | } | ||
2857 | |||
2858 | /** | ||
2818 | * fcoe_libfc_config() - Sets up libfc related properties for local port | 2859 | * fcoe_libfc_config() - Sets up libfc related properties for local port |
2819 | * @lport: The local port to configure libfc for | 2860 | * @lport: The local port to configure libfc for |
2820 | * @fip: The FCoE controller in use by the local port | 2861 | * @fip: The FCoE controller in use by the local port |
@@ -2833,21 +2874,9 @@ int fcoe_libfc_config(struct fc_lport *lport, struct fcoe_ctlr *fip, | |||
2833 | fc_exch_init(lport); | 2874 | fc_exch_init(lport); |
2834 | fc_elsct_init(lport); | 2875 | fc_elsct_init(lport); |
2835 | fc_lport_init(lport); | 2876 | fc_lport_init(lport); |
2836 | if (fip->mode == FIP_MODE_VN2VN) | ||
2837 | lport->rport_priv_size = sizeof(struct fcoe_rport); | ||
2838 | fc_rport_init(lport); | 2877 | fc_rport_init(lport); |
2839 | if (fip->mode == FIP_MODE_VN2VN) { | 2878 | fc_disc_init(lport); |
2840 | lport->point_to_multipoint = 1; | 2879 | fcoe_ctlr_mode_set(lport, fip, fip->mode); |
2841 | lport->tt.disc_recv_req = fcoe_ctlr_disc_recv; | ||
2842 | lport->tt.disc_start = fcoe_ctlr_disc_start; | ||
2843 | lport->tt.disc_stop = fcoe_ctlr_disc_stop; | ||
2844 | lport->tt.disc_stop_final = fcoe_ctlr_disc_stop_final; | ||
2845 | mutex_init(&lport->disc.disc_mutex); | ||
2846 | INIT_LIST_HEAD(&lport->disc.rports); | ||
2847 | lport->disc.priv = fip; | ||
2848 | } else { | ||
2849 | fc_disc_init(lport); | ||
2850 | } | ||
2851 | return 0; | 2880 | return 0; |
2852 | } | 2881 | } |
2853 | EXPORT_SYMBOL_GPL(fcoe_libfc_config); | 2882 | EXPORT_SYMBOL_GPL(fcoe_libfc_config); |
@@ -2875,6 +2904,7 @@ EXPORT_SYMBOL(fcoe_fcf_get_selected); | |||
2875 | void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *ctlr_dev) | 2904 | void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *ctlr_dev) |
2876 | { | 2905 | { |
2877 | struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); | 2906 | struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); |
2907 | struct fc_lport *lport = ctlr->lp; | ||
2878 | 2908 | ||
2879 | mutex_lock(&ctlr->ctlr_mutex); | 2909 | mutex_lock(&ctlr->ctlr_mutex); |
2880 | switch (ctlr_dev->mode) { | 2910 | switch (ctlr_dev->mode) { |
@@ -2888,5 +2918,7 @@ void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *ctlr_dev) | |||
2888 | } | 2918 | } |
2889 | 2919 | ||
2890 | mutex_unlock(&ctlr->ctlr_mutex); | 2920 | mutex_unlock(&ctlr->ctlr_mutex); |
2921 | |||
2922 | fcoe_ctlr_mode_set(lport, ctlr, ctlr->mode); | ||
2891 | } | 2923 | } |
2892 | EXPORT_SYMBOL(fcoe_ctlr_set_fip_mode); | 2924 | EXPORT_SYMBOL(fcoe_ctlr_set_fip_mode); |
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index a044f593e8b9..d0fa4b6c551f 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c | |||
@@ -1899,8 +1899,8 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev) | |||
1899 | sdev->allow_restart = 1; | 1899 | sdev->allow_restart = 1; |
1900 | blk_queue_rq_timeout(sdev->request_queue, 120 * HZ); | 1900 | blk_queue_rq_timeout(sdev->request_queue, 120 * HZ); |
1901 | } | 1901 | } |
1902 | scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun); | ||
1903 | spin_unlock_irqrestore(shost->host_lock, lock_flags); | 1902 | spin_unlock_irqrestore(shost->host_lock, lock_flags); |
1903 | scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun); | ||
1904 | return 0; | 1904 | return 0; |
1905 | } | 1905 | } |
1906 | 1906 | ||
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index f328089a1060..2197b57fb225 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c | |||
@@ -5148,7 +5148,7 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd) | |||
5148 | ipr_trace; | 5148 | ipr_trace; |
5149 | } | 5149 | } |
5150 | 5150 | ||
5151 | list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q); | 5151 | list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); |
5152 | if (!ipr_is_naca_model(res)) | 5152 | if (!ipr_is_naca_model(res)) |
5153 | res->needs_sync_complete = 1; | 5153 | res->needs_sync_complete = 1; |
5154 | 5154 | ||
@@ -9349,7 +9349,10 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev) | |||
9349 | int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); | 9349 | int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); |
9350 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | 9350 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); |
9351 | 9351 | ||
9352 | rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg); | 9352 | if (ioa_cfg->intr_flag == IPR_USE_MSIX) |
9353 | rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg); | ||
9354 | else | ||
9355 | rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg); | ||
9353 | if (rc) { | 9356 | if (rc) { |
9354 | dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq); | 9357 | dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq); |
9355 | return rc; | 9358 | return rc; |
@@ -9371,7 +9374,10 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev) | |||
9371 | 9374 | ||
9372 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | 9375 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); |
9373 | 9376 | ||
9374 | free_irq(pdev->irq, ioa_cfg); | 9377 | if (ioa_cfg->intr_flag == IPR_USE_MSIX) |
9378 | free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg); | ||
9379 | else | ||
9380 | free_irq(pdev->irq, ioa_cfg); | ||
9375 | 9381 | ||
9376 | LEAVE; | 9382 | LEAVE; |
9377 | 9383 | ||
@@ -9722,6 +9728,7 @@ static void __ipr_remove(struct pci_dev *pdev) | |||
9722 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); | 9728 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); |
9723 | wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); | 9729 | wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); |
9724 | flush_work(&ioa_cfg->work_q); | 9730 | flush_work(&ioa_cfg->work_q); |
9731 | INIT_LIST_HEAD(&ioa_cfg->used_res_q); | ||
9725 | spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); | 9732 | spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); |
9726 | 9733 | ||
9727 | spin_lock(&ipr_driver_lock); | 9734 | spin_lock(&ipr_driver_lock); |
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 8e561e6a557c..880a9068ca12 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c | |||
@@ -712,12 +712,13 @@ static void fc_disc_stop_final(struct fc_lport *lport) | |||
712 | } | 712 | } |
713 | 713 | ||
714 | /** | 714 | /** |
715 | * fc_disc_init() - Initialize the discovery layer for a local port | 715 | * fc_disc_config() - Configure the discovery layer for a local port |
716 | * @lport: The local port that needs the discovery layer to be initialized | 716 | * @lport: The local port that needs the discovery layer to be configured |
717 | * @priv: Private data structure for users of the discovery layer | ||
717 | */ | 718 | */ |
718 | int fc_disc_init(struct fc_lport *lport) | 719 | void fc_disc_config(struct fc_lport *lport, void *priv) |
719 | { | 720 | { |
720 | struct fc_disc *disc; | 721 | struct fc_disc *disc = &lport->disc; |
721 | 722 | ||
722 | if (!lport->tt.disc_start) | 723 | if (!lport->tt.disc_start) |
723 | lport->tt.disc_start = fc_disc_start; | 724 | lport->tt.disc_start = fc_disc_start; |
@@ -732,12 +733,21 @@ int fc_disc_init(struct fc_lport *lport) | |||
732 | lport->tt.disc_recv_req = fc_disc_recv_req; | 733 | lport->tt.disc_recv_req = fc_disc_recv_req; |
733 | 734 | ||
734 | disc = &lport->disc; | 735 | disc = &lport->disc; |
736 | |||
737 | disc->priv = priv; | ||
738 | } | ||
739 | EXPORT_SYMBOL(fc_disc_config); | ||
740 | |||
741 | /** | ||
742 | * fc_disc_init() - Initialize the discovery layer for a local port | ||
743 | * @lport: The local port that needs the discovery layer to be initialized | ||
744 | */ | ||
745 | void fc_disc_init(struct fc_lport *lport) | ||
746 | { | ||
747 | struct fc_disc *disc = &lport->disc; | ||
748 | |||
735 | INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout); | 749 | INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout); |
736 | mutex_init(&disc->disc_mutex); | 750 | mutex_init(&disc->disc_mutex); |
737 | INIT_LIST_HEAD(&disc->rports); | 751 | INIT_LIST_HEAD(&disc->rports); |
738 | |||
739 | disc->priv = lport; | ||
740 | |||
741 | return 0; | ||
742 | } | 752 | } |
743 | EXPORT_SYMBOL(fc_disc_init); | 753 | EXPORT_SYMBOL(fc_disc_init); |
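The libfc change above splits one-time discovery setup (fc_disc_init(): work item, mutex, rport list) from re-runnable configuration (fc_disc_config(): handlers and the priv pointer), so the FIP mode can be switched later without re-initialising locks. Below is a rough, self-contained C sketch of that init-once / config-many split; the struct and handler names are invented for the example.

#include <stdio.h>
#include <string.h>

struct disc {
	int initialized;        /* one-time state (stands in for mutex/list/work) */
	void (*start)(void);    /* mode-dependent handler */
	void *priv;             /* mode-dependent private data */
};

static void fabric_start(void) { puts("fabric discovery"); }
static void vn2vn_start(void)  { puts("VN2VN discovery"); }

/* May be called repeatedly, e.g. whenever the mode changes. */
static void disc_config(struct disc *d, void (*start)(void), void *priv)
{
	d->start = start;
	d->priv = priv;
}

/* Called exactly once at port setup. */
static void disc_init(struct disc *d)
{
	memset(d, 0, sizeof(*d));
	d->initialized = 1;
	disc_config(d, fabric_start, d);    /* sane defaults */
}

int main(void)
{
	struct disc d;

	disc_init(&d);
	d.start();                          /* fabric mode */
	disc_config(&d, vn2vn_start, &d);   /* later mode switch: no re-init */
	d.start();
	return 0;
}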
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index aec2e0da5016..55cbd0180159 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c | |||
@@ -235,6 +235,17 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) | |||
235 | linkrate = phy->linkrate; | 235 | linkrate = phy->linkrate; |
236 | memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); | 236 | memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); |
237 | 237 | ||
238 | /* Handle vacant phy - rest of dr data is not valid so skip it */ | ||
239 | if (phy->phy_state == PHY_VACANT) { | ||
240 | memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); | ||
241 | phy->attached_dev_type = NO_DEVICE; | ||
242 | if (!test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) { | ||
243 | phy->phy_id = phy_id; | ||
244 | goto skip; | ||
245 | } else | ||
246 | goto out; | ||
247 | } | ||
248 | |||
238 | phy->attached_dev_type = to_dev_type(dr); | 249 | phy->attached_dev_type = to_dev_type(dr); |
239 | if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) | 250 | if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) |
240 | goto out; | 251 | goto out; |
@@ -272,6 +283,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) | |||
272 | phy->phy->maximum_linkrate = dr->pmax_linkrate; | 283 | phy->phy->maximum_linkrate = dr->pmax_linkrate; |
273 | phy->phy->negotiated_linkrate = phy->linkrate; | 284 | phy->phy->negotiated_linkrate = phy->linkrate; |
274 | 285 | ||
286 | skip: | ||
275 | if (new_phy) | 287 | if (new_phy) |
276 | if (sas_phy_add(phy->phy)) { | 288 | if (sas_phy_add(phy->phy)) { |
277 | sas_phy_free(phy->phy); | 289 | sas_phy_free(phy->phy); |
@@ -388,7 +400,7 @@ int sas_ex_phy_discover(struct domain_device *dev, int single) | |||
388 | if (!disc_req) | 400 | if (!disc_req) |
389 | return -ENOMEM; | 401 | return -ENOMEM; |
390 | 402 | ||
391 | disc_resp = alloc_smp_req(DISCOVER_RESP_SIZE); | 403 | disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE); |
392 | if (!disc_resp) { | 404 | if (!disc_resp) { |
393 | kfree(disc_req); | 405 | kfree(disc_req); |
394 | return -ENOMEM; | 406 | return -ENOMEM; |
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 74b67d98e952..d43faf34c1e2 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c | |||
@@ -438,11 +438,12 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, | |||
438 | struct lpfc_rqe *temp_hrqe; | 438 | struct lpfc_rqe *temp_hrqe; |
439 | struct lpfc_rqe *temp_drqe; | 439 | struct lpfc_rqe *temp_drqe; |
440 | struct lpfc_register doorbell; | 440 | struct lpfc_register doorbell; |
441 | int put_index = hq->host_index; | 441 | int put_index; |
442 | 442 | ||
443 | /* sanity check on queue memory */ | 443 | /* sanity check on queue memory */ |
444 | if (unlikely(!hq) || unlikely(!dq)) | 444 | if (unlikely(!hq) || unlikely(!dq)) |
445 | return -ENOMEM; | 445 | return -ENOMEM; |
446 | put_index = hq->host_index; | ||
446 | temp_hrqe = hq->qe[hq->host_index].rqe; | 447 | temp_hrqe = hq->qe[hq->host_index].rqe; |
447 | temp_drqe = dq->qe[dq->host_index].rqe; | 448 | temp_drqe = dq->qe[dq->host_index].rqe; |
448 | 449 | ||
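The lpfc hunk moves the hq->host_index read below the NULL sanity check: initialising put_index in the declaration dereferenced hq before the !hq test could reject it. A tiny illustrative C sketch of the same ordering bug and fix, with an invented queue type:

#include <stddef.h>
#include <stdio.h>

struct queue { int host_index; };

static int put_entry(struct queue *hq)
{
	int put_index;

	/* Buggy form: "int put_index = hq->host_index;" would crash when hq == NULL. */
	if (hq == NULL)
		return -1;              /* check first ... */
	put_index = hq->host_index;     /* ... dereference only afterwards */

	printf("putting at %d\n", put_index);
	return 0;
}

int main(void)
{
	struct queue q = { .host_index = 3 };

	put_entry(NULL);                /* safely rejected */
	put_entry(&q);
	return 0;
}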
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 1d82eef4e1eb..b3db9dcc2619 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c | |||
@@ -1938,11 +1938,6 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) | |||
1938 | "Timer for the VP[%d] has stopped\n", vha->vp_idx); | 1938 | "Timer for the VP[%d] has stopped\n", vha->vp_idx); |
1939 | } | 1939 | } |
1940 | 1940 | ||
1941 | /* No pending activities shall be there on the vha now */ | ||
1942 | if (ql2xextended_error_logging & ql_dbg_user) | ||
1943 | msleep(random32()%10); /* Just to see if something falls on | ||
1944 | * the net we have placed below */ | ||
1945 | |||
1946 | BUG_ON(atomic_read(&vha->vref_count)); | 1941 | BUG_ON(atomic_read(&vha->vref_count)); |
1947 | 1942 | ||
1948 | qla2x00_free_fcports(vha); | 1943 | qla2x00_free_fcports(vha); |
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 1626de52e32a..fbc305f1c87c 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c | |||
@@ -15,6 +15,7 @@ | |||
15 | * | Mailbox commands | 0x115b | 0x111a-0x111b | | 15 | * | Mailbox commands | 0x115b | 0x111a-0x111b | |
16 | * | | | 0x112c-0x112e | | 16 | * | | | 0x112c-0x112e | |
17 | * | | | 0x113a | | 17 | * | | | 0x113a | |
18 | * | | | 0x1155-0x1158 | | ||
18 | * | Device Discovery | 0x2087 | 0x2020-0x2022, | | 19 | * | Device Discovery | 0x2087 | 0x2020-0x2022, | |
19 | * | | | 0x2016 | | 20 | * | | | 0x2016 | |
20 | * | Queue Command and IO tracing | 0x3031 | 0x3006-0x300b | | 21 | * | Queue Command and IO tracing | 0x3031 | 0x3006-0x300b | |
@@ -401,7 +402,7 @@ qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr, | |||
401 | void *ring; | 402 | void *ring; |
402 | } aq, *aqp; | 403 | } aq, *aqp; |
403 | 404 | ||
404 | if (!ha->tgt.atio_q_length) | 405 | if (!ha->tgt.atio_ring) |
405 | return ptr; | 406 | return ptr; |
406 | 407 | ||
407 | num_queues = 1; | 408 | num_queues = 1; |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index c6509911772b..65c5ff75936b 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h | |||
@@ -863,7 +863,6 @@ typedef struct { | |||
863 | #define MBX_1 BIT_1 | 863 | #define MBX_1 BIT_1 |
864 | #define MBX_0 BIT_0 | 864 | #define MBX_0 BIT_0 |
865 | 865 | ||
866 | #define RNID_TYPE_SET_VERSION 0x9 | ||
867 | #define RNID_TYPE_ASIC_TEMP 0xC | 866 | #define RNID_TYPE_ASIC_TEMP 0xC |
868 | 867 | ||
869 | /* | 868 | /* |
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index eb3ca21a7f17..b310fa97b545 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h | |||
@@ -358,9 +358,6 @@ extern int | |||
358 | qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *); | 358 | qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *); |
359 | 359 | ||
360 | extern int | 360 | extern int |
361 | qla2x00_set_driver_version(scsi_qla_host_t *, char *); | ||
362 | |||
363 | extern int | ||
364 | qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *, | 361 | qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *, |
365 | uint16_t, uint16_t, uint16_t, uint16_t); | 362 | uint16_t, uint16_t, uint16_t, uint16_t); |
366 | 363 | ||
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index edf4d14a1335..b59203393cb2 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
@@ -619,8 +619,6 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) | |||
619 | if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)) | 619 | if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)) |
620 | qla24xx_read_fcp_prio_cfg(vha); | 620 | qla24xx_read_fcp_prio_cfg(vha); |
621 | 621 | ||
622 | qla2x00_set_driver_version(vha, QLA2XXX_VERSION); | ||
623 | |||
624 | return (rval); | 622 | return (rval); |
625 | } | 623 | } |
626 | 624 | ||
@@ -1399,7 +1397,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) | |||
1399 | mq_size += ha->max_rsp_queues * | 1397 | mq_size += ha->max_rsp_queues * |
1400 | (rsp->length * sizeof(response_t)); | 1398 | (rsp->length * sizeof(response_t)); |
1401 | } | 1399 | } |
1402 | if (ha->tgt.atio_q_length) | 1400 | if (ha->tgt.atio_ring) |
1403 | mq_size += ha->tgt.atio_q_length * sizeof(request_t); | 1401 | mq_size += ha->tgt.atio_q_length * sizeof(request_t); |
1404 | /* Allocate memory for Fibre Channel Event Buffer. */ | 1402 | /* Allocate memory for Fibre Channel Event Buffer. */ |
1405 | if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha)) | 1403 | if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha)) |
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 186dd59ce4fa..43345af56431 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c | |||
@@ -3866,64 +3866,6 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha) | |||
3866 | return rval; | 3866 | return rval; |
3867 | } | 3867 | } |
3868 | 3868 | ||
3869 | int | ||
3870 | qla2x00_set_driver_version(scsi_qla_host_t *vha, char *version) | ||
3871 | { | ||
3872 | int rval; | ||
3873 | mbx_cmd_t mc; | ||
3874 | mbx_cmd_t *mcp = &mc; | ||
3875 | int len; | ||
3876 | uint16_t dwlen; | ||
3877 | uint8_t *str; | ||
3878 | dma_addr_t str_dma; | ||
3879 | struct qla_hw_data *ha = vha->hw; | ||
3880 | |||
3881 | if (!IS_FWI2_CAPABLE(ha) || IS_QLA82XX(ha)) | ||
3882 | return QLA_FUNCTION_FAILED; | ||
3883 | |||
3884 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1155, | ||
3885 | "Entered %s.\n", __func__); | ||
3886 | |||
3887 | str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma); | ||
3888 | if (!str) { | ||
3889 | ql_log(ql_log_warn, vha, 0x1156, | ||
3890 | "Failed to allocate driver version param.\n"); | ||
3891 | return QLA_MEMORY_ALLOC_FAILED; | ||
3892 | } | ||
3893 | |||
3894 | memcpy(str, "\x7\x3\x11\x0", 4); | ||
3895 | dwlen = str[0]; | ||
3896 | len = dwlen * sizeof(uint32_t) - 4; | ||
3897 | memset(str + 4, 0, len); | ||
3898 | if (len > strlen(version)) | ||
3899 | len = strlen(version); | ||
3900 | memcpy(str + 4, version, len); | ||
3901 | |||
3902 | mcp->mb[0] = MBC_SET_RNID_PARAMS; | ||
3903 | mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen; | ||
3904 | mcp->mb[2] = MSW(LSD(str_dma)); | ||
3905 | mcp->mb[3] = LSW(LSD(str_dma)); | ||
3906 | mcp->mb[6] = MSW(MSD(str_dma)); | ||
3907 | mcp->mb[7] = LSW(MSD(str_dma)); | ||
3908 | mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; | ||
3909 | mcp->in_mb = MBX_0; | ||
3910 | mcp->tov = MBX_TOV_SECONDS; | ||
3911 | mcp->flags = 0; | ||
3912 | rval = qla2x00_mailbox_command(vha, mcp); | ||
3913 | |||
3914 | if (rval != QLA_SUCCESS) { | ||
3915 | ql_dbg(ql_dbg_mbx, vha, 0x1157, | ||
3916 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); | ||
3917 | } else { | ||
3918 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1158, | ||
3919 | "Done %s.\n", __func__); | ||
3920 | } | ||
3921 | |||
3922 | dma_pool_free(ha->s_dma_pool, str, str_dma); | ||
3923 | |||
3924 | return rval; | ||
3925 | } | ||
3926 | |||
3927 | static int | 3869 | static int |
3928 | qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp) | 3870 | qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp) |
3929 | { | 3871 | { |
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index 2b6e478d9e33..ec54036d1e12 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h | |||
@@ -7,7 +7,7 @@ | |||
7 | /* | 7 | /* |
8 | * Driver version | 8 | * Driver version |
9 | */ | 9 | */ |
10 | #define QLA2XXX_VERSION "8.04.00.08-k" | 10 | #define QLA2XXX_VERSION "8.04.00.13-k" |
11 | 11 | ||
12 | #define QLA_DRIVER_MAJOR_VER 8 | 12 | #define QLA_DRIVER_MAJOR_VER 8 |
13 | #define QLA_DRIVER_MINOR_VER 4 | 13 | #define QLA_DRIVER_MINOR_VER 4 |
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 86974471af68..2a32036a9404 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c | |||
@@ -4112,6 +4112,10 @@ static int st_probe(struct device *dev) | |||
4112 | tpnt->disk = disk; | 4112 | tpnt->disk = disk; |
4113 | disk->private_data = &tpnt->driver; | 4113 | disk->private_data = &tpnt->driver; |
4114 | disk->queue = SDp->request_queue; | 4114 | disk->queue = SDp->request_queue; |
4115 | /* SCSI tape doesn't register this gendisk via add_disk(). Manually | ||
4116 | * take queue reference that release_disk() expects. */ | ||
4117 | if (!blk_get_queue(disk->queue)) | ||
4118 | goto out_put_disk; | ||
4115 | tpnt->driver = &st_template; | 4119 | tpnt->driver = &st_template; |
4116 | 4120 | ||
4117 | tpnt->device = SDp; | 4121 | tpnt->device = SDp; |
@@ -4185,7 +4189,7 @@ static int st_probe(struct device *dev) | |||
4185 | idr_preload_end(); | 4189 | idr_preload_end(); |
4186 | if (error < 0) { | 4190 | if (error < 0) { |
4187 | pr_warn("st: idr allocation failed: %d\n", error); | 4191 | pr_warn("st: idr allocation failed: %d\n", error); |
4188 | goto out_put_disk; | 4192 | goto out_put_queue; |
4189 | } | 4193 | } |
4190 | tpnt->index = error; | 4194 | tpnt->index = error; |
4191 | sprintf(disk->disk_name, "st%d", tpnt->index); | 4195 | sprintf(disk->disk_name, "st%d", tpnt->index); |
@@ -4211,6 +4215,8 @@ out_remove_devs: | |||
4211 | spin_lock(&st_index_lock); | 4215 | spin_lock(&st_index_lock); |
4212 | idr_remove(&st_index_idr, tpnt->index); | 4216 | idr_remove(&st_index_idr, tpnt->index); |
4213 | spin_unlock(&st_index_lock); | 4217 | spin_unlock(&st_index_lock); |
4218 | out_put_queue: | ||
4219 | blk_put_queue(disk->queue); | ||
4214 | out_put_disk: | 4220 | out_put_disk: |
4215 | put_disk(disk); | 4221 | put_disk(disk); |
4216 | kfree(tpnt); | 4222 | kfree(tpnt); |
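The st change takes an extra reference on the request queue with blk_get_queue(), because this gendisk is never registered via add_disk(), and adds an out_put_queue label so later failures drop that reference before put_disk(). As a generic, self-contained sketch of balancing a get with a put on every later error path (the refcount and helper names are illustrative only):

#include <stdio.h>

static int refs;

static int  get_ref(void)      { refs++; return 1; }   /* nonzero on success */
static void put_ref(void)      { refs--; }
static void put_disk_obj(void) { puts("disk released"); }

static int probe(int fail_late)
{
	if (!get_ref())
		goto out_put_disk;

	if (fail_late)          /* any failure after the get ... */
		goto out_put_queue;

	return 0;               /* success keeps the reference */

out_put_queue:
	put_ref();              /* ... must undo it exactly once */
out_put_disk:
	put_disk_obj();
	return -1;
}

int main(void)
{
	probe(1);
	printf("refs after failed probe: %d\n", refs);   /* prints 0 */
	return 0;
}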
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index f80eee74a311..2be0de920d67 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig | |||
@@ -55,6 +55,7 @@ comment "SPI Master Controller Drivers" | |||
55 | 55 | ||
56 | config SPI_ALTERA | 56 | config SPI_ALTERA |
57 | tristate "Altera SPI Controller" | 57 | tristate "Altera SPI Controller" |
58 | depends on GENERIC_HARDIRQS | ||
58 | select SPI_BITBANG | 59 | select SPI_BITBANG |
59 | help | 60 | help |
60 | This is the driver for the Altera SPI Controller. | 61 | This is the driver for the Altera SPI Controller. |
@@ -310,7 +311,7 @@ config SPI_PXA2XX_DMA | |||
310 | 311 | ||
311 | config SPI_PXA2XX | 312 | config SPI_PXA2XX |
312 | tristate "PXA2xx SSP SPI master" | 313 | tristate "PXA2xx SSP SPI master" |
313 | depends on ARCH_PXA || PCI || ACPI | 314 | depends on (ARCH_PXA || PCI || ACPI) && GENERIC_HARDIRQS |
314 | select PXA_SSP if ARCH_PXA | 315 | select PXA_SSP if ARCH_PXA |
315 | help | 316 | help |
316 | This enables using a PXA2xx or Sodaville SSP port as a SPI master | 317 | This enables using a PXA2xx or Sodaville SSP port as a SPI master |
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c index 9578af782a77..d7df435d962e 100644 --- a/drivers/spi/spi-bcm63xx.c +++ b/drivers/spi/spi-bcm63xx.c | |||
@@ -152,7 +152,6 @@ static void bcm63xx_spi_setup_transfer(struct spi_device *spi, | |||
152 | static int bcm63xx_spi_setup(struct spi_device *spi) | 152 | static int bcm63xx_spi_setup(struct spi_device *spi) |
153 | { | 153 | { |
154 | struct bcm63xx_spi *bs; | 154 | struct bcm63xx_spi *bs; |
155 | int ret; | ||
156 | 155 | ||
157 | bs = spi_master_get_devdata(spi->master); | 156 | bs = spi_master_get_devdata(spi->master); |
158 | 157 | ||
@@ -490,7 +489,7 @@ static int bcm63xx_spi_probe(struct platform_device *pdev) | |||
490 | default: | 489 | default: |
491 | dev_err(dev, "unsupported MSG_CTL width: %d\n", | 490 | dev_err(dev, "unsupported MSG_CTL width: %d\n", |
492 | bs->msg_ctl_width); | 491 | bs->msg_ctl_width); |
493 | goto out_clk_disable; | 492 | goto out_err; |
494 | } | 493 | } |
495 | 494 | ||
496 | /* Initialize hardware */ | 495 | /* Initialize hardware */ |
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c index 89480b281d74..3e490ee7f275 100644 --- a/drivers/spi/spi-mpc512x-psc.c +++ b/drivers/spi/spi-mpc512x-psc.c | |||
@@ -164,7 +164,7 @@ static int mpc512x_psc_spi_transfer_rxtx(struct spi_device *spi, | |||
164 | 164 | ||
165 | for (i = count; i > 0; i--) { | 165 | for (i = count; i > 0; i--) { |
166 | data = tx_buf ? *tx_buf++ : 0; | 166 | data = tx_buf ? *tx_buf++ : 0; |
167 | if (len == EOFBYTE) | 167 | if (len == EOFBYTE && t->cs_change) |
168 | setbits32(&fifo->txcmd, MPC512x_PSC_FIFO_EOF); | 168 | setbits32(&fifo->txcmd, MPC512x_PSC_FIFO_EOF); |
169 | out_8(&fifo->txdata_8, data); | 169 | out_8(&fifo->txdata_8, data); |
170 | len--; | 170 | len--; |
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 90b27a3508a6..810413883c79 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c | |||
@@ -1168,7 +1168,6 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) | |||
1168 | 1168 | ||
1169 | master->dev.parent = &pdev->dev; | 1169 | master->dev.parent = &pdev->dev; |
1170 | master->dev.of_node = pdev->dev.of_node; | 1170 | master->dev.of_node = pdev->dev.of_node; |
1171 | ACPI_HANDLE_SET(&master->dev, ACPI_HANDLE(&pdev->dev)); | ||
1172 | /* the spi->mode bits understood by this driver: */ | 1171 | /* the spi->mode bits understood by this driver: */ |
1173 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; | 1172 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; |
1174 | 1173 | ||
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index e862ab8853aa..4188b2faac5c 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c | |||
@@ -994,25 +994,30 @@ static irqreturn_t s3c64xx_spi_irq(int irq, void *data) | |||
994 | { | 994 | { |
995 | struct s3c64xx_spi_driver_data *sdd = data; | 995 | struct s3c64xx_spi_driver_data *sdd = data; |
996 | struct spi_master *spi = sdd->master; | 996 | struct spi_master *spi = sdd->master; |
997 | unsigned int val; | 997 | unsigned int val, clr = 0; |
998 | 998 | ||
999 | val = readl(sdd->regs + S3C64XX_SPI_PENDING_CLR); | 999 | val = readl(sdd->regs + S3C64XX_SPI_STATUS); |
1000 | 1000 | ||
1001 | val &= S3C64XX_SPI_PND_RX_OVERRUN_CLR | | 1001 | if (val & S3C64XX_SPI_ST_RX_OVERRUN_ERR) { |
1002 | S3C64XX_SPI_PND_RX_UNDERRUN_CLR | | 1002 | clr = S3C64XX_SPI_PND_RX_OVERRUN_CLR; |
1003 | S3C64XX_SPI_PND_TX_OVERRUN_CLR | | ||
1004 | S3C64XX_SPI_PND_TX_UNDERRUN_CLR; | ||
1005 | |||
1006 | writel(val, sdd->regs + S3C64XX_SPI_PENDING_CLR); | ||
1007 | |||
1008 | if (val & S3C64XX_SPI_PND_RX_OVERRUN_CLR) | ||
1009 | dev_err(&spi->dev, "RX overrun\n"); | 1003 | dev_err(&spi->dev, "RX overrun\n"); |
1010 | if (val & S3C64XX_SPI_PND_RX_UNDERRUN_CLR) | 1004 | } |
1005 | if (val & S3C64XX_SPI_ST_RX_UNDERRUN_ERR) { | ||
1006 | clr |= S3C64XX_SPI_PND_RX_UNDERRUN_CLR; | ||
1011 | dev_err(&spi->dev, "RX underrun\n"); | 1007 | dev_err(&spi->dev, "RX underrun\n"); |
1012 | if (val & S3C64XX_SPI_PND_TX_OVERRUN_CLR) | 1008 | } |
1009 | if (val & S3C64XX_SPI_ST_TX_OVERRUN_ERR) { | ||
1010 | clr |= S3C64XX_SPI_PND_TX_OVERRUN_CLR; | ||
1013 | dev_err(&spi->dev, "TX overrun\n"); | 1011 | dev_err(&spi->dev, "TX overrun\n"); |
1014 | if (val & S3C64XX_SPI_PND_TX_UNDERRUN_CLR) | 1012 | } |
1013 | if (val & S3C64XX_SPI_ST_TX_UNDERRUN_ERR) { | ||
1014 | clr |= S3C64XX_SPI_PND_TX_UNDERRUN_CLR; | ||
1015 | dev_err(&spi->dev, "TX underrun\n"); | 1015 | dev_err(&spi->dev, "TX underrun\n"); |
1016 | } | ||
1017 | |||
1018 | /* Clear the pending irq by setting and then clearing it */ | ||
1019 | writel(clr, sdd->regs + S3C64XX_SPI_PENDING_CLR); | ||
1020 | writel(0, sdd->regs + S3C64XX_SPI_PENDING_CLR); | ||
1016 | 1021 | ||
1017 | return IRQ_HANDLED; | 1022 | return IRQ_HANDLED; |
1018 | } | 1023 | } |
@@ -1036,9 +1041,13 @@ static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel) | |||
1036 | writel(0, regs + S3C64XX_SPI_MODE_CFG); | 1041 | writel(0, regs + S3C64XX_SPI_MODE_CFG); |
1037 | writel(0, regs + S3C64XX_SPI_PACKET_CNT); | 1042 | writel(0, regs + S3C64XX_SPI_PACKET_CNT); |
1038 | 1043 | ||
1039 | /* Clear any irq pending bits */ | 1044 | /* Clear any irq pending bits, should set and clear the bits */ |
1040 | writel(readl(regs + S3C64XX_SPI_PENDING_CLR), | 1045 | val = S3C64XX_SPI_PND_RX_OVERRUN_CLR | |
1041 | regs + S3C64XX_SPI_PENDING_CLR); | 1046 | S3C64XX_SPI_PND_RX_UNDERRUN_CLR | |
1047 | S3C64XX_SPI_PND_TX_OVERRUN_CLR | | ||
1048 | S3C64XX_SPI_PND_TX_UNDERRUN_CLR; | ||
1049 | writel(val, regs + S3C64XX_SPI_PENDING_CLR); | ||
1050 | writel(0, regs + S3C64XX_SPI_PENDING_CLR); | ||
1042 | 1051 | ||
1043 | writel(0, regs + S3C64XX_SPI_SWAP_CFG); | 1052 | writel(0, regs + S3C64XX_SPI_SWAP_CFG); |
1044 | 1053 | ||
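The s3c64xx interrupt rework reads the status register to see which error actually fired, then acknowledges it by writing the matching PENDING_CLR bits followed by a write of zero (the controller expects the bit to be set and then cleared). Here is a minimal C sketch of that read-status / set-then-clear sequence against a fake register pair; the bit layout is invented for the example.

#include <stdint.h>
#include <stdio.h>

#define ST_RX_OVERRUN  (1u << 0)
#define PND_RX_OVERRUN (1u << 0)

/* Stand-ins for ioremapped hardware registers. */
static volatile uint32_t status_reg  = ST_RX_OVERRUN;
static volatile uint32_t pending_clr = 0;

static void handle_irq(void)
{
	uint32_t val = status_reg;       /* 1) read what actually happened */
	uint32_t clr = 0;

	if (val & ST_RX_OVERRUN) {
		clr |= PND_RX_OVERRUN;
		puts("RX overrun");
	}

	pending_clr = clr;               /* 2) set the clear bits ... */
	pending_clr = 0;                 /* 3) ... then write them back to zero */
}

int main(void)
{
	handle_irq();
	return 0;
}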
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c index b8698b389ef3..a829563f4713 100644 --- a/drivers/spi/spi-tegra20-slink.c +++ b/drivers/spi/spi-tegra20-slink.c | |||
@@ -858,21 +858,6 @@ static int tegra_slink_setup(struct spi_device *spi) | |||
858 | return 0; | 858 | return 0; |
859 | } | 859 | } |
860 | 860 | ||
861 | static int tegra_slink_prepare_transfer(struct spi_master *master) | ||
862 | { | ||
863 | struct tegra_slink_data *tspi = spi_master_get_devdata(master); | ||
864 | |||
865 | return pm_runtime_get_sync(tspi->dev); | ||
866 | } | ||
867 | |||
868 | static int tegra_slink_unprepare_transfer(struct spi_master *master) | ||
869 | { | ||
870 | struct tegra_slink_data *tspi = spi_master_get_devdata(master); | ||
871 | |||
872 | pm_runtime_put(tspi->dev); | ||
873 | return 0; | ||
874 | } | ||
875 | |||
876 | static int tegra_slink_transfer_one_message(struct spi_master *master, | 861 | static int tegra_slink_transfer_one_message(struct spi_master *master, |
877 | struct spi_message *msg) | 862 | struct spi_message *msg) |
878 | { | 863 | { |
@@ -885,6 +870,12 @@ static int tegra_slink_transfer_one_message(struct spi_master *master, | |||
885 | 870 | ||
886 | msg->status = 0; | 871 | msg->status = 0; |
887 | msg->actual_length = 0; | 872 | msg->actual_length = 0; |
873 | ret = pm_runtime_get_sync(tspi->dev); | ||
874 | if (ret < 0) { | ||
875 | dev_err(tspi->dev, "runtime get failed: %d\n", ret); | ||
876 | goto done; | ||
877 | } | ||
878 | |||
888 | single_xfer = list_is_singular(&msg->transfers); | 879 | single_xfer = list_is_singular(&msg->transfers); |
889 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { | 880 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { |
890 | INIT_COMPLETION(tspi->xfer_completion); | 881 | INIT_COMPLETION(tspi->xfer_completion); |
@@ -921,6 +912,8 @@ static int tegra_slink_transfer_one_message(struct spi_master *master, | |||
921 | exit: | 912 | exit: |
922 | tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND); | 913 | tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND); |
923 | tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2); | 914 | tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2); |
915 | pm_runtime_put(tspi->dev); | ||
916 | done: | ||
924 | msg->status = ret; | 917 | msg->status = ret; |
925 | spi_finalize_current_message(master); | 918 | spi_finalize_current_message(master); |
926 | return ret; | 919 | return ret; |
@@ -1148,9 +1141,7 @@ static int tegra_slink_probe(struct platform_device *pdev) | |||
1148 | /* the spi->mode bits understood by this driver: */ | 1141 | /* the spi->mode bits understood by this driver: */ |
1149 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; | 1142 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; |
1150 | master->setup = tegra_slink_setup; | 1143 | master->setup = tegra_slink_setup; |
1151 | master->prepare_transfer_hardware = tegra_slink_prepare_transfer; | ||
1152 | master->transfer_one_message = tegra_slink_transfer_one_message; | 1144 | master->transfer_one_message = tegra_slink_transfer_one_message; |
1153 | master->unprepare_transfer_hardware = tegra_slink_unprepare_transfer; | ||
1154 | master->num_chipselect = MAX_CHIP_SELECT; | 1145 | master->num_chipselect = MAX_CHIP_SELECT; |
1155 | master->bus_num = -1; | 1146 | master->bus_num = -1; |
1156 | 1147 | ||
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index f996c600eb8c..004b10f184d4 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
@@ -543,17 +543,16 @@ static void spi_pump_messages(struct kthread_work *work) | |||
543 | /* Lock queue and check for queue work */ | 543 | /* Lock queue and check for queue work */ |
544 | spin_lock_irqsave(&master->queue_lock, flags); | 544 | spin_lock_irqsave(&master->queue_lock, flags); |
545 | if (list_empty(&master->queue) || !master->running) { | 545 | if (list_empty(&master->queue) || !master->running) { |
546 | if (master->busy && master->unprepare_transfer_hardware) { | 546 | if (!master->busy) { |
547 | ret = master->unprepare_transfer_hardware(master); | 547 | spin_unlock_irqrestore(&master->queue_lock, flags); |
548 | if (ret) { | 548 | return; |
549 | spin_unlock_irqrestore(&master->queue_lock, flags); | ||
550 | dev_err(&master->dev, | ||
551 | "failed to unprepare transfer hardware\n"); | ||
552 | return; | ||
553 | } | ||
554 | } | 549 | } |
555 | master->busy = false; | 550 | master->busy = false; |
556 | spin_unlock_irqrestore(&master->queue_lock, flags); | 551 | spin_unlock_irqrestore(&master->queue_lock, flags); |
552 | if (master->unprepare_transfer_hardware && | ||
553 | master->unprepare_transfer_hardware(master)) | ||
554 | dev_err(&master->dev, | ||
555 | "failed to unprepare transfer hardware\n"); | ||
557 | return; | 556 | return; |
558 | } | 557 | } |
559 | 558 | ||
@@ -984,7 +983,7 @@ static void acpi_register_spi_devices(struct spi_master *master) | |||
984 | acpi_status status; | 983 | acpi_status status; |
985 | acpi_handle handle; | 984 | acpi_handle handle; |
986 | 985 | ||
987 | handle = ACPI_HANDLE(&master->dev); | 986 | handle = ACPI_HANDLE(master->dev.parent); |
988 | if (!handle) | 987 | if (!handle) |
989 | return; | 988 | return; |
990 | 989 | ||
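The spi.c hunk drops the queue spinlock before invoking unprepare_transfer_hardware(), since that callback may sleep and a spinlock holder must not. The sketch below uses locally defined stub lock helpers purely to show the ordering; it is not the kernel locking API.

#include <stdbool.h>
#include <stdio.h>

/* Stub "spinlock": callers must not block while it is held. */
static bool locked;
static void spin_lock_stub(void)   { locked = true; }
static void spin_unlock_stub(void) { locked = false; }

/* Callback that may sleep, so it must run with the lock dropped. */
static void unprepare_hardware(void)
{
	if (locked)
		puts("BUG: sleeping callback called under spinlock");
	else
		puts("hardware unprepared outside the lock");
}

static void pump_messages(bool queue_empty, bool busy)
{
	spin_lock_stub();
	if (queue_empty) {
		if (!busy) {
			spin_unlock_stub();
			return;
		}
		/* Release the lock first ... */
		spin_unlock_stub();
		/* ... only then call the potentially sleeping hook. */
		unprepare_hardware();
		return;
	}
	spin_unlock_stub();
}

int main(void)
{
	pump_messages(true, true);
	return 0;
}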
diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c index 4c0f6d883dd3..7b0bce936762 100644 --- a/drivers/ssb/driver_chipcommon_pmu.c +++ b/drivers/ssb/driver_chipcommon_pmu.c | |||
@@ -675,3 +675,32 @@ u32 ssb_pmu_get_controlclock(struct ssb_chipcommon *cc) | |||
675 | return 0; | 675 | return 0; |
676 | } | 676 | } |
677 | } | 677 | } |
678 | |||
679 | void ssb_pmu_spuravoid_pllupdate(struct ssb_chipcommon *cc, int spuravoid) | ||
680 | { | ||
681 | u32 pmu_ctl = 0; | ||
682 | |||
683 | switch (cc->dev->bus->chip_id) { | ||
684 | case 0x4322: | ||
685 | ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL0, 0x11100070); | ||
686 | ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL1, 0x1014140a); | ||
687 | ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL5, 0x88888854); | ||
688 | if (spuravoid == 1) | ||
689 | ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL2, 0x05201828); | ||
690 | else | ||
691 | ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL2, 0x05001828); | ||
692 | pmu_ctl = SSB_CHIPCO_PMU_CTL_PLL_UPD; | ||
693 | break; | ||
694 | case 43222: | ||
695 | /* TODO: BCM43222 requires updating PLLs too */ | ||
696 | return; | ||
697 | default: | ||
698 | ssb_printk(KERN_ERR PFX | ||
699 | "Unknown spuravoidance settings for chip 0x%04X, not changing PLL\n", | ||
700 | cc->dev->bus->chip_id); | ||
701 | return; | ||
702 | } | ||
703 | |||
704 | chipco_set32(cc, SSB_CHIPCO_PMU_CTL, pmu_ctl); | ||
705 | } | ||
706 | EXPORT_SYMBOL_GPL(ssb_pmu_spuravoid_pllupdate); | ||
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index ff1c5ee352cb..cbe48ab41745 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c | |||
@@ -409,6 +409,7 @@ static inline int core_alua_state_standby( | |||
409 | case REPORT_LUNS: | 409 | case REPORT_LUNS: |
410 | case RECEIVE_DIAGNOSTIC: | 410 | case RECEIVE_DIAGNOSTIC: |
411 | case SEND_DIAGNOSTIC: | 411 | case SEND_DIAGNOSTIC: |
412 | return 0; | ||
412 | case MAINTENANCE_IN: | 413 | case MAINTENANCE_IN: |
413 | switch (cdb[1] & 0x1f) { | 414 | switch (cdb[1] & 0x1f) { |
414 | case MI_REPORT_TARGET_PGS: | 415 | case MI_REPORT_TARGET_PGS: |
@@ -451,6 +452,7 @@ static inline int core_alua_state_unavailable( | |||
451 | switch (cdb[0]) { | 452 | switch (cdb[0]) { |
452 | case INQUIRY: | 453 | case INQUIRY: |
453 | case REPORT_LUNS: | 454 | case REPORT_LUNS: |
455 | return 0; | ||
454 | case MAINTENANCE_IN: | 456 | case MAINTENANCE_IN: |
455 | switch (cdb[1] & 0x1f) { | 457 | switch (cdb[1] & 0x1f) { |
456 | case MI_REPORT_TARGET_PGS: | 458 | case MI_REPORT_TARGET_PGS: |
@@ -491,6 +493,7 @@ static inline int core_alua_state_transition( | |||
491 | switch (cdb[0]) { | 493 | switch (cdb[0]) { |
492 | case INQUIRY: | 494 | case INQUIRY: |
493 | case REPORT_LUNS: | 495 | case REPORT_LUNS: |
496 | return 0; | ||
494 | case MAINTENANCE_IN: | 497 | case MAINTENANCE_IN: |
495 | switch (cdb[1] & 0x1f) { | 498 | switch (cdb[1] & 0x1f) { |
496 | case MI_REPORT_TARGET_PGS: | 499 | case MI_REPORT_TARGET_PGS: |
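The ALUA hunks add an explicit "return 0;" after the always-allowed opcodes; without it, those cases fell through into the MAINTENANCE_IN handling below. A self-contained C illustration of why that return matters (opcode values here are illustrative, not taken from the driver):

#include <stdio.h>

enum { OP_INQUIRY = 0x12, OP_REPORT_LUNS = 0xa0, OP_MAINTENANCE_IN = 0xa3 };

/* Returns 0 if the command is allowed in this ALUA-like state, 1 otherwise. */
static int state_check(unsigned char opcode, unsigned char subop)
{
	switch (opcode) {
	case OP_INQUIRY:
	case OP_REPORT_LUNS:
		return 0;                         /* without this, control falls through */
	case OP_MAINTENANCE_IN:
		return (subop == 0x0a) ? 0 : 1;   /* only the report-groups subcommand */
	default:
		return 1;
	}
}

int main(void)
{
	printf("INQUIRY allowed: %s\n",
	       state_check(OP_INQUIRY, 0) ? "no" : "yes");
	printf("MAINTENANCE_IN (other subop) allowed: %s\n",
	       state_check(OP_MAINTENANCE_IN, 0x05) ? "no" : "yes");
	return 0;
}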
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c index 484b6a3c9b03..302909ccf183 100644 --- a/drivers/tty/mxser.c +++ b/drivers/tty/mxser.c | |||
@@ -2643,9 +2643,9 @@ static int mxser_probe(struct pci_dev *pdev, | |||
2643 | mxvar_sdriver, brd->idx + i, &pdev->dev); | 2643 | mxvar_sdriver, brd->idx + i, &pdev->dev); |
2644 | if (IS_ERR(tty_dev)) { | 2644 | if (IS_ERR(tty_dev)) { |
2645 | retval = PTR_ERR(tty_dev); | 2645 | retval = PTR_ERR(tty_dev); |
2646 | for (i--; i >= 0; i--) | 2646 | for (; i > 0; i--) |
2647 | tty_unregister_device(mxvar_sdriver, | 2647 | tty_unregister_device(mxvar_sdriver, |
2648 | brd->idx + i); | 2648 | brd->idx + i - 1); |
2649 | goto err_relbrd; | 2649 | goto err_relbrd; |
2650 | } | 2650 | } |
2651 | } | 2651 | } |
@@ -2751,9 +2751,9 @@ static int __init mxser_module_init(void) | |||
2751 | tty_dev = tty_port_register_device(&brd->ports[i].port, | 2751 | tty_dev = tty_port_register_device(&brd->ports[i].port, |
2752 | mxvar_sdriver, brd->idx + i, NULL); | 2752 | mxvar_sdriver, brd->idx + i, NULL); |
2753 | if (IS_ERR(tty_dev)) { | 2753 | if (IS_ERR(tty_dev)) { |
2754 | for (i--; i >= 0; i--) | 2754 | for (; i > 0; i--) |
2755 | tty_unregister_device(mxvar_sdriver, | 2755 | tty_unregister_device(mxvar_sdriver, |
2756 | brd->idx + i); | 2756 | brd->idx + i - 1); |
2757 | for (i = 0; i < brd->info->nports; i++) | 2757 | for (i = 0; i < brd->info->nports; i++) |
2758 | tty_port_destroy(&brd->ports[i].port); | 2758 | tty_port_destroy(&brd->ports[i].port); |
2759 | free_irq(brd->irq, brd); | 2759 | free_irq(brd->irq, brd); |
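The mxser hunks rewrite the partial-registration rollback so the loop counts down while i > 0 and unregisters index i - 1, instead of decrementing first and testing i >= 0, a pattern that goes wrong if the counter is (or later becomes) unsigned. A small self-contained sketch of rolling back only the entries registered so far, using hypothetical register/unregister helpers:

#include <stdio.h>

#define NPORTS 4

static void unregister_port(unsigned int idx) { printf("unregister %u\n", idx); }

/* Returns 0 on success; fails at 'fail_at' to exercise the rollback. */
static int register_port(unsigned int idx, unsigned int fail_at)
{
	if (idx == fail_at)
		return -1;
	printf("register %u\n", idx);
	return 0;
}

static int register_all(unsigned int fail_at)
{
	unsigned int i;

	for (i = 0; i < NPORTS; i++) {
		if (register_port(i, fail_at)) {
			/* Unwind only what succeeded: i-1 down to 0.
			 * "i > 0" stays correct even for unsigned i. */
			for (; i > 0; i--)
				unregister_port(i - 1);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	register_all(2);   /* ports 0 and 1 get registered, then rolled back */
	return 0;
}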
diff --git a/drivers/tty/serial/8250/8250_pnp.c b/drivers/tty/serial/8250/8250_pnp.c index b3455a970a1d..35d9ab95c5cb 100644 --- a/drivers/tty/serial/8250/8250_pnp.c +++ b/drivers/tty/serial/8250/8250_pnp.c | |||
@@ -429,7 +429,6 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) | |||
429 | { | 429 | { |
430 | struct uart_8250_port uart; | 430 | struct uart_8250_port uart; |
431 | int ret, line, flags = dev_id->driver_data; | 431 | int ret, line, flags = dev_id->driver_data; |
432 | struct resource *res = NULL; | ||
433 | 432 | ||
434 | if (flags & UNKNOWN_DEV) { | 433 | if (flags & UNKNOWN_DEV) { |
435 | ret = serial_pnp_guess_board(dev); | 434 | ret = serial_pnp_guess_board(dev); |
@@ -440,12 +439,11 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) | |||
440 | memset(&uart, 0, sizeof(uart)); | 439 | memset(&uart, 0, sizeof(uart)); |
441 | if (pnp_irq_valid(dev, 0)) | 440 | if (pnp_irq_valid(dev, 0)) |
442 | uart.port.irq = pnp_irq(dev, 0); | 441 | uart.port.irq = pnp_irq(dev, 0); |
443 | if ((flags & CIR_PORT) && pnp_port_valid(dev, 2)) | 442 | if ((flags & CIR_PORT) && pnp_port_valid(dev, 2)) { |
444 | res = pnp_get_resource(dev, IORESOURCE_IO, 2); | 443 | uart.port.iobase = pnp_port_start(dev, 2); |
445 | else if (pnp_port_valid(dev, 0)) | 444 | uart.port.iotype = UPIO_PORT; |
446 | res = pnp_get_resource(dev, IORESOURCE_IO, 0); | 445 | } else if (pnp_port_valid(dev, 0)) { |
447 | if (pnp_resource_enabled(res)) { | 446 | uart.port.iobase = pnp_port_start(dev, 0); |
448 | uart.port.iobase = res->start; | ||
449 | uart.port.iotype = UPIO_PORT; | 447 | uart.port.iotype = UPIO_PORT; |
450 | } else if (pnp_mem_valid(dev, 0)) { | 448 | } else if (pnp_mem_valid(dev, 0)) { |
451 | uart.port.mapbase = pnp_mem_start(dev, 0); | 449 | uart.port.mapbase = pnp_mem_start(dev, 0); |
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c index 4dc41408ecb7..30d4f7a783cd 100644 --- a/drivers/tty/serial/omap-serial.c +++ b/drivers/tty/serial/omap-serial.c | |||
@@ -886,6 +886,17 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios, | |||
886 | serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR); | 886 | serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR); |
887 | /* FIFO ENABLE, DMA MODE */ | 887 | /* FIFO ENABLE, DMA MODE */ |
888 | 888 | ||
889 | up->scr |= OMAP_UART_SCR_RX_TRIG_GRANU1_MASK; | ||
890 | /* | ||
891 | * NOTE: Setting OMAP_UART_SCR_RX_TRIG_GRANU1_MASK | ||
892 | * enables a granularity of 1 for the RX TRIGGER | ||
893 | * level. Together with an RX FIFO trigger level | ||
894 | * of 1 (described as 16 characters below) and | ||
895 | * TLR[3:0] set to zero, this results in an RX FIFO | ||
896 | * threshold of 1 character instead of the 16 noted | ||
897 | * in the comment below. | ||
898 | */ | ||
899 | |||
889 | /* Set receive FIFO threshold to 16 characters and | 900 | /* Set receive FIFO threshold to 16 characters and |
890 | * transmit FIFO threshold to 16 spaces | 901 | * transmit FIFO threshold to 16 spaces |
891 | */ | 902 | */ |
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 05400acbc456..b0452688308c 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c | |||
@@ -941,6 +941,14 @@ void start_tty(struct tty_struct *tty) | |||
941 | 941 | ||
942 | EXPORT_SYMBOL(start_tty); | 942 | EXPORT_SYMBOL(start_tty); |
943 | 943 | ||
944 | static void tty_update_time(struct timespec *time) | ||
945 | { | ||
946 | unsigned long sec = get_seconds(); | ||
947 | sec -= sec % 60; | ||
948 | if ((long)(sec - time->tv_sec) > 0) | ||
949 | time->tv_sec = sec; | ||
950 | } | ||
951 | |||
944 | /** | 952 | /** |
945 | * tty_read - read method for tty device files | 953 | * tty_read - read method for tty device files |
946 | * @file: pointer to tty file | 954 | * @file: pointer to tty file |
@@ -960,10 +968,11 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count, | |||
960 | loff_t *ppos) | 968 | loff_t *ppos) |
961 | { | 969 | { |
962 | int i; | 970 | int i; |
971 | struct inode *inode = file_inode(file); | ||
963 | struct tty_struct *tty = file_tty(file); | 972 | struct tty_struct *tty = file_tty(file); |
964 | struct tty_ldisc *ld; | 973 | struct tty_ldisc *ld; |
965 | 974 | ||
966 | if (tty_paranoia_check(tty, file_inode(file), "tty_read")) | 975 | if (tty_paranoia_check(tty, inode, "tty_read")) |
967 | return -EIO; | 976 | return -EIO; |
968 | if (!tty || (test_bit(TTY_IO_ERROR, &tty->flags))) | 977 | if (!tty || (test_bit(TTY_IO_ERROR, &tty->flags))) |
969 | return -EIO; | 978 | return -EIO; |
@@ -977,6 +986,9 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count, | |||
977 | i = -EIO; | 986 | i = -EIO; |
978 | tty_ldisc_deref(ld); | 987 | tty_ldisc_deref(ld); |
979 | 988 | ||
989 | if (i > 0) | ||
990 | tty_update_time(&inode->i_atime); | ||
991 | |||
980 | return i; | 992 | return i; |
981 | } | 993 | } |
982 | 994 | ||
@@ -1077,8 +1089,10 @@ static inline ssize_t do_tty_write( | |||
1077 | break; | 1089 | break; |
1078 | cond_resched(); | 1090 | cond_resched(); |
1079 | } | 1091 | } |
1080 | if (written) | 1092 | if (written) { |
1093 | tty_update_time(&file_inode(file)->i_mtime); | ||
1081 | ret = written; | 1094 | ret = written; |
1095 | } | ||
1082 | out: | 1096 | out: |
1083 | tty_write_unlock(tty); | 1097 | tty_write_unlock(tty); |
1084 | return ret; | 1098 | return ret; |
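tty_update_time() above rounds the current time down to the minute and only ever moves the inode timestamp forward, so a busy tty does not dirty the inode on every read or write; the signed difference keeps the comparison sane across wrap. A user-space sketch of the same rounding, forward-only update built on time():

#include <stdio.h>
#include <time.h>

/* Advance *stamp to the current minute boundary, but never move it backwards. */
static void update_time(time_t *stamp)
{
	time_t sec = time(NULL);

	sec -= sec % 60;                 /* round down to the minute */
	if ((long)(sec - *stamp) > 0)    /* only ever move forward */
		*stamp = sec;
}

int main(void)
{
	time_t atime = 0;

	update_time(&atime);
	printf("atime now %ld\n", (long)atime);
	update_time(&atime);             /* same minute: no change, no churn */
	return 0;
}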
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c index 797f9d514732..65d4e55552c6 100644 --- a/drivers/usb/core/port.c +++ b/drivers/usb/core/port.c | |||
@@ -67,7 +67,6 @@ static void usb_port_device_release(struct device *dev) | |||
67 | { | 67 | { |
68 | struct usb_port *port_dev = to_usb_port(dev); | 68 | struct usb_port *port_dev = to_usb_port(dev); |
69 | 69 | ||
70 | dev_pm_qos_hide_flags(dev); | ||
71 | kfree(port_dev); | 70 | kfree(port_dev); |
72 | } | 71 | } |
73 | 72 | ||
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 8189cb6a86af..7abc5c81af2c 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c | |||
@@ -346,6 +346,7 @@ static long vfio_pci_ioctl(void *device_data, | |||
346 | 346 | ||
347 | if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) { | 347 | if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) { |
348 | size_t size; | 348 | size_t size; |
349 | int max = vfio_pci_get_irq_count(vdev, hdr.index); | ||
349 | 350 | ||
350 | if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL) | 351 | if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL) |
351 | size = sizeof(uint8_t); | 352 | size = sizeof(uint8_t); |
@@ -355,7 +356,7 @@ static long vfio_pci_ioctl(void *device_data, | |||
355 | return -EINVAL; | 356 | return -EINVAL; |
356 | 357 | ||
357 | if (hdr.argsz - minsz < hdr.count * size || | 358 | if (hdr.argsz - minsz < hdr.count * size || |
358 | hdr.count > vfio_pci_get_irq_count(vdev, hdr.index)) | 359 | hdr.start >= max || hdr.start + hdr.count > max) |
359 | return -EINVAL; | 360 | return -EINVAL; |
360 | 361 | ||
361 | data = memdup_user((void __user *)(arg + minsz), | 362 | data = memdup_user((void __user *)(arg + minsz), |
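The vfio hunk validates the user-supplied IRQ range against the number of IRQs the device actually exposes: both the start index and start + count must stay within max. A small self-contained sketch of the same range check, phrased slightly differently to avoid unsigned overflow; the function name is illustrative, not the VFIO uAPI:

#include <stdio.h>

/* Returns 0 if [start, start + count) fits inside [0, max). */
static int irq_range_ok(unsigned int start, unsigned int count, unsigned int max)
{
	if (start >= max)
		return -1;
	if (count > max - start)   /* avoids overflow in start + count */
		return -1;
	return 0;
}

int main(void)
{
	printf("%d\n", irq_range_ok(0, 4, 8));   /*  0: valid             */
	printf("%d\n", irq_range_ok(6, 4, 8));   /* -1: runs past max     */
	printf("%d\n", irq_range_ok(9, 1, 8));   /* -1: starts past max   */
	return 0;
}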
diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c index 2968b4934659..957a0b98a5d9 100644 --- a/drivers/vhost/tcm_vhost.c +++ b/drivers/vhost/tcm_vhost.c | |||
@@ -74,9 +74,8 @@ enum { | |||
74 | 74 | ||
75 | struct vhost_scsi { | 75 | struct vhost_scsi { |
76 | /* Protected by vhost_scsi->dev.mutex */ | 76 | /* Protected by vhost_scsi->dev.mutex */ |
77 | struct tcm_vhost_tpg *vs_tpg[VHOST_SCSI_MAX_TARGET]; | 77 | struct tcm_vhost_tpg **vs_tpg; |
78 | char vs_vhost_wwpn[TRANSPORT_IQN_LEN]; | 78 | char vs_vhost_wwpn[TRANSPORT_IQN_LEN]; |
79 | bool vs_endpoint; | ||
80 | 79 | ||
81 | struct vhost_dev dev; | 80 | struct vhost_dev dev; |
82 | struct vhost_virtqueue vqs[VHOST_SCSI_MAX_VQ]; | 81 | struct vhost_virtqueue vqs[VHOST_SCSI_MAX_VQ]; |
@@ -579,9 +578,27 @@ static void tcm_vhost_submission_work(struct work_struct *work) | |||
579 | } | 578 | } |
580 | } | 579 | } |
581 | 580 | ||
581 | static void vhost_scsi_send_bad_target(struct vhost_scsi *vs, | ||
582 | struct vhost_virtqueue *vq, int head, unsigned out) | ||
583 | { | ||
584 | struct virtio_scsi_cmd_resp __user *resp; | ||
585 | struct virtio_scsi_cmd_resp rsp; | ||
586 | int ret; | ||
587 | |||
588 | memset(&rsp, 0, sizeof(rsp)); | ||
589 | rsp.response = VIRTIO_SCSI_S_BAD_TARGET; | ||
590 | resp = vq->iov[out].iov_base; | ||
591 | ret = __copy_to_user(resp, &rsp, sizeof(rsp)); | ||
592 | if (!ret) | ||
593 | vhost_add_used_and_signal(&vs->dev, vq, head, 0); | ||
594 | else | ||
595 | pr_err("Faulted on virtio_scsi_cmd_resp\n"); | ||
596 | } | ||
597 | |||
582 | static void vhost_scsi_handle_vq(struct vhost_scsi *vs, | 598 | static void vhost_scsi_handle_vq(struct vhost_scsi *vs, |
583 | struct vhost_virtqueue *vq) | 599 | struct vhost_virtqueue *vq) |
584 | { | 600 | { |
601 | struct tcm_vhost_tpg **vs_tpg; | ||
585 | struct virtio_scsi_cmd_req v_req; | 602 | struct virtio_scsi_cmd_req v_req; |
586 | struct tcm_vhost_tpg *tv_tpg; | 603 | struct tcm_vhost_tpg *tv_tpg; |
587 | struct tcm_vhost_cmd *tv_cmd; | 604 | struct tcm_vhost_cmd *tv_cmd; |
@@ -590,8 +607,16 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs, | |||
590 | int head, ret; | 607 | int head, ret; |
591 | u8 target; | 608 | u8 target; |
592 | 609 | ||
593 | /* Must use ioctl VHOST_SCSI_SET_ENDPOINT */ | 610 | /* |
594 | if (unlikely(!vs->vs_endpoint)) | 611 | * We can handle the vq only after the endpoint is setup by calling the |
612 | * VHOST_SCSI_SET_ENDPOINT ioctl. | ||
613 | * | ||
614 | * TODO: Check that we are running from vhost_worker which acts | ||
615 | * as read-side critical section for vhost kind of RCU. | ||
616 | * See the comments in struct vhost_virtqueue in drivers/vhost/vhost.h | ||
617 | */ | ||
618 | vs_tpg = rcu_dereference_check(vq->private_data, 1); | ||
619 | if (!vs_tpg) | ||
595 | return; | 620 | return; |
596 | 621 | ||
597 | mutex_lock(&vq->mutex); | 622 | mutex_lock(&vq->mutex); |
@@ -661,23 +686,11 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs, | |||
661 | 686 | ||
662 | /* Extract the tpgt */ | 687 | /* Extract the tpgt */ |
663 | target = v_req.lun[1]; | 688 | target = v_req.lun[1]; |
664 | tv_tpg = vs->vs_tpg[target]; | 689 | tv_tpg = ACCESS_ONCE(vs_tpg[target]); |
665 | 690 | ||
666 | /* Target does not exist, fail the request */ | 691 | /* Target does not exist, fail the request */ |
667 | if (unlikely(!tv_tpg)) { | 692 | if (unlikely(!tv_tpg)) { |
668 | struct virtio_scsi_cmd_resp __user *resp; | 693 | vhost_scsi_send_bad_target(vs, vq, head, out); |
669 | struct virtio_scsi_cmd_resp rsp; | ||
670 | |||
671 | memset(&rsp, 0, sizeof(rsp)); | ||
672 | rsp.response = VIRTIO_SCSI_S_BAD_TARGET; | ||
673 | resp = vq->iov[out].iov_base; | ||
674 | ret = __copy_to_user(resp, &rsp, sizeof(rsp)); | ||
675 | if (!ret) | ||
676 | vhost_add_used_and_signal(&vs->dev, | ||
677 | vq, head, 0); | ||
678 | else | ||
679 | pr_err("Faulted on virtio_scsi_cmd_resp\n"); | ||
680 | |||
681 | continue; | 694 | continue; |
682 | } | 695 | } |
683 | 696 | ||
@@ -690,22 +703,13 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs, | |||
690 | if (IS_ERR(tv_cmd)) { | 703 | if (IS_ERR(tv_cmd)) { |
691 | vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n", | 704 | vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n", |
692 | PTR_ERR(tv_cmd)); | 705 | PTR_ERR(tv_cmd)); |
693 | break; | 706 | goto err_cmd; |
694 | } | 707 | } |
695 | pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction" | 708 | pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction" |
696 | ": %d\n", tv_cmd, exp_data_len, data_direction); | 709 | ": %d\n", tv_cmd, exp_data_len, data_direction); |
697 | 710 | ||
698 | tv_cmd->tvc_vhost = vs; | 711 | tv_cmd->tvc_vhost = vs; |
699 | tv_cmd->tvc_vq = vq; | 712 | tv_cmd->tvc_vq = vq; |
700 | |||
701 | if (unlikely(vq->iov[out].iov_len != | ||
702 | sizeof(struct virtio_scsi_cmd_resp))) { | ||
703 | vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu" | ||
704 | " bytes, out: %d, in: %d\n", | ||
705 | vq->iov[out].iov_len, out, in); | ||
706 | break; | ||
707 | } | ||
708 | |||
709 | tv_cmd->tvc_resp = vq->iov[out].iov_base; | 713 | tv_cmd->tvc_resp = vq->iov[out].iov_base; |
710 | 714 | ||
711 | /* | 715 | /* |
@@ -725,7 +729,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs, | |||
725 | " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", | 729 | " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", |
726 | scsi_command_size(tv_cmd->tvc_cdb), | 730 | scsi_command_size(tv_cmd->tvc_cdb), |
727 | TCM_VHOST_MAX_CDB_SIZE); | 731 | TCM_VHOST_MAX_CDB_SIZE); |
728 | break; /* TODO */ | 732 | goto err_free; |
729 | } | 733 | } |
730 | tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF; | 734 | tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF; |
731 | 735 | ||
@@ -738,7 +742,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs, | |||
738 | data_direction == DMA_TO_DEVICE); | 742 | data_direction == DMA_TO_DEVICE); |
739 | if (unlikely(ret)) { | 743 | if (unlikely(ret)) { |
740 | vq_err(vq, "Failed to map iov to sgl\n"); | 744 | vq_err(vq, "Failed to map iov to sgl\n"); |
741 | break; /* TODO */ | 745 | goto err_free; |
742 | } | 746 | } |
743 | } | 747 | } |
744 | 748 | ||
@@ -759,6 +763,13 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs, | |||
759 | } | 763 | } |
760 | 764 | ||
761 | mutex_unlock(&vq->mutex); | 765 | mutex_unlock(&vq->mutex); |
766 | return; | ||
767 | |||
768 | err_free: | ||
769 | vhost_scsi_free_cmd(tv_cmd); | ||
770 | err_cmd: | ||
771 | vhost_scsi_send_bad_target(vs, vq, head, out); | ||
772 | mutex_unlock(&vq->mutex); | ||
762 | } | 773 | } |
763 | 774 | ||
764 | static void vhost_scsi_ctl_handle_kick(struct vhost_work *work) | 775 | static void vhost_scsi_ctl_handle_kick(struct vhost_work *work) |
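The tcm_vhost rework above replaces the vs_endpoint flag with an RCU-managed vs_tpg pointer: the request handler loads it once (NULL means no endpoint is configured yet), while set/clear endpoint publish a new table and then flush the vhost work, which stands in for synchronize_rcu(). As a loose user-space analogy only, here is a publish/consume pattern with C11 atomics; it does not model the grace-period (flush) side, and the names are invented.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_TARGETS 4

struct tpg { int id; };

/* Published table of targets; NULL means "endpoint not set up yet". */
static _Atomic(struct tpg **) vs_tpg = NULL;

static void handle_request(unsigned int target)
{
	struct tpg **tbl = atomic_load_explicit(&vs_tpg, memory_order_acquire);

	if (!tbl) {                      /* endpoint not configured: drop */
		puts("no endpoint, ignoring request");
		return;
	}
	if (target < MAX_TARGETS && tbl[target])
		printf("dispatch to tpg %d\n", tbl[target]->id);
	else
		puts("bad target");
}

int main(void)
{
	static struct tpg t0 = { .id = 7 };
	struct tpg **tbl = calloc(MAX_TARGETS, sizeof(*tbl));

	handle_request(0);               /* before set_endpoint: ignored */

	tbl[0] = &t0;
	atomic_store_explicit(&vs_tpg, tbl, memory_order_release);  /* publish */

	handle_request(0);
	handle_request(3);               /* table present, target missing */

	free(tbl);
	return 0;
}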
@@ -780,6 +791,20 @@ static void vhost_scsi_handle_kick(struct vhost_work *work) | |||
780 | vhost_scsi_handle_vq(vs, vq); | 791 | vhost_scsi_handle_vq(vs, vq); |
781 | } | 792 | } |
782 | 793 | ||
794 | static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index) | ||
795 | { | ||
796 | vhost_poll_flush(&vs->dev.vqs[index].poll); | ||
797 | } | ||
798 | |||
799 | static void vhost_scsi_flush(struct vhost_scsi *vs) | ||
800 | { | ||
801 | int i; | ||
802 | |||
803 | for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) | ||
804 | vhost_scsi_flush_vq(vs, i); | ||
805 | vhost_work_flush(&vs->dev, &vs->vs_completion_work); | ||
806 | } | ||
807 | |||
783 | /* | 808 | /* |
784 | * Called from vhost_scsi_ioctl() context to walk the list of available | 809 | * Called from vhost_scsi_ioctl() context to walk the list of available |
785 | * tcm_vhost_tpg with an active struct tcm_vhost_nexus | 810 | * tcm_vhost_tpg with an active struct tcm_vhost_nexus |
@@ -790,8 +815,10 @@ static int vhost_scsi_set_endpoint( | |||
790 | { | 815 | { |
791 | struct tcm_vhost_tport *tv_tport; | 816 | struct tcm_vhost_tport *tv_tport; |
792 | struct tcm_vhost_tpg *tv_tpg; | 817 | struct tcm_vhost_tpg *tv_tpg; |
818 | struct tcm_vhost_tpg **vs_tpg; | ||
819 | struct vhost_virtqueue *vq; | ||
820 | int index, ret, i, len; | ||
793 | bool match = false; | 821 | bool match = false; |
794 | int index, ret; | ||
795 | 822 | ||
796 | mutex_lock(&vs->dev.mutex); | 823 | mutex_lock(&vs->dev.mutex); |
797 | /* Verify that ring has been setup correctly. */ | 824 | /* Verify that ring has been setup correctly. */ |
@@ -803,6 +830,15 @@ static int vhost_scsi_set_endpoint( | |||
803 | } | 830 | } |
804 | } | 831 | } |
805 | 832 | ||
833 | len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET; | ||
834 | vs_tpg = kzalloc(len, GFP_KERNEL); | ||
835 | if (!vs_tpg) { | ||
836 | mutex_unlock(&vs->dev.mutex); | ||
837 | return -ENOMEM; | ||
838 | } | ||
839 | if (vs->vs_tpg) | ||
840 | memcpy(vs_tpg, vs->vs_tpg, len); | ||
841 | |||
806 | mutex_lock(&tcm_vhost_mutex); | 842 | mutex_lock(&tcm_vhost_mutex); |
807 | list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) { | 843 | list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) { |
808 | mutex_lock(&tv_tpg->tv_tpg_mutex); | 844 | mutex_lock(&tv_tpg->tv_tpg_mutex); |
@@ -817,14 +853,15 @@ static int vhost_scsi_set_endpoint( | |||
817 | tv_tport = tv_tpg->tport; | 853 | tv_tport = tv_tpg->tport; |
818 | 854 | ||
819 | if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) { | 855 | if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) { |
820 | if (vs->vs_tpg[tv_tpg->tport_tpgt]) { | 856 | if (vs->vs_tpg && vs->vs_tpg[tv_tpg->tport_tpgt]) { |
821 | mutex_unlock(&tv_tpg->tv_tpg_mutex); | 857 | mutex_unlock(&tv_tpg->tv_tpg_mutex); |
822 | mutex_unlock(&tcm_vhost_mutex); | 858 | mutex_unlock(&tcm_vhost_mutex); |
823 | mutex_unlock(&vs->dev.mutex); | 859 | mutex_unlock(&vs->dev.mutex); |
860 | kfree(vs_tpg); | ||
824 | return -EEXIST; | 861 | return -EEXIST; |
825 | } | 862 | } |
826 | tv_tpg->tv_tpg_vhost_count++; | 863 | tv_tpg->tv_tpg_vhost_count++; |
827 | vs->vs_tpg[tv_tpg->tport_tpgt] = tv_tpg; | 864 | vs_tpg[tv_tpg->tport_tpgt] = tv_tpg; |
828 | smp_mb__after_atomic_inc(); | 865 | smp_mb__after_atomic_inc(); |
829 | match = true; | 866 | match = true; |
830 | } | 867 | } |
@@ -835,12 +872,27 @@ static int vhost_scsi_set_endpoint( | |||
835 | if (match) { | 872 | if (match) { |
836 | memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn, | 873 | memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn, |
837 | sizeof(vs->vs_vhost_wwpn)); | 874 | sizeof(vs->vs_vhost_wwpn)); |
838 | vs->vs_endpoint = true; | 875 | for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { |
876 | vq = &vs->vqs[i]; | ||
877 | /* Flushing the vhost_work acts as synchronize_rcu */ | ||
878 | mutex_lock(&vq->mutex); | ||
879 | rcu_assign_pointer(vq->private_data, vs_tpg); | ||
880 | vhost_init_used(vq); | ||
881 | mutex_unlock(&vq->mutex); | ||
882 | } | ||
839 | ret = 0; | 883 | ret = 0; |
840 | } else { | 884 | } else { |
841 | ret = -EEXIST; | 885 | ret = -EEXIST; |
842 | } | 886 | } |
843 | 887 | ||
888 | /* | ||
889 | * Act as synchronize_rcu to make sure access to | ||
890 | * old vs->vs_tpg is finished. | ||
891 | */ | ||
892 | vhost_scsi_flush(vs); | ||
893 | kfree(vs->vs_tpg); | ||
894 | vs->vs_tpg = vs_tpg; | ||
895 | |||
844 | mutex_unlock(&vs->dev.mutex); | 896 | mutex_unlock(&vs->dev.mutex); |
845 | return ret; | 897 | return ret; |
846 | } | 898 | } |
@@ -851,6 +903,8 @@ static int vhost_scsi_clear_endpoint( | |||
851 | { | 903 | { |
852 | struct tcm_vhost_tport *tv_tport; | 904 | struct tcm_vhost_tport *tv_tport; |
853 | struct tcm_vhost_tpg *tv_tpg; | 905 | struct tcm_vhost_tpg *tv_tpg; |
906 | struct vhost_virtqueue *vq; | ||
907 | bool match = false; | ||
854 | int index, ret, i; | 908 | int index, ret, i; |
855 | u8 target; | 909 | u8 target; |
856 | 910 | ||
@@ -862,9 +916,14 @@ static int vhost_scsi_clear_endpoint( | |||
862 | goto err_dev; | 916 | goto err_dev; |
863 | } | 917 | } |
864 | } | 918 | } |
919 | |||
920 | if (!vs->vs_tpg) { | ||
921 | mutex_unlock(&vs->dev.mutex); | ||
922 | return 0; | ||
923 | } | ||
924 | |||
865 | for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) { | 925 | for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) { |
866 | target = i; | 926 | target = i; |
867 | |||
868 | tv_tpg = vs->vs_tpg[target]; | 927 | tv_tpg = vs->vs_tpg[target]; |
869 | if (!tv_tpg) | 928 | if (!tv_tpg) |
870 | continue; | 929 | continue; |
@@ -886,10 +945,27 @@ static int vhost_scsi_clear_endpoint( | |||
886 | } | 945 | } |
887 | tv_tpg->tv_tpg_vhost_count--; | 946 | tv_tpg->tv_tpg_vhost_count--; |
888 | vs->vs_tpg[target] = NULL; | 947 | vs->vs_tpg[target] = NULL; |
889 | vs->vs_endpoint = false; | 948 | match = true; |
890 | mutex_unlock(&tv_tpg->tv_tpg_mutex); | 949 | mutex_unlock(&tv_tpg->tv_tpg_mutex); |
891 | } | 950 | } |
951 | if (match) { | ||
952 | for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { | ||
953 | vq = &vs->vqs[i]; | ||
954 | /* Flushing the vhost_work acts as synchronize_rcu */ | ||
955 | mutex_lock(&vq->mutex); | ||
956 | rcu_assign_pointer(vq->private_data, NULL); | ||
957 | mutex_unlock(&vq->mutex); | ||
958 | } | ||
959 | } | ||
960 | /* | ||
961 | * Act as synchronize_rcu to make sure access to | ||
962 | * old vs->vs_tpg is finished. | ||
963 | */ | ||
964 | vhost_scsi_flush(vs); | ||
965 | kfree(vs->vs_tpg); | ||
966 | vs->vs_tpg = NULL; | ||
892 | mutex_unlock(&vs->dev.mutex); | 967 | mutex_unlock(&vs->dev.mutex); |
968 | |||
893 | return 0; | 969 | return 0; |
894 | 970 | ||
895 | err_tpg: | 971 | err_tpg: |
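Because vs->vs_tpg is now allocated on demand, both ioctl paths gain NULL guards (the "vs->vs_tpg &&" test in set_endpoint and the early return above in clear_endpoint). Any consumer of the table has to tolerate its absence; a hedged sketch of that lookup discipline, with a hypothetical helper name:

static struct tcm_vhost_tpg *example_lookup_tpg(struct tcm_vhost_tpg **vs_tpg,
						u8 target)
{
	/* No endpoint configured yet, or target index out of range. */
	if (!vs_tpg || target >= VHOST_SCSI_MAX_TARGET)
		return NULL;
	return vs_tpg[target];
}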
@@ -899,6 +975,24 @@ err_dev: | |||
899 | return ret; | 975 | return ret; |
900 | } | 976 | } |
901 | 977 | ||
978 | static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features) | ||
979 | { | ||
980 | if (features & ~VHOST_SCSI_FEATURES) | ||
981 | return -EOPNOTSUPP; | ||
982 | |||
983 | mutex_lock(&vs->dev.mutex); | ||
984 | if ((features & (1 << VHOST_F_LOG_ALL)) && | ||
985 | !vhost_log_access_ok(&vs->dev)) { | ||
986 | mutex_unlock(&vs->dev.mutex); | ||
987 | return -EFAULT; | ||
988 | } | ||
989 | vs->dev.acked_features = features; | ||
990 | smp_wmb(); | ||
991 | vhost_scsi_flush(vs); | ||
992 | mutex_unlock(&vs->dev.mutex); | ||
993 | return 0; | ||
994 | } | ||
995 | |||
902 | static int vhost_scsi_open(struct inode *inode, struct file *f) | 996 | static int vhost_scsi_open(struct inode *inode, struct file *f) |
903 | { | 997 | { |
904 | struct vhost_scsi *s; | 998 | struct vhost_scsi *s; |
@@ -939,38 +1033,6 @@ static int vhost_scsi_release(struct inode *inode, struct file *f) | |||
939 | return 0; | 1033 | return 0; |
940 | } | 1034 | } |
941 | 1035 | ||
942 | static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index) | ||
943 | { | ||
944 | vhost_poll_flush(&vs->dev.vqs[index].poll); | ||
945 | } | ||
946 | |||
947 | static void vhost_scsi_flush(struct vhost_scsi *vs) | ||
948 | { | ||
949 | int i; | ||
950 | |||
951 | for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) | ||
952 | vhost_scsi_flush_vq(vs, i); | ||
953 | vhost_work_flush(&vs->dev, &vs->vs_completion_work); | ||
954 | } | ||
955 | |||
956 | static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features) | ||
957 | { | ||
958 | if (features & ~VHOST_SCSI_FEATURES) | ||
959 | return -EOPNOTSUPP; | ||
960 | |||
961 | mutex_lock(&vs->dev.mutex); | ||
962 | if ((features & (1 << VHOST_F_LOG_ALL)) && | ||
963 | !vhost_log_access_ok(&vs->dev)) { | ||
964 | mutex_unlock(&vs->dev.mutex); | ||
965 | return -EFAULT; | ||
966 | } | ||
967 | vs->dev.acked_features = features; | ||
968 | smp_wmb(); | ||
969 | vhost_scsi_flush(vs); | ||
970 | mutex_unlock(&vs->dev.mutex); | ||
971 | return 0; | ||
972 | } | ||
973 | |||
974 | static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl, | 1036 | static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl, |
975 | unsigned long arg) | 1037 | unsigned long arg) |
976 | { | 1038 | { |
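The block removed here is code motion rather than deletion: vhost_scsi_set_features() reappears earlier in this diff, and vhost_scsi_flush()/vhost_scsi_flush_vq() are evidently relocated above their new callers in the endpoint paths, since those paths still invoke them. For reference, the flush pattern the endpoint code now relies on, restated from the removed lines (a sketch with hypothetical names, not the relocated functions themselves):

static void example_flush_vq(struct vhost_scsi *vs, int index)
{
	vhost_poll_flush(&vs->dev.vqs[index].poll);
}

static void example_flush(struct vhost_scsi *vs)
{
	int i;

	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		example_flush_vq(vs, i);
	/* Drain queued completions after the per-queue polls. */
	vhost_work_flush(&vs->dev, &vs->vs_completion_work);
}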
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c index 7c254084b6a0..86291dcd964a 100644 --- a/drivers/video/fbmem.c +++ b/drivers/video/fbmem.c | |||
@@ -1373,15 +1373,12 @@ fb_mmap(struct file *file, struct vm_area_struct * vma) | |||
1373 | { | 1373 | { |
1374 | struct fb_info *info = file_fb_info(file); | 1374 | struct fb_info *info = file_fb_info(file); |
1375 | struct fb_ops *fb; | 1375 | struct fb_ops *fb; |
1376 | unsigned long off; | 1376 | unsigned long mmio_pgoff; |
1377 | unsigned long start; | 1377 | unsigned long start; |
1378 | u32 len; | 1378 | u32 len; |
1379 | 1379 | ||
1380 | if (!info) | 1380 | if (!info) |
1381 | return -ENODEV; | 1381 | return -ENODEV; |
1382 | if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) | ||
1383 | return -EINVAL; | ||
1384 | off = vma->vm_pgoff << PAGE_SHIFT; | ||
1385 | fb = info->fbops; | 1382 | fb = info->fbops; |
1386 | if (!fb) | 1383 | if (!fb) |
1387 | return -ENODEV; | 1384 | return -ENODEV; |
@@ -1393,32 +1390,24 @@ fb_mmap(struct file *file, struct vm_area_struct * vma) | |||
1393 | return res; | 1390 | return res; |
1394 | } | 1391 | } |
1395 | 1392 | ||
1396 | /* frame buffer memory */ | 1393 | /* |
1394 | * Ugh. This can be either the frame buffer mapping, or | ||
1395 | * if pgoff points past it, the mmio mapping. | ||
1396 | */ | ||
1397 | start = info->fix.smem_start; | 1397 | start = info->fix.smem_start; |
1398 | len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.smem_len); | 1398 | len = info->fix.smem_len; |
1399 | if (off >= len) { | 1399 | mmio_pgoff = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT; |
1400 | /* memory mapped io */ | 1400 | if (vma->vm_pgoff >= mmio_pgoff) { |
1401 | off -= len; | 1401 | vma->vm_pgoff -= mmio_pgoff; |
1402 | if (info->var.accel_flags) { | ||
1403 | mutex_unlock(&info->mm_lock); | ||
1404 | return -EINVAL; | ||
1405 | } | ||
1406 | start = info->fix.mmio_start; | 1402 | start = info->fix.mmio_start; |
1407 | len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.mmio_len); | 1403 | len = info->fix.mmio_len; |
1408 | } | 1404 | } |
1409 | mutex_unlock(&info->mm_lock); | 1405 | mutex_unlock(&info->mm_lock); |
1410 | start &= PAGE_MASK; | 1406 | |
1411 | if ((vma->vm_end - vma->vm_start + off) > len) | ||
1412 | return -EINVAL; | ||
1413 | off += start; | ||
1414 | vma->vm_pgoff = off >> PAGE_SHIFT; | ||
1415 | /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by io_remap_pfn_range()*/ | ||
1416 | vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); | 1407 | vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); |
1417 | fb_pgprotect(file, vma, off); | 1408 | fb_pgprotect(file, vma, start); |
1418 | if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, | 1409 | |
1419 | vma->vm_end - vma->vm_start, vma->vm_page_prot)) | 1410 | return vm_iomap_memory(vma, start, len); |
1420 | return -EAGAIN; | ||
1421 | return 0; | ||
1422 | } | 1411 | } |
1423 | 1412 | ||
1424 | static int | 1413 | static int |
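fb_mmap() now treats the device file as two contiguous regions selected by page offset: offsets below mmio_pgoff map the framebuffer (smem), offsets at or past it map the MMIO aperture, and vm_iomap_memory() takes over the bounds checks and io_remap_pfn_range() call the old code open-coded. A sketch of that offset split, assuming the fb_fix_screeninfo fields named in the hunk (the helper name is illustrative):

static int example_fb_mmap_region(struct fb_info *info,
				  struct vm_area_struct *vma)
{
	unsigned long start = info->fix.smem_start;
	u32 len = info->fix.smem_len;
	unsigned long mmio_pgoff =
		PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT;

	if (vma->vm_pgoff >= mmio_pgoff) {
		/* The caller asked for the MMIO aperture, not the framebuffer. */
		vma->vm_pgoff -= mmio_pgoff;
		start = info->fix.mmio_start;
		len = info->fix.mmio_len;
	}
	/* vm_iomap_memory() rejects requests that do not fit in [start, start+len). */
	return vm_iomap_memory(vma, start, len);
}

The real function additionally holds info->mm_lock around the fix-field reads and calls fb_pgprotect(), which this sketch omits.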
diff --git a/drivers/video/fbmon.c b/drivers/video/fbmon.c index 94ad0f71383c..7f6709991a5c 100644 --- a/drivers/video/fbmon.c +++ b/drivers/video/fbmon.c | |||
@@ -1400,7 +1400,7 @@ int fb_videomode_from_videomode(const struct videomode *vm, | |||
1400 | fbmode->vmode = 0; | 1400 | fbmode->vmode = 0; |
1401 | if (vm->dmt_flags & VESA_DMT_HSYNC_HIGH) | 1401 | if (vm->dmt_flags & VESA_DMT_HSYNC_HIGH) |
1402 | fbmode->sync |= FB_SYNC_HOR_HIGH_ACT; | 1402 | fbmode->sync |= FB_SYNC_HOR_HIGH_ACT; |
1403 | if (vm->dmt_flags & VESA_DMT_HSYNC_HIGH) | 1403 | if (vm->dmt_flags & VESA_DMT_VSYNC_HIGH) |
1404 | fbmode->sync |= FB_SYNC_VERT_HIGH_ACT; | 1404 | fbmode->sync |= FB_SYNC_VERT_HIGH_ACT; |
1405 | if (vm->data_flags & DISPLAY_FLAGS_INTERLACED) | 1405 | if (vm->data_flags & DISPLAY_FLAGS_INTERLACED) |
1406 | fbmode->vmode |= FB_VMODE_INTERLACED; | 1406 | fbmode->vmode |= FB_VMODE_INTERLACED; |
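This one-liner fixes a copy-and-paste bug: the horizontal-sync flag was tested twice, so a VESA mode with an active-high vertical sync never set FB_SYNC_VERT_HIGH_ACT. The corrected mapping, restated as a small illustrative helper:

static u32 example_sync_flags(u32 dmt_flags)
{
	u32 sync = 0;

	if (dmt_flags & VESA_DMT_HSYNC_HIGH)
		sync |= FB_SYNC_HOR_HIGH_ACT;
	if (dmt_flags & VESA_DMT_VSYNC_HIGH)	/* previously tested HSYNC again */
		sync |= FB_SYNC_VERT_HIGH_ACT;
	return sync;
}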
diff --git a/drivers/video/mmp/core.c b/drivers/video/mmp/core.c index 9ed83419038b..84de2632857a 100644 --- a/drivers/video/mmp/core.c +++ b/drivers/video/mmp/core.c | |||
@@ -252,7 +252,5 @@ void mmp_unregister_path(struct mmp_path *path) | |||
252 | 252 | ||
253 | kfree(path); | 253 | kfree(path); |
254 | mutex_unlock(&disp_lock); | 254 | mutex_unlock(&disp_lock); |
255 | |||
256 | dev_info(path->dev, "de-register %s\n", path->name); | ||
257 | } | 255 | } |
258 | EXPORT_SYMBOL_GPL(mmp_unregister_path); | 256 | EXPORT_SYMBOL_GPL(mmp_unregister_path); |
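The deleted dev_info() ran after kfree(path) and dereferenced the freed path for both its device pointer and its name, a use-after-free; the patch simply drops the message. If the log line were worth keeping, the ordering would have to be reversed, roughly as below (sketch only, omitting the list removal and disp_lock handling of the real function):

static void example_unregister(struct mmp_path *path)
{
	dev_info(path->dev, "de-register %s\n", path->name);
	kfree(path);	/* path must not be touched after this point */
}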
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c index 63203acef812..0264704a52be 100644 --- a/drivers/video/sh_mobile_lcdcfb.c +++ b/drivers/video/sh_mobile_lcdcfb.c | |||
@@ -858,6 +858,7 @@ static void sh_mobile_lcdc_geometry(struct sh_mobile_lcdc_chan *ch) | |||
858 | tmp = ((mode->xres & 7) << 24) | ((display_h_total & 7) << 16) | 858 | tmp = ((mode->xres & 7) << 24) | ((display_h_total & 7) << 16) |
859 | | ((mode->hsync_len & 7) << 8) | (hsync_pos & 7); | 859 | | ((mode->hsync_len & 7) << 8) | (hsync_pos & 7); |
860 | lcdc_write_chan(ch, LDHAJR, tmp); | 860 | lcdc_write_chan(ch, LDHAJR, tmp); |
861 | lcdc_write_chan_mirror(ch, LDHAJR, tmp); | ||
861 | } | 862 | } |
862 | 863 | ||
863 | static void sh_mobile_lcdc_overlay_setup(struct sh_mobile_lcdc_overlay *ovl) | 864 | static void sh_mobile_lcdc_overlay_setup(struct sh_mobile_lcdc_overlay *ovl) |
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c index b75db0186488..d4284458377e 100644 --- a/drivers/video/uvesafb.c +++ b/drivers/video/uvesafb.c | |||
@@ -1973,7 +1973,8 @@ static int uvesafb_init(void) | |||
1973 | err = -ENOMEM; | 1973 | err = -ENOMEM; |
1974 | 1974 | ||
1975 | if (err) { | 1975 | if (err) { |
1976 | platform_device_put(uvesafb_device); | 1976 | if (uvesafb_device) |
1977 | platform_device_put(uvesafb_device); | ||
1977 | platform_driver_unregister(&uvesafb_driver); | 1978 | platform_driver_unregister(&uvesafb_driver); |
1978 | cn_del_callback(&uvesafb_cn_id); | 1979 | cn_del_callback(&uvesafb_cn_id); |
1979 | return err; | 1980 | return err; |
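uvesafb_init() reaches this error path with uvesafb_device == NULL when platform_device_alloc() fails (err is set to -ENOMEM just above), so the reference drop is now guarded. The resulting cleanup ordering, restated as a fragment of that error path:

	if (err) {
		if (uvesafb_device)
			platform_device_put(uvesafb_device);
		platform_driver_unregister(&uvesafb_driver);
		cn_del_callback(&uvesafb_cn_id);
		return err;
	}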
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 9fcc70c11cea..e89fc3133972 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig | |||
@@ -117,7 +117,7 @@ config ARM_SP805_WATCHDOG | |||
117 | 117 | ||
118 | config AT91RM9200_WATCHDOG | 118 | config AT91RM9200_WATCHDOG |
119 | tristate "AT91RM9200 watchdog" | 119 | tristate "AT91RM9200 watchdog" |
120 | depends on ARCH_AT91 | 120 | depends on ARCH_AT91RM9200 |
121 | help | 121 | help |
122 | Watchdog timer embedded into AT91RM9200 chips. This will reboot your | 122 | Watchdog timer embedded into AT91RM9200 chips. This will reboot your |
123 | system when the timeout is reached. | 123 | system when the timeout is reached. |
diff --git a/drivers/xen/events.c b/drivers/xen/events.c index aa85881d17b2..2647ad8e1f19 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c | |||
@@ -1316,7 +1316,7 @@ static void __xen_evtchn_do_upcall(void) | |||
1316 | { | 1316 | { |
1317 | int start_word_idx, start_bit_idx; | 1317 | int start_word_idx, start_bit_idx; |
1318 | int word_idx, bit_idx; | 1318 | int word_idx, bit_idx; |
1319 | int i; | 1319 | int i, irq; |
1320 | int cpu = get_cpu(); | 1320 | int cpu = get_cpu(); |
1321 | struct shared_info *s = HYPERVISOR_shared_info; | 1321 | struct shared_info *s = HYPERVISOR_shared_info; |
1322 | struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); | 1322 | struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); |
@@ -1324,6 +1324,8 @@ static void __xen_evtchn_do_upcall(void) | |||
1324 | 1324 | ||
1325 | do { | 1325 | do { |
1326 | xen_ulong_t pending_words; | 1326 | xen_ulong_t pending_words; |
1327 | xen_ulong_t pending_bits; | ||
1328 | struct irq_desc *desc; | ||
1327 | 1329 | ||
1328 | vcpu_info->evtchn_upcall_pending = 0; | 1330 | vcpu_info->evtchn_upcall_pending = 0; |
1329 | 1331 | ||
@@ -1335,6 +1337,17 @@ static void __xen_evtchn_do_upcall(void) | |||
1335 | * selector flag. xchg_xen_ulong must contain an | 1337 | * selector flag. xchg_xen_ulong must contain an |
1336 | * appropriate barrier. | 1338 | * appropriate barrier. |
1337 | */ | 1339 | */ |
1340 | if ((irq = per_cpu(virq_to_irq, cpu)[VIRQ_TIMER]) != -1) { | ||
1341 | int evtchn = evtchn_from_irq(irq); | ||
1342 | word_idx = evtchn / BITS_PER_LONG; | ||
1343 | pending_bits = evtchn % BITS_PER_LONG; | ||
1344 | if (active_evtchns(cpu, s, word_idx) & (1ULL << pending_bits)) { | ||
1345 | desc = irq_to_desc(irq); | ||
1346 | if (desc) | ||
1347 | generic_handle_irq_desc(irq, desc); | ||
1348 | } | ||
1349 | } | ||
1350 | |||
1338 | pending_words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0); | 1351 | pending_words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0); |
1339 | 1352 | ||
1340 | start_word_idx = __this_cpu_read(current_word_idx); | 1353 | start_word_idx = __this_cpu_read(current_word_idx); |
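The added block gives the timer VIRQ priority: before the round-robin scan over pending_words, it resolves this CPU's VIRQ_TIMER binding to its event channel, and if that channel is pending and unmasked it is dispatched immediately, so timer delivery is not delayed behind other queued events. A sketch of that check, reusing the names from the hunk (an illustration of the ordering, not the exact upstream code):

	irq = per_cpu(virq_to_irq, cpu)[VIRQ_TIMER];
	if (irq != -1) {
		int evtchn = evtchn_from_irq(irq);
		xen_ulong_t bit = evtchn % BITS_PER_LONG;

		word_idx = evtchn / BITS_PER_LONG;
		/* active_evtchns() masks pending bits with the unmasked set. */
		if (active_evtchns(cpu, s, word_idx) & (1ULL << bit)) {
			desc = irq_to_desc(irq);
			if (desc)
				generic_handle_irq_desc(irq, desc);
		}
	}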
@@ -1343,7 +1356,6 @@ static void __xen_evtchn_do_upcall(void) | |||
1343 | word_idx = start_word_idx; | 1356 | word_idx = start_word_idx; |
1344 | 1357 | ||
1345 | for (i = 0; pending_words != 0; i++) { | 1358 | for (i = 0; pending_words != 0; i++) { |
1346 | xen_ulong_t pending_bits; | ||
1347 | xen_ulong_t words; | 1359 | xen_ulong_t words; |
1348 | 1360 | ||
1349 | words = MASK_LSBS(pending_words, word_idx); | 1361 | words = MASK_LSBS(pending_words, word_idx); |
@@ -1372,8 +1384,7 @@ static void __xen_evtchn_do_upcall(void) | |||
1372 | 1384 | ||
1373 | do { | 1385 | do { |
1374 | xen_ulong_t bits; | 1386 | xen_ulong_t bits; |
1375 | int port, irq; | 1387 | int port; |
1376 | struct irq_desc *desc; | ||
1377 | 1388 | ||
1378 | bits = MASK_LSBS(pending_bits, bit_idx); | 1389 | bits = MASK_LSBS(pending_bits, bit_idx); |
1379 | 1390 | ||