Diffstat (limited to 'drivers')
267 files changed, 3542 insertions, 1984 deletions
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 959d41acc108..d7d32c28829b 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -67,6 +67,8 @@ enum ec_command {
 #define ACPI_EC_DELAY       500  /* Wait 500ms max. during EC ops */
 #define ACPI_EC_UDELAY_GLK  1000 /* Wait 1ms max. to get global lock */
 #define ACPI_EC_MSI_UDELAY  550  /* Wait 550us for MSI EC */
+#define ACPI_EC_CLEAR_MAX   100  /* Maximum number of events to query
+                                  * when trying to clear the EC */
 
 enum {
     EC_FLAGS_QUERY_PENDING,      /* Query is pending */
@@ -116,6 +118,7 @@ EXPORT_SYMBOL(first_ec);
 static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */
 static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */
 static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
+static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
 
 /* --------------------------------------------------------------------------
                              Transaction Management
@@ -440,6 +443,29 @@ acpi_handle ec_get_handle(void)
 
 EXPORT_SYMBOL(ec_get_handle);
 
+static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data);
+
+/*
+ * Clears stale _Q events that might have accumulated in the EC.
+ * Run with locked ec mutex.
+ */
+static void acpi_ec_clear(struct acpi_ec *ec)
+{
+    int i, status;
+    u8 value = 0;
+
+    for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
+        status = acpi_ec_query_unlocked(ec, &value);
+        if (status || !value)
+            break;
+    }
+
+    if (unlikely(i == ACPI_EC_CLEAR_MAX))
+        pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
+    else
+        pr_info("%d stale EC events cleared\n", i);
+}
+
 void acpi_ec_block_transactions(void)
 {
     struct acpi_ec *ec = first_ec;
@@ -463,6 +489,10 @@ void acpi_ec_unblock_transactions(void)
     mutex_lock(&ec->mutex);
     /* Allow transactions to be carried out again */
     clear_bit(EC_FLAGS_BLOCKED, &ec->flags);
+
+    if (EC_FLAGS_CLEAR_ON_RESUME)
+        acpi_ec_clear(ec);
+
     mutex_unlock(&ec->mutex);
 }
 
@@ -821,6 +851,13 @@ static int acpi_ec_add(struct acpi_device *device)
 
     /* EC is fully operational, allow queries */
     clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
+
+    /* Clear stale _Q events if hardware might require that */
+    if (EC_FLAGS_CLEAR_ON_RESUME) {
+        mutex_lock(&ec->mutex);
+        acpi_ec_clear(ec);
+        mutex_unlock(&ec->mutex);
+    }
     return ret;
 }
 
@@ -922,6 +959,30 @@ static int ec_enlarge_storm_threshold(const struct dmi_system_id *id)
     return 0;
 }
 
+/*
+ * On some hardware it is necessary to clear events accumulated by the EC during
+ * sleep. These ECs stop reporting GPEs until they are manually polled, if too
+ * many events are accumulated. (e.g. Samsung Series 5/9 notebooks)
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=44161
+ *
+ * Ideally, the EC should also be instructed NOT to accumulate events during
+ * sleep (which Windows seems to do somehow), but the interface to control this
+ * behaviour is not known at this time.
+ *
+ * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx,
+ * however it is very likely that other Samsung models are affected.
+ *
+ * On systems which don't accumulate _Q events during sleep, this extra check
+ * should be harmless.
+ */
+static int ec_clear_on_resume(const struct dmi_system_id *id)
+{
+    pr_debug("Detected system needing EC poll on resume.\n");
+    EC_FLAGS_CLEAR_ON_RESUME = 1;
+    return 0;
+}
+
 static struct dmi_system_id ec_dmi_table[] __initdata = {
     {
     ec_skip_dsdt_scan, "Compal JFL92", {
@@ -965,6 +1026,9 @@ static struct dmi_system_id ec_dmi_table[] __initdata = {
     ec_validate_ecdt, "ASUS hardware", {
     DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."),
     DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL},
+    {
+    ec_clear_on_resume, "Samsung hardware", {
+    DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
     {},
 };
 
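Editor's note: the acpi_ec_clear() helper added above is essentially a bounded drain loop: keep querying the EC for pending _Q events until a query fails, returns an empty event, or a safety cap is hit. Below is a minimal, runnable userspace sketch of that pattern only; the queue, names and event values are made up for illustration and are not part of the driver.

#include <stdio.h>

#define CLEAR_MAX 100   /* safety cap, mirrors ACPI_EC_CLEAR_MAX */

/* hypothetical stand-in for acpi_ec_query_unlocked(): returns 0 (success)
 * and a non-zero event value while events remain, 0 once drained */
static int query_event(unsigned char *value)
{
    static int pending = 7;                 /* pretend 7 stale events */
    *value = pending ? 0xd0 + pending-- : 0;
    return 0;
}

static void drain_stale_events(void)
{
    unsigned char value = 0;
    int i, status;

    for (i = 0; i < CLEAR_MAX; i++) {
        status = query_event(&value);
        if (status || !value)               /* error, or queue is empty */
            break;
    }

    if (i == CLEAR_MAX)
        printf("warning: hit cap, %d stale events cleared\n", i);
    else
        printf("%d stale events cleared\n", i);
}

int main(void)
{
    drain_stale_events();
    return 0;
}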
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index b7201fc6f1e1..0bdacc5e26a3 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -77,18 +77,24 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
     switch (ares->type) {
     case ACPI_RESOURCE_TYPE_MEMORY24:
         memory24 = &ares->data.memory24;
+        if (!memory24->address_length)
+            return false;
         acpi_dev_get_memresource(res, memory24->minimum,
                                  memory24->address_length,
                                  memory24->write_protect);
         break;
     case ACPI_RESOURCE_TYPE_MEMORY32:
         memory32 = &ares->data.memory32;
+        if (!memory32->address_length)
+            return false;
         acpi_dev_get_memresource(res, memory32->minimum,
                                  memory32->address_length,
                                  memory32->write_protect);
         break;
     case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
         fixed_memory32 = &ares->data.fixed_memory32;
+        if (!fixed_memory32->address_length)
+            return false;
         acpi_dev_get_memresource(res, fixed_memory32->address,
                                  fixed_memory32->address_length,
                                  fixed_memory32->write_protect);
@@ -144,12 +150,16 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
     switch (ares->type) {
     case ACPI_RESOURCE_TYPE_IO:
         io = &ares->data.io;
+        if (!io->address_length)
+            return false;
         acpi_dev_get_ioresource(res, io->minimum,
                                 io->address_length,
                                 io->io_decode);
         break;
     case ACPI_RESOURCE_TYPE_FIXED_IO:
         fixed_io = &ares->data.fixed_io;
+        if (!fixed_io->address_length)
+            return false;
         acpi_dev_get_ioresource(res, fixed_io->address,
                                 fixed_io->address_length,
                                 ACPI_DECODE_10);
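Editor's note: every hunk in this file adds the same guard: a descriptor whose address_length is zero is rejected before it is turned into a struct resource. A small, runnable sketch of that validate-before-convert idea follows; the types and field names below are illustrative and are not the ACPI structures.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* illustrative descriptor, loosely modelled on the ACPI memory/IO records */
struct io_descriptor {
    uint64_t minimum;
    uint64_t address_length;
};

struct resource_range {
    uint64_t start;
    uint64_t end;
};

static bool descriptor_to_resource(const struct io_descriptor *desc,
                                   struct resource_range *res)
{
    if (!desc->address_length)      /* zero-length: nothing to claim */
        return false;

    res->start = desc->minimum;
    res->end = desc->minimum + desc->address_length - 1;
    return true;
}

int main(void)
{
    struct io_descriptor empty = { .minimum = 0x60, .address_length = 0 };
    struct resource_range r;

    printf("converted: %s\n",
           descriptor_to_resource(&empty, &r) ? "yes" : "no (skipped)");
    return 0;
}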
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index b718806657cd..c40fb2e81bbc 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -71,6 +71,17 @@ static int acpi_sleep_prepare(u32 acpi_state)
     return 0;
 }
 
+static bool acpi_sleep_state_supported(u8 sleep_state)
+{
+    acpi_status status;
+    u8 type_a, type_b;
+
+    status = acpi_get_sleep_type_data(sleep_state, &type_a, &type_b);
+    return ACPI_SUCCESS(status) && (!acpi_gbl_reduced_hardware
+        || (acpi_gbl_FADT.sleep_control.address
+            && acpi_gbl_FADT.sleep_status.address));
+}
+
 #ifdef CONFIG_ACPI_SLEEP
 static u32 acpi_target_sleep_state = ACPI_STATE_S0;
 
@@ -604,15 +615,9 @@ static void acpi_sleep_suspend_setup(void)
 {
     int i;
 
-    for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++) {
-        acpi_status status;
-        u8 type_a, type_b;
-
-        status = acpi_get_sleep_type_data(i, &type_a, &type_b);
-        if (ACPI_SUCCESS(status)) {
+    for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++)
+        if (acpi_sleep_state_supported(i))
             sleep_states[i] = 1;
-        }
-    }
 
     suspend_set_ops(old_suspend_ordering ?
         &acpi_suspend_ops_old : &acpi_suspend_ops);
@@ -740,11 +745,7 @@ static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
 
 static void acpi_sleep_hibernate_setup(void)
 {
-    acpi_status status;
-    u8 type_a, type_b;
-
-    status = acpi_get_sleep_type_data(ACPI_STATE_S4, &type_a, &type_b);
-    if (ACPI_FAILURE(status))
+    if (!acpi_sleep_state_supported(ACPI_STATE_S4))
         return;
 
     hibernation_set_ops(old_suspend_ordering ?
@@ -793,8 +794,6 @@ static void acpi_power_off(void)
 
 int __init acpi_sleep_init(void)
 {
-    acpi_status status;
-    u8 type_a, type_b;
     char supported[ACPI_S_STATE_COUNT * 3 + 1];
     char *pos = supported;
     int i;
@@ -806,8 +805,7 @@ int __init acpi_sleep_init(void)
     acpi_sleep_suspend_setup();
     acpi_sleep_hibernate_setup();
 
-    status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
-    if (ACPI_SUCCESS(status)) {
+    if (acpi_sleep_state_supported(ACPI_STATE_S5)) {
         sleep_states[ACPI_STATE_S5] = 1;
         pm_power_off_prepare = acpi_power_off_prepare;
         pm_power_off = acpi_power_off;
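Editor's note: the three call sites now share one predicate, acpi_sleep_state_supported(), which also folds in the hardware-reduced ACPI rule that the FADT sleep control and sleep status registers must both be present. A runnable userspace analogue of that consolidated check is sketched below; all the boolean inputs are stand-ins for firmware-provided facts, not the real ACPICA globals.

#include <stdbool.h>
#include <stdio.h>

/* illustrative stand-ins for what the firmware reports */
static bool have_sleep_type_data[8] = { false, true, true, true, true, true };
static bool reduced_hardware;            /* acpi_gbl_reduced_hardware analogue */
static bool sleep_control_reg = true;    /* FADT sleep_control.address != 0 */
static bool sleep_status_reg = true;     /* FADT sleep_status.address != 0 */

static bool sleep_state_supported(int state)
{
    /* _Sx data must exist; hardware-reduced platforms additionally need
     * both the sleep control and sleep status registers */
    return have_sleep_type_data[state] &&
           (!reduced_hardware || (sleep_control_reg && sleep_status_reg));
}

int main(void)
{
    for (int s = 1; s < 4; s++)          /* S1..S3, as in the suspend setup */
        printf("S%d supported: %s\n", s,
               sleep_state_supported(s) ? "yes" : "no");
    return 0;
}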
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 1a3dbd1b196e..8cb2522d592a 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4175,6 +4175,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 
     /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
     { "ST1000LM024 HN-M101MBB", "2AR10001",	ATA_HORKAGE_BROKEN_FPDMA_AA },
+    { "ST1000LM024 HN-M101MBB", "2BA30001",	ATA_HORKAGE_BROKEN_FPDMA_AA },
 
     /* Blacklist entries taken from Silicon Image 3124/3132
        Windows driver .inf file - also several Linux problem reports */
@@ -4224,7 +4225,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 
     /* devices that don't properly handle queued TRIM commands */
     { "Micron_M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
-    { "Crucial_CT???M500SSD1",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
+    { "Crucial_CT???M500SSD*",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
 
     /*
      * Some WD SATA-I drives spin up and down erratically when the link
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 545c4de412c3..db4e264eecb6 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -791,6 +791,32 @@ void * devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
 EXPORT_SYMBOL_GPL(devm_kmalloc);
 
 /**
+ * devm_kstrdup - Allocate resource managed space and
+ *                copy an existing string into that.
+ * @dev: Device to allocate memory for
+ * @s: the string to duplicate
+ * @gfp: the GFP mask used in the devm_kmalloc() call when
+ *       allocating memory
+ * RETURNS:
+ * Pointer to allocated string on success, NULL on failure.
+ */
+char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
+{
+    size_t size;
+    char *buf;
+
+    if (!s)
+        return NULL;
+
+    size = strlen(s) + 1;
+    buf = devm_kmalloc(dev, size, gfp);
+    if (buf)
+        memcpy(buf, s, size);
+    return buf;
+}
+EXPORT_SYMBOL_GPL(devm_kstrdup);
+
+/**
  * devm_kfree - Resource-managed kfree
  * @dev: Device this memory belongs to
  * @p: Memory to free
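Editor's note: devm_kstrdup() follows the usual devres pattern: the copy belongs to the device and is freed automatically when the device is unbound, so the driver never calls kfree() on it. Below is a hedged, kernel-style sketch of how a probe routine might use it; the driver name and the "demo-label" DT property are invented for illustration and are not an in-tree driver.

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

static int demo_probe(struct platform_device *pdev)
{
    const char *label = "default-label";
    char *copy;

    /* "demo-label" is a made-up DT property used only for illustration */
    of_property_read_string(pdev->dev.of_node, "demo-label", &label);

    /* the copy is owned by the device: freed automatically on unbind */
    copy = devm_kstrdup(&pdev->dev, label, GFP_KERNEL);
    if (!copy)
        return -ENOMEM;

    platform_set_drvdata(pdev, copy);
    return 0;
}

static struct platform_driver demo_driver = {
    .probe = demo_probe,
    .driver = { .name = "devm-kstrdup-demo" },
};
module_platform_driver(demo_driver);
MODULE_LICENSE("GPL");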
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 8184451b57c0..422b7d84f686 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -874,7 +874,7 @@ bio_pageinc(struct bio *bio)
         /* Non-zero page count for non-head members of
          * compound pages is no longer allowed by the kernel.
          */
-        page = compound_trans_head(bv.bv_page);
+        page = compound_head(bv.bv_page);
         atomic_inc(&page->_count);
     }
 }
@@ -887,7 +887,7 @@ bio_pagedec(struct bio *bio)
     struct bvec_iter iter;
 
     bio_for_each_segment(bv, bio, iter) {
-        page = compound_trans_head(bv.bv_page);
+        page = compound_head(bv.bv_page);
         atomic_dec(&page->_count);
     }
 }
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 516026954be6..d777bb7cea93 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -4498,7 +4498,7 @@ static int mtip_pci_probe(struct pci_dev *pdev,
     }
     dev_info(&pdev->dev, "NUMA node %d (closest: %d,%d, probe on %d:%d)\n",
         my_node, pcibus_to_node(pdev->bus), dev_to_node(&pdev->dev),
-        cpu_to_node(smp_processor_id()), smp_processor_id());
+        cpu_to_node(raw_smp_processor_id()), raw_smp_processor_id());
 
     dd = kzalloc_node(sizeof(struct driver_data), GFP_KERNEL, my_node);
     if (dd == NULL) {
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index b52e9a6d6aad..54174cb32feb 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -53,7 +53,7 @@
 #define MTIP_FTL_REBUILD_TIMEOUT_MS	2400000
 
 /* unaligned IO handling */
-#define MTIP_MAX_UNALIGNED_SLOTS	8
+#define MTIP_MAX_UNALIGNED_SLOTS	2
 
 /* Macro to extract the tag bit number from a tag value. */
 #define MTIP_TAG_BIT(tag)	(tag & 0x1F)
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 011e55d820b1..51c557cfd92b 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -612,6 +612,8 @@ static ssize_t disksize_store(struct device *dev,
 
     disksize = PAGE_ALIGN(disksize);
     meta = zram_meta_alloc(disksize);
+    if (!meta)
+        return -ENOMEM;
     down_write(&zram->init_lock);
     if (zram->init_done) {
         up_write(&zram->init_lock);
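Editor's note: the zram fix is the classic "check the allocation before touching shared state" rule: zram_meta_alloc() can fail, and the old code only discovered that after taking the init lock. A tiny, runnable sketch of that ordering follows; the lock, allocator and sizes are placeholders, not zram internals.

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;

/* stand-in for zram_meta_alloc(): may return NULL on failure */
static void *meta_alloc(size_t disksize)
{
    return disksize ? calloc(1, 64) : NULL;
}

static int set_disksize(size_t disksize)
{
    void *meta = meta_alloc(disksize);

    if (!meta)                        /* bail out before taking the lock */
        return -ENOMEM;

    pthread_mutex_lock(&init_lock);
    /* ... publish meta to the device here ... */
    pthread_mutex_unlock(&init_lock);

    free(meta);                       /* kept leak-free for the sketch */
    return 0;
}

int main(void)
{
    return set_disksize(4096) ? 1 : 0;
}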
diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c
index bd313f7816a8..c1af80bcdf20 100644
--- a/drivers/clk/at91/clk-master.c
+++ b/drivers/clk/at91/clk-master.c
@@ -242,7 +242,7 @@ of_at91_clk_master_setup(struct device_node *np, struct at91_pmc *pmc,
 
     irq = irq_of_parse_and_map(np, 0);
     if (!irq)
-        return;
+        goto out_free_characteristics;
 
     clk = at91_clk_register_master(pmc, irq, name, num_parents,
                                    parent_names, layout,
diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c
index 6a934a5296bd..05e04ce0f148 100644
--- a/drivers/clk/clk-nomadik.c
+++ b/drivers/clk/clk-nomadik.c
@@ -494,6 +494,9 @@ static const struct file_operations nomadik_src_clk_debugfs_ops = {
 
 static int __init nomadik_src_clk_init_debugfs(void)
 {
+    /* Vital for multiplatform */
+    if (!src_base)
+        return -ENODEV;
     src_pcksr0_boot = readl(src_base + SRC_PCKSR0);
     src_pcksr1_boot = readl(src_base + SRC_PCKSR1);
     debugfs_create_file("nomadik-src-clk", S_IFREG | S_IRUGO,
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 5517944495d8..c42e608af6bb 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -2226,24 +2226,25 @@ EXPORT_SYMBOL_GPL(devm_clk_unregister);
  */
 int __clk_get(struct clk *clk)
 {
-    if (clk && !try_module_get(clk->owner))
-        return 0;
+    if (clk) {
+        if (!try_module_get(clk->owner))
+            return 0;
 
-    kref_get(&clk->ref);
+        kref_get(&clk->ref);
+    }
     return 1;
 }
 
 void __clk_put(struct clk *clk)
 {
-    if (WARN_ON_ONCE(IS_ERR(clk)))
+    if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
         return;
 
     clk_prepare_lock();
     kref_put(&clk->ref, __clk_release);
     clk_prepare_unlock();
 
-    if (clk)
-        module_put(clk->owner);
+    module_put(clk->owner);
 }
 
 /*** clk rate change notifiers ***/
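Editor's note: both helpers are made tolerant of a NULL clk, which the clk API treats as a valid "optional clock not present" handle, while still warning on error pointers. The shape of that guard, reduced to a runnable standalone example with a made-up handle type, is sketched below.

#include <stdbool.h>
#include <stdio.h>

struct handle {
    int refcount;
};

/* NULL means "optional resource not present" and is not an error */
static bool handle_get(struct handle *h)
{
    if (h)
        h->refcount++;
    return true;
}

static void handle_put(struct handle *h)
{
    if (!h)                   /* tolerate the optional-NULL case */
        return;
    h->refcount--;
}

int main(void)
{
    struct handle h = { .refcount = 1 };

    handle_get(NULL);         /* no-op, still reported as success */
    handle_put(NULL);         /* no-op */
    handle_get(&h);
    handle_put(&h);
    printf("refcount: %d\n", h.refcount);
    return 0;
}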
diff --git a/drivers/clk/keystone/gate.c b/drivers/clk/keystone/gate.c
index 17a598398a53..86f1e362eafb 100644
--- a/drivers/clk/keystone/gate.c
+++ b/drivers/clk/keystone/gate.c
@@ -179,6 +179,7 @@ static struct clk *clk_register_psc(struct device *dev,
 
     init.name = name;
     init.ops = &clk_psc_ops;
+    init.flags = 0;
     init.parent_names = (parent_name ? &parent_name : NULL);
     init.num_parents = (parent_name ? 1 : 0);
 
diff --git a/drivers/clk/mvebu/armada-370.c b/drivers/clk/mvebu/armada-370.c
index 81a202d12a7a..bef198a83863 100644
--- a/drivers/clk/mvebu/armada-370.c
+++ b/drivers/clk/mvebu/armada-370.c
@@ -141,13 +141,6 @@ static const struct coreclk_soc_desc a370_coreclks = {
     .num_ratios = ARRAY_SIZE(a370_coreclk_ratios),
 };
 
-static void __init a370_coreclk_init(struct device_node *np)
-{
-    mvebu_coreclk_setup(np, &a370_coreclks);
-}
-CLK_OF_DECLARE(a370_core_clk, "marvell,armada-370-core-clock",
-               a370_coreclk_init);
-
 /*
  * Clock Gating Control
  */
@@ -168,9 +161,15 @@ static const struct clk_gating_soc_desc a370_gating_desc[] __initconst = {
     { }
 };
 
-static void __init a370_clk_gating_init(struct device_node *np)
+static void __init a370_clk_init(struct device_node *np)
 {
-    mvebu_clk_gating_setup(np, a370_gating_desc);
+    struct device_node *cgnp =
+        of_find_compatible_node(NULL, NULL, "marvell,armada-370-gating-clock");
+
+    mvebu_coreclk_setup(np, &a370_coreclks);
+
+    if (cgnp)
+        mvebu_clk_gating_setup(cgnp, a370_gating_desc);
 }
-CLK_OF_DECLARE(a370_clk_gating, "marvell,armada-370-gating-clock",
-               a370_clk_gating_init);
+CLK_OF_DECLARE(a370_clk, "marvell,armada-370-core-clock", a370_clk_init);
+
diff --git a/drivers/clk/mvebu/armada-xp.c b/drivers/clk/mvebu/armada-xp.c
index 9922c4475aa8..b3094315a3c0 100644
--- a/drivers/clk/mvebu/armada-xp.c
+++ b/drivers/clk/mvebu/armada-xp.c
@@ -158,13 +158,6 @@ static const struct coreclk_soc_desc axp_coreclks = {
     .num_ratios = ARRAY_SIZE(axp_coreclk_ratios),
 };
 
-static void __init axp_coreclk_init(struct device_node *np)
-{
-    mvebu_coreclk_setup(np, &axp_coreclks);
-}
-CLK_OF_DECLARE(axp_core_clk, "marvell,armada-xp-core-clock",
-               axp_coreclk_init);
-
 /*
  * Clock Gating Control
  */
@@ -202,9 +195,14 @@ static const struct clk_gating_soc_desc axp_gating_desc[] __initconst = {
     { }
 };
 
-static void __init axp_clk_gating_init(struct device_node *np)
+static void __init axp_clk_init(struct device_node *np)
 {
-    mvebu_clk_gating_setup(np, axp_gating_desc);
+    struct device_node *cgnp =
+        of_find_compatible_node(NULL, NULL, "marvell,armada-xp-gating-clock");
+
+    mvebu_coreclk_setup(np, &axp_coreclks);
+
+    if (cgnp)
+        mvebu_clk_gating_setup(cgnp, axp_gating_desc);
 }
-CLK_OF_DECLARE(axp_clk_gating, "marvell,armada-xp-gating-clock",
-               axp_clk_gating_init);
+CLK_OF_DECLARE(axp_clk, "marvell,armada-xp-core-clock", axp_clk_init);
diff --git a/drivers/clk/mvebu/dove.c b/drivers/clk/mvebu/dove.c
index 38aee1e3f242..b8c2424ac926 100644
--- a/drivers/clk/mvebu/dove.c
+++ b/drivers/clk/mvebu/dove.c
@@ -154,12 +154,6 @@ static const struct coreclk_soc_desc dove_coreclks = {
     .num_ratios = ARRAY_SIZE(dove_coreclk_ratios),
 };
 
-static void __init dove_coreclk_init(struct device_node *np)
-{
-    mvebu_coreclk_setup(np, &dove_coreclks);
-}
-CLK_OF_DECLARE(dove_core_clk, "marvell,dove-core-clock", dove_coreclk_init);
-
 /*
  * Clock Gating Control
  */
@@ -186,9 +180,14 @@ static const struct clk_gating_soc_desc dove_gating_desc[] __initconst = {
     { }
 };
 
-static void __init dove_clk_gating_init(struct device_node *np)
+static void __init dove_clk_init(struct device_node *np)
 {
-    mvebu_clk_gating_setup(np, dove_gating_desc);
+    struct device_node *cgnp =
+        of_find_compatible_node(NULL, NULL, "marvell,dove-gating-clock");
+
+    mvebu_coreclk_setup(np, &dove_coreclks);
+
+    if (cgnp)
+        mvebu_clk_gating_setup(cgnp, dove_gating_desc);
 }
-CLK_OF_DECLARE(dove_clk_gating, "marvell,dove-gating-clock",
-               dove_clk_gating_init);
+CLK_OF_DECLARE(dove_clk, "marvell,dove-core-clock", dove_clk_init);
diff --git a/drivers/clk/mvebu/kirkwood.c b/drivers/clk/mvebu/kirkwood.c
index 2636a55f29f9..ddb666a86500 100644
--- a/drivers/clk/mvebu/kirkwood.c
+++ b/drivers/clk/mvebu/kirkwood.c
@@ -193,13 +193,6 @@ static const struct coreclk_soc_desc kirkwood_coreclks = {
     .num_ratios = ARRAY_SIZE(kirkwood_coreclk_ratios),
 };
 
-static void __init kirkwood_coreclk_init(struct device_node *np)
-{
-    mvebu_coreclk_setup(np, &kirkwood_coreclks);
-}
-CLK_OF_DECLARE(kirkwood_core_clk, "marvell,kirkwood-core-clock",
-               kirkwood_coreclk_init);
-
 static const struct coreclk_soc_desc mv88f6180_coreclks = {
     .get_tclk_freq = kirkwood_get_tclk_freq,
     .get_cpu_freq = mv88f6180_get_cpu_freq,
@@ -208,13 +201,6 @@ static const struct coreclk_soc_desc mv88f6180_coreclks = {
     .num_ratios = ARRAY_SIZE(kirkwood_coreclk_ratios),
 };
 
-static void __init mv88f6180_coreclk_init(struct device_node *np)
-{
-    mvebu_coreclk_setup(np, &mv88f6180_coreclks);
-}
-CLK_OF_DECLARE(mv88f6180_core_clk, "marvell,mv88f6180-core-clock",
-               mv88f6180_coreclk_init);
-
 /*
  * Clock Gating Control
  */
@@ -239,9 +225,21 @@ static const struct clk_gating_soc_desc kirkwood_gating_desc[] __initconst = {
     { }
 };
 
-static void __init kirkwood_clk_gating_init(struct device_node *np)
+static void __init kirkwood_clk_init(struct device_node *np)
 {
-    mvebu_clk_gating_setup(np, kirkwood_gating_desc);
+    struct device_node *cgnp =
+        of_find_compatible_node(NULL, NULL, "marvell,kirkwood-gating-clock");
+
+
+    if (of_device_is_compatible(np, "marvell,mv88f6180-core-clock"))
+        mvebu_coreclk_setup(np, &mv88f6180_coreclks);
+    else
+        mvebu_coreclk_setup(np, &kirkwood_coreclks);
+
+    if (cgnp)
+        mvebu_clk_gating_setup(cgnp, kirkwood_gating_desc);
 }
-CLK_OF_DECLARE(kirkwood_clk_gating, "marvell,kirkwood-gating-clock",
-               kirkwood_clk_gating_init);
+CLK_OF_DECLARE(kirkwood_clk, "marvell,kirkwood-core-clock",
+               kirkwood_clk_init);
+CLK_OF_DECLARE(mv88f6180_clk, "marvell,mv88f6180-core-clock",
+               kirkwood_clk_init);
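Editor's note: the four mvebu conversions above (armada-370, armada-xp, dove, kirkwood) all fold the gating-clock setup into the core-clock initializer so that the two always run in a fixed order; the gating node is looked up by compatible string and only used if it exists. A hedged kernel-style sketch of that shape follows; the "vendor,demo-*" compatibles and the two setup helpers are placeholders, not the mvebu API.

#include <linux/clk-provider.h>
#include <linux/of.h>

/* hypothetical SoC-specific helpers standing in for
 * mvebu_coreclk_setup() / mvebu_clk_gating_setup() */
static void __init demo_coreclk_setup(struct device_node *np) { }
static void __init demo_gating_setup(struct device_node *np) { }

static void __init demo_clk_init(struct device_node *np)
{
    struct device_node *cgnp =
        of_find_compatible_node(NULL, NULL, "vendor,demo-gating-clock");

    /* core clocks first: the gating clocks are parented on them */
    demo_coreclk_setup(np);

    if (cgnp)
        demo_gating_setup(cgnp);
}
CLK_OF_DECLARE(demo_clk, "vendor,demo-core-clock", demo_clk_init);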
diff --git a/drivers/clk/shmobile/clk-rcar-gen2.c b/drivers/clk/shmobile/clk-rcar-gen2.c
index a59ec217a124..99c27b1c625b 100644
--- a/drivers/clk/shmobile/clk-rcar-gen2.c
+++ b/drivers/clk/shmobile/clk-rcar-gen2.c
@@ -26,6 +26,8 @@ struct rcar_gen2_cpg {
     void __iomem *reg;
 };
 
+#define CPG_FRQCRB			0x00000004
+#define CPG_FRQCRB_KICK			BIT(31)
 #define CPG_SDCKCR			0x00000074
 #define CPG_PLL0CR			0x000000d8
 #define CPG_FRQCRC			0x000000e0
@@ -45,6 +47,7 @@ struct rcar_gen2_cpg {
 struct cpg_z_clk {
     struct clk_hw hw;
     void __iomem *reg;
+    void __iomem *kick_reg;
 };
 
 #define to_z_clk(_hw)	container_of(_hw, struct cpg_z_clk, hw)
@@ -83,17 +86,45 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
 {
     struct cpg_z_clk *zclk = to_z_clk(hw);
     unsigned int mult;
-    u32 val;
+    u32 val, kick;
+    unsigned int i;
 
     mult = div_u64((u64)rate * 32, parent_rate);
     mult = clamp(mult, 1U, 32U);
 
+    if (clk_readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
+        return -EBUSY;
+
     val = clk_readl(zclk->reg);
     val &= ~CPG_FRQCRC_ZFC_MASK;
     val |= (32 - mult) << CPG_FRQCRC_ZFC_SHIFT;
     clk_writel(val, zclk->reg);
 
-    return 0;
+    /*
+     * Set KICK bit in FRQCRB to update hardware setting and wait for
+     * clock change completion.
+     */
+    kick = clk_readl(zclk->kick_reg);
+    kick |= CPG_FRQCRB_KICK;
+    clk_writel(kick, zclk->kick_reg);
+
+    /*
+     * Note: There is no HW information about the worst case latency.
+     *
+     * Using experimental measurements, it seems that no more than
+     * ~10 iterations are needed, independently of the CPU rate.
+     * Since this value might be dependant of external xtal rate, pll1
+     * rate or even the other emulation clocks rate, use 1000 as a
+     * "super" safe value.
+     */
+    for (i = 1000; i; i--) {
+        if (!(clk_readl(zclk->kick_reg) & CPG_FRQCRB_KICK))
+            return 0;
+
+        cpu_relax();
+    }
+
+    return -ETIMEDOUT;
 }
 
 static const struct clk_ops cpg_z_clk_ops = {
@@ -120,6 +151,7 @@ static struct clk * __init cpg_z_clk_register(struct rcar_gen2_cpg *cpg)
     init.num_parents = 1;
 
     zclk->reg = cpg->reg + CPG_FRQCRC;
+    zclk->kick_reg = cpg->reg + CPG_FRQCRB;
     zclk->hw.init = &init;
 
     clk = clk_register(NULL, &zclk->hw);
@@ -186,7 +218,7 @@ rcar_gen2_cpg_register_clock(struct device_node *np, struct rcar_gen2_cpg *cpg,
                              const char *name)
 {
     const struct clk_div_table *table = NULL;
-    const char *parent_name = "main";
+    const char *parent_name;
     unsigned int shift;
     unsigned int mult = 1;
     unsigned int div = 1;
@@ -201,23 +233,31 @@ rcar_gen2_cpg_register_clock(struct device_node *np, struct rcar_gen2_cpg *cpg,
          * the multiplier value.
          */
         u32 value = clk_readl(cpg->reg + CPG_PLL0CR);
+        parent_name = "main";
         mult = ((value >> 24) & ((1 << 7) - 1)) + 1;
     } else if (!strcmp(name, "pll1")) {
+        parent_name = "main";
         mult = config->pll1_mult / 2;
     } else if (!strcmp(name, "pll3")) {
+        parent_name = "main";
         mult = config->pll3_mult;
     } else if (!strcmp(name, "lb")) {
+        parent_name = "pll1_div2";
         div = cpg_mode & BIT(18) ? 36 : 24;
     } else if (!strcmp(name, "qspi")) {
+        parent_name = "pll1_div2";
         div = (cpg_mode & (BIT(3) | BIT(2) | BIT(1))) == BIT(2)
-            ? 16 : 20;
+            ? 8 : 10;
     } else if (!strcmp(name, "sdh")) {
+        parent_name = "pll1_div2";
         table = cpg_sdh_div_table;
         shift = 8;
     } else if (!strcmp(name, "sd0")) {
+        parent_name = "pll1_div2";
        table = cpg_sd01_div_table;
         shift = 4;
     } else if (!strcmp(name, "sd1")) {
+        parent_name = "pll1_div2";
         table = cpg_sd01_div_table;
         shift = 0;
     } else if (!strcmp(name, "z")) {
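Editor's note: the Z-clock change follows a common "write, kick, poll" sequence: refuse to start while a previous transition is still in flight, program the divider, set the KICK bit, then poll it with a generous bounded loop. A runnable userspace analogue of that control flow is shown below; register access is simulated (the fake hardware completes immediately), and the 1000-iteration cap simply mirrors the driver's conservative choice.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define KICK (1u << 31)

static uint32_t frqcrb;                 /* simulated kick register */

static uint32_t read_kick(void) { return frqcrb; }
static void write_kick(uint32_t v)
{
    frqcrb = v & ~KICK;                 /* pretend hardware completes at once */
}

static int set_divider(unsigned int div)
{
    unsigned int i;

    if (read_kick() & KICK)             /* a transition is still in flight */
        return -EBUSY;

    /* ... program the divider field here ... */
    (void)div;

    write_kick(read_kick() | KICK);     /* start the transition */

    for (i = 1000; i; i--) {            /* bounded wait for completion */
        if (!(read_kick() & KICK))
            return 0;
    }
    return -ETIMEDOUT;
}

int main(void)
{
    printf("set_divider: %d\n", set_divider(8));
    return 0;
}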
diff --git a/drivers/clk/tegra/clk-divider.c b/drivers/clk/tegra/clk-divider.c
index 4d75b1f37e3a..290f9c1a3749 100644
--- a/drivers/clk/tegra/clk-divider.c
+++ b/drivers/clk/tegra/clk-divider.c
@@ -59,7 +59,7 @@ static int get_div(struct tegra_clk_frac_div *divider, unsigned long rate,
         return 0;
 
     if (divider_ux1 > get_max_div(divider))
-        return -EINVAL;
+        return get_max_div(divider);
 
     return divider_ux1;
 }
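Editor's note: rather than failing a rate request that would need a divider larger than the hardware supports, get_div() now saturates at the maximum, so callers get the lowest achievable rate instead of an error. The saturation idea, stripped of the Tegra fractional-divider details, is sketched below with made-up numbers.

#include <stdio.h>

#define MAX_DIV 255u

/* pick an integer divisor for a requested rate, saturating at the limit */
static unsigned int pick_div(unsigned long parent_rate, unsigned long rate)
{
    unsigned long div = parent_rate / rate;

    if (div == 0)
        div = 1;
    if (div > MAX_DIV)            /* previously an error; now clamp */
        div = MAX_DIV;
    return (unsigned int)div;
}

int main(void)
{
    /* 408 MHz parent, 32.768 kHz request: clamps to 255 */
    printf("%u\n", pick_div(408000000UL, 32768UL));
    return 0;
}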
diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h
index cf0c323f2c36..c39613c519af 100644
--- a/drivers/clk/tegra/clk-id.h
+++ b/drivers/clk/tegra/clk-id.h
@@ -180,9 +180,13 @@ enum clk_id {
     tegra_clk_sbc6_8,
     tegra_clk_sclk,
     tegra_clk_sdmmc1,
+    tegra_clk_sdmmc1_8,
     tegra_clk_sdmmc2,
+    tegra_clk_sdmmc2_8,
     tegra_clk_sdmmc3,
+    tegra_clk_sdmmc3_8,
     tegra_clk_sdmmc4,
+    tegra_clk_sdmmc4_8,
     tegra_clk_se,
     tegra_clk_soc_therm,
     tegra_clk_sor0,
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
index 5c35885f4a7c..1fa5c3f33b20 100644
--- a/drivers/clk/tegra/clk-tegra-periph.c
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -371,9 +371,7 @@ static const char *mux_pllp3_pllc_clkm[] = {
 static const char *mux_pllm_pllc_pllp_plla_pllc2_c3_clkm[] = {
     "pll_m", "pll_c", "pll_p", "pll_a", "pll_c2", "pll_c3", "clk_m"
 };
-static u32 mux_pllm_pllc_pllp_plla_pllc2_c3_clkm_idx[] = {
-    [0] = 0, [1] = 1, [2] = 2, [3] = 3, [4] = 4, [5] = 6,
-};
+#define mux_pllm_pllc_pllp_plla_pllc2_c3_clkm_idx NULL
 
 static const char *mux_pllm_pllc2_c_c3_pllp_plla_pllc4[] = {
     "pll_m", "pll_c2", "pll_c", "pll_c3", "pll_p", "pll_a_out0", "pll_c4",
@@ -465,6 +463,10 @@ static struct tegra_periph_init_data periph_clks[] = {
     MUX("adx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_ADX1, 180, TEGRA_PERIPH_ON_APB, tegra_clk_adx1),
     MUX("amx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_AMX1, 185, TEGRA_PERIPH_ON_APB, tegra_clk_amx1),
     MUX("vi_sensor2", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI_SENSOR2, 20, TEGRA_PERIPH_NO_RESET, tegra_clk_vi_sensor2),
+    MUX8("sdmmc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC1, 14, 0, tegra_clk_sdmmc1_8),
+    MUX8("sdmmc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC2, 9, 0, tegra_clk_sdmmc2_8),
+    MUX8("sdmmc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC3, 69, 0, tegra_clk_sdmmc3_8),
+    MUX8("sdmmc4", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC4, 15, 0, tegra_clk_sdmmc4_8),
     MUX8("sbc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC1, 41, TEGRA_PERIPH_ON_APB, tegra_clk_sbc1_8),
     MUX8("sbc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC2, 44, TEGRA_PERIPH_ON_APB, tegra_clk_sbc2_8),
     MUX8("sbc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC3, 46, TEGRA_PERIPH_ON_APB, tegra_clk_sbc3_8),
@@ -492,7 +494,7 @@ static struct tegra_periph_init_data periph_clks[] = {
     UART("uartb", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTB, 7, tegra_clk_uartb),
     UART("uartc", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTC, 55, tegra_clk_uartc),
     UART("uartd", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTD, 65, tegra_clk_uartd),
-    UART("uarte", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTE, 65, tegra_clk_uarte),
+    UART("uarte", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTE, 66, tegra_clk_uarte),
     XUSB("xusb_host_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_HOST_SRC, 143, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_host_src),
     XUSB("xusb_falcon_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_FALCON_SRC, 143, TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_falcon_src),
     XUSB("xusb_fs_src", mux_clkm_48M_pllp_480M, CLK_SOURCE_XUSB_FS_SRC, 143, TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_fs_src),
diff --git a/drivers/clk/tegra/clk-tegra-super-gen4.c b/drivers/clk/tegra/clk-tegra-super-gen4.c
index 05dce4aa2c11..feb3201c85ce 100644
--- a/drivers/clk/tegra/clk-tegra-super-gen4.c
+++ b/drivers/clk/tegra/clk-tegra-super-gen4.c
@@ -120,7 +120,7 @@ void __init tegra_super_clk_gen4_init(void __iomem *clk_base,
                     ARRAY_SIZE(cclk_lp_parents),
                     CLK_SET_RATE_PARENT,
                     clk_base + CCLKLP_BURST_POLICY,
-                    0, 4, 8, 9, NULL);
+                    TEGRA_DIVIDER_2, 4, 8, 9, NULL);
         *dt_clk = clk;
     }
 
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index 90d9d25f2228..80431f0fb268 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -682,12 +682,12 @@ static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = {
     [tegra_clk_timer] = { .dt_id = TEGRA114_CLK_TIMER, .present = true },
     [tegra_clk_uarta] = { .dt_id = TEGRA114_CLK_UARTA, .present = true },
     [tegra_clk_uartd] = { .dt_id = TEGRA114_CLK_UARTD, .present = true },
-    [tegra_clk_sdmmc2] = { .dt_id = TEGRA114_CLK_SDMMC2, .present = true },
+    [tegra_clk_sdmmc2_8] = { .dt_id = TEGRA114_CLK_SDMMC2, .present = true },
     [tegra_clk_i2s1] = { .dt_id = TEGRA114_CLK_I2S1, .present = true },
     [tegra_clk_i2c1] = { .dt_id = TEGRA114_CLK_I2C1, .present = true },
     [tegra_clk_ndflash] = { .dt_id = TEGRA114_CLK_NDFLASH, .present = true },
-    [tegra_clk_sdmmc1] = { .dt_id = TEGRA114_CLK_SDMMC1, .present = true },
-    [tegra_clk_sdmmc4] = { .dt_id = TEGRA114_CLK_SDMMC4, .present = true },
+    [tegra_clk_sdmmc1_8] = { .dt_id = TEGRA114_CLK_SDMMC1, .present = true },
+    [tegra_clk_sdmmc4_8] = { .dt_id = TEGRA114_CLK_SDMMC4, .present = true },
     [tegra_clk_pwm] = { .dt_id = TEGRA114_CLK_PWM, .present = true },
     [tegra_clk_i2s0] = { .dt_id = TEGRA114_CLK_I2S0, .present = true },
     [tegra_clk_i2s2] = { .dt_id = TEGRA114_CLK_I2S2, .present = true },
@@ -723,7 +723,7 @@ static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = {
     [tegra_clk_bsev] = { .dt_id = TEGRA114_CLK_BSEV, .present = true },
     [tegra_clk_i2c3] = { .dt_id = TEGRA114_CLK_I2C3, .present = true },
     [tegra_clk_sbc4_8] = { .dt_id = TEGRA114_CLK_SBC4, .present = true },
-    [tegra_clk_sdmmc3] = { .dt_id = TEGRA114_CLK_SDMMC3, .present = true },
+    [tegra_clk_sdmmc3_8] = { .dt_id = TEGRA114_CLK_SDMMC3, .present = true },
     [tegra_clk_owr] = { .dt_id = TEGRA114_CLK_OWR, .present = true },
     [tegra_clk_csite] = { .dt_id = TEGRA114_CLK_CSITE, .present = true },
     [tegra_clk_la] = { .dt_id = TEGRA114_CLK_LA, .present = true },
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c index aff86b5bc745..166e02f16c8a 100644 --- a/drivers/clk/tegra/clk-tegra124.c +++ b/drivers/clk/tegra/clk-tegra124.c | |||
| @@ -516,11 +516,11 @@ static struct div_nmp pllp_nmp = { | |||
| 516 | }; | 516 | }; |
| 517 | 517 | ||
| 518 | static struct tegra_clk_pll_freq_table pll_p_freq_table[] = { | 518 | static struct tegra_clk_pll_freq_table pll_p_freq_table[] = { |
| 519 | {12000000, 216000000, 432, 12, 1, 8}, | 519 | {12000000, 408000000, 408, 12, 0, 8}, |
| 520 | {13000000, 216000000, 432, 13, 1, 8}, | 520 | {13000000, 408000000, 408, 13, 0, 8}, |
| 521 | {16800000, 216000000, 360, 14, 1, 8}, | 521 | {16800000, 408000000, 340, 14, 0, 8}, |
| 522 | {19200000, 216000000, 360, 16, 1, 8}, | 522 | {19200000, 408000000, 340, 16, 0, 8}, |
| 523 | {26000000, 216000000, 432, 26, 1, 8}, | 523 | {26000000, 408000000, 408, 26, 0, 8}, |
| 524 | {0, 0, 0, 0, 0, 0}, | 524 | {0, 0, 0, 0, 0, 0}, |
| 525 | }; | 525 | }; |
| 526 | 526 | ||
| @@ -570,6 +570,15 @@ static struct tegra_clk_pll_params pll_a_params = { | |||
| 570 | .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_USE_LOCK, | 570 | .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_USE_LOCK, |
| 571 | }; | 571 | }; |
| 572 | 572 | ||
| 573 | static struct div_nmp plld_nmp = { | ||
| 574 | .divm_shift = 0, | ||
| 575 | .divm_width = 5, | ||
| 576 | .divn_shift = 8, | ||
| 577 | .divn_width = 11, | ||
| 578 | .divp_shift = 20, | ||
| 579 | .divp_width = 3, | ||
| 580 | }; | ||
| 581 | |||
| 573 | static struct tegra_clk_pll_freq_table pll_d_freq_table[] = { | 582 | static struct tegra_clk_pll_freq_table pll_d_freq_table[] = { |
| 574 | {12000000, 216000000, 864, 12, 4, 12}, | 583 | {12000000, 216000000, 864, 12, 4, 12}, |
| 575 | {13000000, 216000000, 864, 13, 4, 12}, | 584 | {13000000, 216000000, 864, 13, 4, 12}, |
| @@ -603,19 +612,18 @@ static struct tegra_clk_pll_params pll_d_params = { | |||
| 603 | .lock_mask = PLL_BASE_LOCK, | 612 | .lock_mask = PLL_BASE_LOCK, |
| 604 | .lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE, | 613 | .lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE, |
| 605 | .lock_delay = 1000, | 614 | .lock_delay = 1000, |
| 606 | .div_nmp = &pllp_nmp, | 615 | .div_nmp = &plld_nmp, |
| 607 | .freq_table = pll_d_freq_table, | 616 | .freq_table = pll_d_freq_table, |
| 608 | .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON | | 617 | .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON | |
| 609 | TEGRA_PLL_USE_LOCK, | 618 | TEGRA_PLL_USE_LOCK, |
| 610 | }; | 619 | }; |
| 611 | 620 | ||
| 612 | static struct tegra_clk_pll_freq_table tegra124_pll_d2_freq_table[] = { | 621 | static struct tegra_clk_pll_freq_table tegra124_pll_d2_freq_table[] = { |
| 613 | { 12000000, 148500000, 99, 1, 8}, | 622 | { 12000000, 594000000, 99, 1, 2}, |
| 614 | { 12000000, 594000000, 99, 1, 1}, | 623 | { 13000000, 594000000, 91, 1, 2}, /* actual: 591.5 MHz */ |
| 615 | { 13000000, 594000000, 91, 1, 1}, /* actual: 591.5 MHz */ | 624 | { 16800000, 594000000, 71, 1, 2}, /* actual: 596.4 MHz */ |
| 616 | { 16800000, 594000000, 71, 1, 1}, /* actual: 596.4 MHz */ | 625 | { 19200000, 594000000, 62, 1, 2}, /* actual: 595.2 MHz */ |
| 617 | { 19200000, 594000000, 62, 1, 1}, /* actual: 595.2 MHz */ | 626 | { 26000000, 594000000, 91, 2, 2}, /* actual: 591.5 MHz */ |
| 618 | { 26000000, 594000000, 91, 2, 1}, /* actual: 591.5 MHz */ | ||
| 619 | { 0, 0, 0, 0, 0, 0 }, | 627 | { 0, 0, 0, 0, 0, 0 }, |
| 620 | }; | 628 | }; |
| 621 | 629 | ||
| @@ -753,21 +761,19 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = { | |||
| 753 | [tegra_clk_rtc] = { .dt_id = TEGRA124_CLK_RTC, .present = true }, | 761 | [tegra_clk_rtc] = { .dt_id = TEGRA124_CLK_RTC, .present = true }, |
| 754 | [tegra_clk_timer] = { .dt_id = TEGRA124_CLK_TIMER, .present = true }, | 762 | [tegra_clk_timer] = { .dt_id = TEGRA124_CLK_TIMER, .present = true }, |
| 755 | [tegra_clk_uarta] = { .dt_id = TEGRA124_CLK_UARTA, .present = true }, | 763 | [tegra_clk_uarta] = { .dt_id = TEGRA124_CLK_UARTA, .present = true }, |
| 756 | [tegra_clk_sdmmc2] = { .dt_id = TEGRA124_CLK_SDMMC2, .present = true }, | 764 | [tegra_clk_sdmmc2_8] = { .dt_id = TEGRA124_CLK_SDMMC2, .present = true }, |
| 757 | [tegra_clk_i2s1] = { .dt_id = TEGRA124_CLK_I2S1, .present = true }, | 765 | [tegra_clk_i2s1] = { .dt_id = TEGRA124_CLK_I2S1, .present = true }, |
| 758 | [tegra_clk_i2c1] = { .dt_id = TEGRA124_CLK_I2C1, .present = true }, | 766 | [tegra_clk_i2c1] = { .dt_id = TEGRA124_CLK_I2C1, .present = true }, |
| 759 | [tegra_clk_ndflash] = { .dt_id = TEGRA124_CLK_NDFLASH, .present = true }, | 767 | [tegra_clk_ndflash] = { .dt_id = TEGRA124_CLK_NDFLASH, .present = true }, |
| 760 | [tegra_clk_sdmmc1] = { .dt_id = TEGRA124_CLK_SDMMC1, .present = true }, | 768 | [tegra_clk_sdmmc1_8] = { .dt_id = TEGRA124_CLK_SDMMC1, .present = true }, |
| 761 | [tegra_clk_sdmmc4] = { .dt_id = TEGRA124_CLK_SDMMC4, .present = true }, | 769 | [tegra_clk_sdmmc4_8] = { .dt_id = TEGRA124_CLK_SDMMC4, .present = true }, |
| 762 | [tegra_clk_pwm] = { .dt_id = TEGRA124_CLK_PWM, .present = true }, | 770 | [tegra_clk_pwm] = { .dt_id = TEGRA124_CLK_PWM, .present = true }, |
| 763 | [tegra_clk_i2s2] = { .dt_id = TEGRA124_CLK_I2S2, .present = true }, | 771 | [tegra_clk_i2s2] = { .dt_id = TEGRA124_CLK_I2S2, .present = true }, |
| 764 | [tegra_clk_gr2d] = { .dt_id = TEGRA124_CLK_GR_2D, .present = true }, | ||
| 765 | [tegra_clk_usbd] = { .dt_id = TEGRA124_CLK_USBD, .present = true }, | 772 | [tegra_clk_usbd] = { .dt_id = TEGRA124_CLK_USBD, .present = true }, |
| 766 | [tegra_clk_isp_8] = { .dt_id = TEGRA124_CLK_ISP, .present = true }, | 773 | [tegra_clk_isp_8] = { .dt_id = TEGRA124_CLK_ISP, .present = true }, |
| 767 | [tegra_clk_gr3d] = { .dt_id = TEGRA124_CLK_GR_3D, .present = true }, | ||
| 768 | [tegra_clk_disp2] = { .dt_id = TEGRA124_CLK_DISP2, .present = true }, | 774 | [tegra_clk_disp2] = { .dt_id = TEGRA124_CLK_DISP2, .present = true }, |
| 769 | [tegra_clk_disp1] = { .dt_id = TEGRA124_CLK_DISP1, .present = true }, | 775 | [tegra_clk_disp1] = { .dt_id = TEGRA124_CLK_DISP1, .present = true }, |
| 770 | [tegra_clk_host1x] = { .dt_id = TEGRA124_CLK_HOST1X, .present = true }, | 776 | [tegra_clk_host1x_8] = { .dt_id = TEGRA124_CLK_HOST1X, .present = true }, |
| 771 | [tegra_clk_vcp] = { .dt_id = TEGRA124_CLK_VCP, .present = true }, | 777 | [tegra_clk_vcp] = { .dt_id = TEGRA124_CLK_VCP, .present = true }, |
| 772 | [tegra_clk_i2s0] = { .dt_id = TEGRA124_CLK_I2S0, .present = true }, | 778 | [tegra_clk_i2s0] = { .dt_id = TEGRA124_CLK_I2S0, .present = true }, |
| 773 | [tegra_clk_apbdma] = { .dt_id = TEGRA124_CLK_APBDMA, .present = true }, | 779 | [tegra_clk_apbdma] = { .dt_id = TEGRA124_CLK_APBDMA, .present = true }, |
| @@ -794,7 +800,7 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = { | |||
| 794 | [tegra_clk_uartd] = { .dt_id = TEGRA124_CLK_UARTD, .present = true }, | 800 | [tegra_clk_uartd] = { .dt_id = TEGRA124_CLK_UARTD, .present = true }, |
| 795 | [tegra_clk_i2c3] = { .dt_id = TEGRA124_CLK_I2C3, .present = true }, | 801 | [tegra_clk_i2c3] = { .dt_id = TEGRA124_CLK_I2C3, .present = true }, |
| 796 | [tegra_clk_sbc4] = { .dt_id = TEGRA124_CLK_SBC4, .present = true }, | 802 | [tegra_clk_sbc4] = { .dt_id = TEGRA124_CLK_SBC4, .present = true }, |
| 797 | [tegra_clk_sdmmc3] = { .dt_id = TEGRA124_CLK_SDMMC3, .present = true }, | 803 | [tegra_clk_sdmmc3_8] = { .dt_id = TEGRA124_CLK_SDMMC3, .present = true }, |
| 798 | [tegra_clk_pcie] = { .dt_id = TEGRA124_CLK_PCIE, .present = true }, | 804 | [tegra_clk_pcie] = { .dt_id = TEGRA124_CLK_PCIE, .present = true }, |
| 799 | [tegra_clk_owr] = { .dt_id = TEGRA124_CLK_OWR, .present = true }, | 805 | [tegra_clk_owr] = { .dt_id = TEGRA124_CLK_OWR, .present = true }, |
| 800 | [tegra_clk_afi] = { .dt_id = TEGRA124_CLK_AFI, .present = true }, | 806 | [tegra_clk_afi] = { .dt_id = TEGRA124_CLK_AFI, .present = true }, |
| @@ -1286,9 +1292,9 @@ static void __init tegra124_pll_init(void __iomem *clk_base, | |||
| 1286 | clk_register_clkdev(clk, "pll_d2", NULL); | 1292 | clk_register_clkdev(clk, "pll_d2", NULL); |
| 1287 | clks[TEGRA124_CLK_PLL_D2] = clk; | 1293 | clks[TEGRA124_CLK_PLL_D2] = clk; |
| 1288 | 1294 | ||
| 1289 | /* PLLD2_OUT0 ?? */ | 1295 | /* PLLD2_OUT0 */ |
| 1290 | clk = clk_register_fixed_factor(NULL, "pll_d2_out0", "pll_d2", | 1296 | clk = clk_register_fixed_factor(NULL, "pll_d2_out0", "pll_d2", |
| 1291 | CLK_SET_RATE_PARENT, 1, 2); | 1297 | CLK_SET_RATE_PARENT, 1, 1); |
| 1292 | clk_register_clkdev(clk, "pll_d2_out0", NULL); | 1298 | clk_register_clkdev(clk, "pll_d2_out0", NULL); |
| 1293 | clks[TEGRA124_CLK_PLL_D2_OUT0] = clk; | 1299 | clks[TEGRA124_CLK_PLL_D2_OUT0] = clk; |
| 1294 | 1300 | ||
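Note: the hunk above turns pll_d2_out0 from a 1:2 fixed divider into a 1:1 pass-through (and drops the stale "??" from the comment). For reference, the last two arguments of clk_register_fixed_factor() are the multiplier and the divisor, so 1, 1 makes the child clock mirror pll_d2 exactly, where the old 1, 2 halved it. Minimal sketch of the call using the names from the hunk; the error check is added here purely for illustration:

    clk = clk_register_fixed_factor(NULL, "pll_d2_out0", "pll_d2",
                                    CLK_SET_RATE_PARENT, 1, 1);   /* mult = 1, div = 1 */
    if (IS_ERR(clk))
            pr_err("pll_d2_out0 registration failed: %ld\n", PTR_ERR(clk));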
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c index dbace152b2fa..dace2b1b5ae6 100644 --- a/drivers/clk/tegra/clk-tegra20.c +++ b/drivers/clk/tegra/clk-tegra20.c | |||
| @@ -574,6 +574,8 @@ static struct tegra_clk tegra20_clks[tegra_clk_max] __initdata = { | |||
| 574 | [tegra_clk_tvdac] = { .dt_id = TEGRA20_CLK_TVDAC, .present = true }, | 574 | [tegra_clk_tvdac] = { .dt_id = TEGRA20_CLK_TVDAC, .present = true }, |
| 575 | [tegra_clk_vi_sensor] = { .dt_id = TEGRA20_CLK_VI_SENSOR, .present = true }, | 575 | [tegra_clk_vi_sensor] = { .dt_id = TEGRA20_CLK_VI_SENSOR, .present = true }, |
| 576 | [tegra_clk_afi] = { .dt_id = TEGRA20_CLK_AFI, .present = true }, | 576 | [tegra_clk_afi] = { .dt_id = TEGRA20_CLK_AFI, .present = true }, |
| 577 | [tegra_clk_fuse] = { .dt_id = TEGRA20_CLK_FUSE, .present = true }, | ||
| 578 | [tegra_clk_kfuse] = { .dt_id = TEGRA20_CLK_KFUSE, .present = true }, | ||
| 577 | }; | 579 | }; |
| 578 | 580 | ||
| 579 | static unsigned long tegra20_clk_measure_input_freq(void) | 581 | static unsigned long tegra20_clk_measure_input_freq(void) |
diff --git a/drivers/clocksource/vf_pit_timer.c b/drivers/clocksource/vf_pit_timer.c index 02821b06a39e..a918bc481c52 100644 --- a/drivers/clocksource/vf_pit_timer.c +++ b/drivers/clocksource/vf_pit_timer.c | |||
| @@ -54,7 +54,7 @@ static inline void pit_irq_acknowledge(void) | |||
| 54 | 54 | ||
| 55 | static u64 pit_read_sched_clock(void) | 55 | static u64 pit_read_sched_clock(void) |
| 56 | { | 56 | { |
| 57 | return __raw_readl(clksrc_base + PITCVAL); | 57 | return ~__raw_readl(clksrc_base + PITCVAL); |
| 58 | } | 58 | } |
| 59 | 59 | ||
| 60 | static int __init pit_clocksource_init(unsigned long rate) | 60 | static int __init pit_clocksource_init(unsigned long rate) |
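Note: the one-character change above complements the PIT counter value before handing it to sched_clock(). That makes sense if the Vybrid PIT is a down-counter: the raw register shrinks as time passes, and the bitwise complement of a down-counter grows monotonically, which is what the scheduler clock expects. The read hook with a clarifying comment (clksrc_base and PITCVAL as in the driver; this is a sketch, not a replacement for the hunk):

    static u64 pit_read_sched_clock(void)
    {
            /* PITCVAL counts down; ~value yields a monotonically increasing count */
            return ~__raw_readl(clksrc_base + PITCVAL);
    }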
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index cb003a6b72c8..199b52b7c3e1 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
| @@ -1109,12 +1109,27 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif, | |||
| 1109 | goto err_set_policy_cpu; | 1109 | goto err_set_policy_cpu; |
| 1110 | } | 1110 | } |
| 1111 | 1111 | ||
| 1112 | /* related cpus should atleast have policy->cpus */ | ||
| 1113 | cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus); | ||
| 1114 | |||
| 1115 | /* | ||
| 1116 | * affected cpus must always be the one, which are online. We aren't | ||
| 1117 | * managing offline cpus here. | ||
| 1118 | */ | ||
| 1119 | cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); | ||
| 1120 | |||
| 1121 | if (!frozen) { | ||
| 1122 | policy->user_policy.min = policy->min; | ||
| 1123 | policy->user_policy.max = policy->max; | ||
| 1124 | } | ||
| 1125 | |||
| 1126 | down_write(&policy->rwsem); | ||
| 1112 | write_lock_irqsave(&cpufreq_driver_lock, flags); | 1127 | write_lock_irqsave(&cpufreq_driver_lock, flags); |
| 1113 | for_each_cpu(j, policy->cpus) | 1128 | for_each_cpu(j, policy->cpus) |
| 1114 | per_cpu(cpufreq_cpu_data, j) = policy; | 1129 | per_cpu(cpufreq_cpu_data, j) = policy; |
| 1115 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); | 1130 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); |
| 1116 | 1131 | ||
| 1117 | if (cpufreq_driver->get) { | 1132 | if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { |
| 1118 | policy->cur = cpufreq_driver->get(policy->cpu); | 1133 | policy->cur = cpufreq_driver->get(policy->cpu); |
| 1119 | if (!policy->cur) { | 1134 | if (!policy->cur) { |
| 1120 | pr_err("%s: ->get() failed\n", __func__); | 1135 | pr_err("%s: ->get() failed\n", __func__); |
| @@ -1162,20 +1177,6 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif, | |||
| 1162 | } | 1177 | } |
| 1163 | } | 1178 | } |
| 1164 | 1179 | ||
| 1165 | /* related cpus should atleast have policy->cpus */ | ||
| 1166 | cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus); | ||
| 1167 | |||
| 1168 | /* | ||
| 1169 | * affected cpus must always be the one, which are online. We aren't | ||
| 1170 | * managing offline cpus here. | ||
| 1171 | */ | ||
| 1172 | cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); | ||
| 1173 | |||
| 1174 | if (!frozen) { | ||
| 1175 | policy->user_policy.min = policy->min; | ||
| 1176 | policy->user_policy.max = policy->max; | ||
| 1177 | } | ||
| 1178 | |||
| 1179 | blocking_notifier_call_chain(&cpufreq_policy_notifier_list, | 1180 | blocking_notifier_call_chain(&cpufreq_policy_notifier_list, |
| 1180 | CPUFREQ_START, policy); | 1181 | CPUFREQ_START, policy); |
| 1181 | 1182 | ||
| @@ -1206,6 +1207,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif, | |||
| 1206 | policy->user_policy.policy = policy->policy; | 1207 | policy->user_policy.policy = policy->policy; |
| 1207 | policy->user_policy.governor = policy->governor; | 1208 | policy->user_policy.governor = policy->governor; |
| 1208 | } | 1209 | } |
| 1210 | up_write(&policy->rwsem); | ||
| 1209 | 1211 | ||
| 1210 | kobject_uevent(&policy->kobj, KOBJ_ADD); | 1212 | kobject_uevent(&policy->kobj, KOBJ_ADD); |
| 1211 | up_read(&cpufreq_rwsem); | 1213 | up_read(&cpufreq_rwsem); |
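Note: the hunks above move the related_cpus/online-mask fixups and the user_policy min/max snapshot ahead of the point where the policy is published via per_cpu(cpufreq_cpu_data), and they bracket the rest of the initialization window with policy->rwsem. The apparent intent is that readers taking the same rwsem (such as cpufreq_get() in the following hunk) can never observe a half-initialized policy. Rough shape of the reordered path, paraphrased from the diff rather than copied verbatim:

    cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
    cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

    down_write(&policy->rwsem);                     /* exclude readers while the policy is filled in */

    write_lock_irqsave(&cpufreq_driver_lock, flags);
    for_each_cpu(j, policy->cpus)
            per_cpu(cpufreq_cpu_data, j) = policy;  /* publish only after the masks are set up */
    write_unlock_irqrestore(&cpufreq_driver_lock, flags);

    /* ... driver ->init/->get, governor setup ... */

    up_write(&policy->rwsem);
    kobject_uevent(&policy->kobj, KOBJ_ADD);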
| @@ -1546,23 +1548,16 @@ static unsigned int __cpufreq_get(unsigned int cpu) | |||
| 1546 | */ | 1548 | */ |
| 1547 | unsigned int cpufreq_get(unsigned int cpu) | 1549 | unsigned int cpufreq_get(unsigned int cpu) |
| 1548 | { | 1550 | { |
| 1549 | struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); | 1551 | struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); |
| 1550 | unsigned int ret_freq = 0; | 1552 | unsigned int ret_freq = 0; |
| 1551 | 1553 | ||
| 1552 | if (cpufreq_disabled() || !cpufreq_driver) | 1554 | if (policy) { |
| 1553 | return -ENOENT; | 1555 | down_read(&policy->rwsem); |
| 1554 | 1556 | ret_freq = __cpufreq_get(cpu); | |
| 1555 | BUG_ON(!policy); | 1557 | up_read(&policy->rwsem); |
| 1556 | |||
| 1557 | if (!down_read_trylock(&cpufreq_rwsem)) | ||
| 1558 | return 0; | ||
| 1559 | |||
| 1560 | down_read(&policy->rwsem); | ||
| 1561 | |||
| 1562 | ret_freq = __cpufreq_get(cpu); | ||
| 1563 | 1558 | ||
| 1564 | up_read(&policy->rwsem); | 1559 | cpufreq_cpu_put(policy); |
| 1565 | up_read(&cpufreq_rwsem); | 1560 | } |
| 1566 | 1561 | ||
| 1567 | return ret_freq; | 1562 | return ret_freq; |
| 1568 | } | 1563 | } |
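Note: cpufreq_get() now resolves the policy with cpufreq_cpu_get(), which, as used elsewhere in cpufreq, returns NULL when cpufreq is disabled or the CPU has no policy and otherwise returns the policy with a reference held; that makes the old BUG_ON and the cpufreq_rwsem trylock unnecessary. The resulting get/use/put pattern, condensed from the hunk:

    unsigned int freq = 0;
    struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

    if (policy) {
            down_read(&policy->rwsem);
            freq = __cpufreq_get(cpu);      /* may ask the driver for the current frequency */
            up_read(&policy->rwsem);
            cpufreq_cpu_put(policy);        /* every successful cpufreq_cpu_get() needs a put */
    }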
| @@ -2148,7 +2143,7 @@ int cpufreq_update_policy(unsigned int cpu) | |||
| 2148 | * BIOS might change freq behind our back | 2143 | * BIOS might change freq behind our back |
| 2149 | * -> ask driver for current freq and notify governors about a change | 2144 | * -> ask driver for current freq and notify governors about a change |
| 2150 | */ | 2145 | */ |
| 2151 | if (cpufreq_driver->get) { | 2146 | if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { |
| 2152 | new_policy.cur = cpufreq_driver->get(cpu); | 2147 | new_policy.cur = cpufreq_driver->get(cpu); |
| 2153 | if (!policy->cur) { | 2148 | if (!policy->cur) { |
| 2154 | pr_debug("Driver did not initialize current freq"); | 2149 | pr_debug("Driver did not initialize current freq"); |
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c index de4aa409abe2..2c6d5e118ac1 100644 --- a/drivers/firewire/core-device.c +++ b/drivers/firewire/core-device.c | |||
| @@ -916,7 +916,7 @@ static int lookup_existing_device(struct device *dev, void *data) | |||
| 916 | old->config_rom_retries = 0; | 916 | old->config_rom_retries = 0; |
| 917 | fw_notice(card, "rediscovered device %s\n", dev_name(dev)); | 917 | fw_notice(card, "rediscovered device %s\n", dev_name(dev)); |
| 918 | 918 | ||
| 919 | PREPARE_DELAYED_WORK(&old->work, fw_device_update); | 919 | old->workfn = fw_device_update; |
| 920 | fw_schedule_device_work(old, 0); | 920 | fw_schedule_device_work(old, 0); |
| 921 | 921 | ||
| 922 | if (current_node == card->root_node) | 922 | if (current_node == card->root_node) |
| @@ -1075,7 +1075,7 @@ static void fw_device_init(struct work_struct *work) | |||
| 1075 | if (atomic_cmpxchg(&device->state, | 1075 | if (atomic_cmpxchg(&device->state, |
| 1076 | FW_DEVICE_INITIALIZING, | 1076 | FW_DEVICE_INITIALIZING, |
| 1077 | FW_DEVICE_RUNNING) == FW_DEVICE_GONE) { | 1077 | FW_DEVICE_RUNNING) == FW_DEVICE_GONE) { |
| 1078 | PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown); | 1078 | device->workfn = fw_device_shutdown; |
| 1079 | fw_schedule_device_work(device, SHUTDOWN_DELAY); | 1079 | fw_schedule_device_work(device, SHUTDOWN_DELAY); |
| 1080 | } else { | 1080 | } else { |
| 1081 | fw_notice(card, "created device %s: GUID %08x%08x, S%d00\n", | 1081 | fw_notice(card, "created device %s: GUID %08x%08x, S%d00\n", |
| @@ -1196,13 +1196,20 @@ static void fw_device_refresh(struct work_struct *work) | |||
| 1196 | dev_name(&device->device), fw_rcode_string(ret)); | 1196 | dev_name(&device->device), fw_rcode_string(ret)); |
| 1197 | gone: | 1197 | gone: |
| 1198 | atomic_set(&device->state, FW_DEVICE_GONE); | 1198 | atomic_set(&device->state, FW_DEVICE_GONE); |
| 1199 | PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown); | 1199 | device->workfn = fw_device_shutdown; |
| 1200 | fw_schedule_device_work(device, SHUTDOWN_DELAY); | 1200 | fw_schedule_device_work(device, SHUTDOWN_DELAY); |
| 1201 | out: | 1201 | out: |
| 1202 | if (node_id == card->root_node->node_id) | 1202 | if (node_id == card->root_node->node_id) |
| 1203 | fw_schedule_bm_work(card, 0); | 1203 | fw_schedule_bm_work(card, 0); |
| 1204 | } | 1204 | } |
| 1205 | 1205 | ||
| 1206 | static void fw_device_workfn(struct work_struct *work) | ||
| 1207 | { | ||
| 1208 | struct fw_device *device = container_of(to_delayed_work(work), | ||
| 1209 | struct fw_device, work); | ||
| 1210 | device->workfn(work); | ||
| 1211 | } | ||
| 1212 | |||
| 1206 | void fw_node_event(struct fw_card *card, struct fw_node *node, int event) | 1213 | void fw_node_event(struct fw_card *card, struct fw_node *node, int event) |
| 1207 | { | 1214 | { |
| 1208 | struct fw_device *device; | 1215 | struct fw_device *device; |
| @@ -1252,7 +1259,8 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event) | |||
| 1252 | * power-up after getting plugged in. We schedule the | 1259 | * power-up after getting plugged in. We schedule the |
| 1253 | * first config rom scan half a second after bus reset. | 1260 | * first config rom scan half a second after bus reset. |
| 1254 | */ | 1261 | */ |
| 1255 | INIT_DELAYED_WORK(&device->work, fw_device_init); | 1262 | device->workfn = fw_device_init; |
| 1263 | INIT_DELAYED_WORK(&device->work, fw_device_workfn); | ||
| 1256 | fw_schedule_device_work(device, INITIAL_DELAY); | 1264 | fw_schedule_device_work(device, INITIAL_DELAY); |
| 1257 | break; | 1265 | break; |
| 1258 | 1266 | ||
| @@ -1268,7 +1276,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event) | |||
| 1268 | if (atomic_cmpxchg(&device->state, | 1276 | if (atomic_cmpxchg(&device->state, |
| 1269 | FW_DEVICE_RUNNING, | 1277 | FW_DEVICE_RUNNING, |
| 1270 | FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) { | 1278 | FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) { |
| 1271 | PREPARE_DELAYED_WORK(&device->work, fw_device_refresh); | 1279 | device->workfn = fw_device_refresh; |
| 1272 | fw_schedule_device_work(device, | 1280 | fw_schedule_device_work(device, |
| 1273 | device->is_local ? 0 : INITIAL_DELAY); | 1281 | device->is_local ? 0 : INITIAL_DELAY); |
| 1274 | } | 1282 | } |
| @@ -1283,7 +1291,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event) | |||
| 1283 | smp_wmb(); /* update node_id before generation */ | 1291 | smp_wmb(); /* update node_id before generation */ |
| 1284 | device->generation = card->generation; | 1292 | device->generation = card->generation; |
| 1285 | if (atomic_read(&device->state) == FW_DEVICE_RUNNING) { | 1293 | if (atomic_read(&device->state) == FW_DEVICE_RUNNING) { |
| 1286 | PREPARE_DELAYED_WORK(&device->work, fw_device_update); | 1294 | device->workfn = fw_device_update; |
| 1287 | fw_schedule_device_work(device, 0); | 1295 | fw_schedule_device_work(device, 0); |
| 1288 | } | 1296 | } |
| 1289 | break; | 1297 | break; |
| @@ -1308,7 +1316,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event) | |||
| 1308 | device = node->data; | 1316 | device = node->data; |
| 1309 | if (atomic_xchg(&device->state, | 1317 | if (atomic_xchg(&device->state, |
| 1310 | FW_DEVICE_GONE) == FW_DEVICE_RUNNING) { | 1318 | FW_DEVICE_GONE) == FW_DEVICE_RUNNING) { |
| 1311 | PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown); | 1319 | device->workfn = fw_device_shutdown; |
| 1312 | fw_schedule_device_work(device, | 1320 | fw_schedule_device_work(device, |
| 1313 | list_empty(&card->link) ? 0 : SHUTDOWN_DELAY); | 1321 | list_empty(&card->link) ? 0 : SHUTDOWN_DELAY); |
| 1314 | } | 1322 | } |
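Note: the core-device.c hunks replace PREPARE_DELAYED_WORK() with a work_func_t pointer stored in the device plus a single trampoline that the delayed work always executes; the desired handler is selected by assigning device->workfn before scheduling, so work->func itself never needs to be rewritten while the item may be in flight. Condensed sketch of the pattern, names taken from the diff and the surrounding struct abbreviated:

    struct fw_device {
            /* ... */
            work_func_t workfn;             /* handler to run on the next expiry */
            struct delayed_work work;
    };

    static void fw_device_workfn(struct work_struct *work)
    {
            struct fw_device *device = container_of(to_delayed_work(work),
                                                    struct fw_device, work);
            device->workfn(work);           /* dispatch to init/update/refresh/shutdown */
    }

    /* set up once: */
    device->workfn = fw_device_init;
    INIT_DELAYED_WORK(&device->work, fw_device_workfn);

    /* retarget later: */
    device->workfn = fw_device_shutdown;
    fw_schedule_device_work(device, SHUTDOWN_DELAY);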
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c index 6b895986dc22..4af0a7bad7f2 100644 --- a/drivers/firewire/net.c +++ b/drivers/firewire/net.c | |||
| @@ -929,8 +929,6 @@ static void fwnet_write_complete(struct fw_card *card, int rcode, | |||
| 929 | if (rcode == RCODE_COMPLETE) { | 929 | if (rcode == RCODE_COMPLETE) { |
| 930 | fwnet_transmit_packet_done(ptask); | 930 | fwnet_transmit_packet_done(ptask); |
| 931 | } else { | 931 | } else { |
| 932 | fwnet_transmit_packet_failed(ptask); | ||
| 933 | |||
| 934 | if (printk_timed_ratelimit(&j, 1000) || rcode != last_rcode) { | 932 | if (printk_timed_ratelimit(&j, 1000) || rcode != last_rcode) { |
| 935 | dev_err(&ptask->dev->netdev->dev, | 933 | dev_err(&ptask->dev->netdev->dev, |
| 936 | "fwnet_write_complete failed: %x (skipped %d)\n", | 934 | "fwnet_write_complete failed: %x (skipped %d)\n", |
| @@ -938,8 +936,10 @@ static void fwnet_write_complete(struct fw_card *card, int rcode, | |||
| 938 | 936 | ||
| 939 | errors_skipped = 0; | 937 | errors_skipped = 0; |
| 940 | last_rcode = rcode; | 938 | last_rcode = rcode; |
| 941 | } else | 939 | } else { |
| 942 | errors_skipped++; | 940 | errors_skipped++; |
| 941 | } | ||
| 942 | fwnet_transmit_packet_failed(ptask); | ||
| 943 | } | 943 | } |
| 944 | } | 944 | } |
| 945 | 945 | ||
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 6f74d8d3f700..8db663219560 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c | |||
| @@ -290,7 +290,6 @@ static char ohci_driver_name[] = KBUILD_MODNAME; | |||
| 290 | #define QUIRK_NO_MSI 0x10 | 290 | #define QUIRK_NO_MSI 0x10 |
| 291 | #define QUIRK_TI_SLLZ059 0x20 | 291 | #define QUIRK_TI_SLLZ059 0x20 |
| 292 | #define QUIRK_IR_WAKE 0x40 | 292 | #define QUIRK_IR_WAKE 0x40 |
| 293 | #define QUIRK_PHY_LCTRL_TIMEOUT 0x80 | ||
| 294 | 293 | ||
| 295 | /* In case of multiple matches in ohci_quirks[], only the first one is used. */ | 294 | /* In case of multiple matches in ohci_quirks[], only the first one is used. */ |
| 296 | static const struct { | 295 | static const struct { |
| @@ -303,10 +302,7 @@ static const struct { | |||
| 303 | QUIRK_BE_HEADERS}, | 302 | QUIRK_BE_HEADERS}, |
| 304 | 303 | ||
| 305 | {PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6, | 304 | {PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6, |
| 306 | QUIRK_PHY_LCTRL_TIMEOUT | QUIRK_NO_MSI}, | 305 | QUIRK_NO_MSI}, |
| 307 | |||
| 308 | {PCI_VENDOR_ID_ATT, PCI_ANY_ID, PCI_ANY_ID, | ||
| 309 | QUIRK_PHY_LCTRL_TIMEOUT}, | ||
| 310 | 306 | ||
| 311 | {PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID, | 307 | {PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID, |
| 312 | QUIRK_RESET_PACKET}, | 308 | QUIRK_RESET_PACKET}, |
| @@ -353,7 +349,6 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0" | |||
| 353 | ", disable MSI = " __stringify(QUIRK_NO_MSI) | 349 | ", disable MSI = " __stringify(QUIRK_NO_MSI) |
| 354 | ", TI SLLZ059 erratum = " __stringify(QUIRK_TI_SLLZ059) | 350 | ", TI SLLZ059 erratum = " __stringify(QUIRK_TI_SLLZ059) |
| 355 | ", IR wake unreliable = " __stringify(QUIRK_IR_WAKE) | 351 | ", IR wake unreliable = " __stringify(QUIRK_IR_WAKE) |
| 356 | ", phy LCtrl timeout = " __stringify(QUIRK_PHY_LCTRL_TIMEOUT) | ||
| 357 | ")"); | 352 | ")"); |
| 358 | 353 | ||
| 359 | #define OHCI_PARAM_DEBUG_AT_AR 1 | 354 | #define OHCI_PARAM_DEBUG_AT_AR 1 |
| @@ -2299,9 +2294,6 @@ static int ohci_enable(struct fw_card *card, | |||
| 2299 | * TI TSB82AA2 + TSB81BA3(A) cards signal LPS enabled early but | 2294 | * TI TSB82AA2 + TSB81BA3(A) cards signal LPS enabled early but |
| 2300 | * cannot actually use the phy at that time. These need tens of | 2295 | * cannot actually use the phy at that time. These need tens of |
| 2301 | * millisecods pause between LPS write and first phy access too. | 2296 | * millisecods pause between LPS write and first phy access too. |
| 2302 | * | ||
| 2303 | * But do not wait for 50msec on Agere/LSI cards. Their phy | ||
| 2304 | * arbitration state machine may time out during such a long wait. | ||
| 2305 | */ | 2297 | */ |
| 2306 | 2298 | ||
| 2307 | reg_write(ohci, OHCI1394_HCControlSet, | 2299 | reg_write(ohci, OHCI1394_HCControlSet, |
| @@ -2309,11 +2301,8 @@ static int ohci_enable(struct fw_card *card, | |||
| 2309 | OHCI1394_HCControl_postedWriteEnable); | 2301 | OHCI1394_HCControl_postedWriteEnable); |
| 2310 | flush_writes(ohci); | 2302 | flush_writes(ohci); |
| 2311 | 2303 | ||
| 2312 | if (!(ohci->quirks & QUIRK_PHY_LCTRL_TIMEOUT)) | 2304 | for (lps = 0, i = 0; !lps && i < 3; i++) { |
| 2313 | msleep(50); | 2305 | msleep(50); |
| 2314 | |||
| 2315 | for (lps = 0, i = 0; !lps && i < 150; i++) { | ||
| 2316 | msleep(1); | ||
| 2317 | lps = reg_read(ohci, OHCI1394_HCControlSet) & | 2306 | lps = reg_read(ohci, OHCI1394_HCControlSet) & |
| 2318 | OHCI1394_HCControl_LPS; | 2307 | OHCI1394_HCControl_LPS; |
| 2319 | } | 2308 | } |
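Note: with QUIRK_PHY_LCTRL_TIMEOUT removed, every controller now goes through the same Link Power Status polling: up to three 50 ms sleeps, instead of an optional 50 ms delay followed by up to 150 one-millisecond polls. The loop as it reads after the change, condensed from the hunk:

    for (lps = 0, i = 0; !lps && i < 3; i++) {
            msleep(50);                     /* give Link Power Status time to come up */
            lps = reg_read(ohci, OHCI1394_HCControlSet) &
                  OHCI1394_HCControl_LPS;
    }
    /* code after the loop (outside this hunk) treats a still-clear LPS bit as an init failure */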
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c index 281029daf98c..7aef911fdc71 100644 --- a/drivers/firewire/sbp2.c +++ b/drivers/firewire/sbp2.c | |||
| @@ -146,6 +146,7 @@ struct sbp2_logical_unit { | |||
| 146 | */ | 146 | */ |
| 147 | int generation; | 147 | int generation; |
| 148 | int retries; | 148 | int retries; |
| 149 | work_func_t workfn; | ||
| 149 | struct delayed_work work; | 150 | struct delayed_work work; |
| 150 | bool has_sdev; | 151 | bool has_sdev; |
| 151 | bool blocked; | 152 | bool blocked; |
| @@ -864,7 +865,7 @@ static void sbp2_login(struct work_struct *work) | |||
| 864 | /* set appropriate retry limit(s) in BUSY_TIMEOUT register */ | 865 | /* set appropriate retry limit(s) in BUSY_TIMEOUT register */ |
| 865 | sbp2_set_busy_timeout(lu); | 866 | sbp2_set_busy_timeout(lu); |
| 866 | 867 | ||
| 867 | PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect); | 868 | lu->workfn = sbp2_reconnect; |
| 868 | sbp2_agent_reset(lu); | 869 | sbp2_agent_reset(lu); |
| 869 | 870 | ||
| 870 | /* This was a re-login. */ | 871 | /* This was a re-login. */ |
| @@ -918,7 +919,7 @@ static void sbp2_login(struct work_struct *work) | |||
| 918 | * If a bus reset happened, sbp2_update will have requeued | 919 | * If a bus reset happened, sbp2_update will have requeued |
| 919 | * lu->work already. Reset the work from reconnect to login. | 920 | * lu->work already. Reset the work from reconnect to login. |
| 920 | */ | 921 | */ |
| 921 | PREPARE_DELAYED_WORK(&lu->work, sbp2_login); | 922 | lu->workfn = sbp2_login; |
| 922 | } | 923 | } |
| 923 | 924 | ||
| 924 | static void sbp2_reconnect(struct work_struct *work) | 925 | static void sbp2_reconnect(struct work_struct *work) |
| @@ -952,7 +953,7 @@ static void sbp2_reconnect(struct work_struct *work) | |||
| 952 | lu->retries++ >= 5) { | 953 | lu->retries++ >= 5) { |
| 953 | dev_err(tgt_dev(tgt), "failed to reconnect\n"); | 954 | dev_err(tgt_dev(tgt), "failed to reconnect\n"); |
| 954 | lu->retries = 0; | 955 | lu->retries = 0; |
| 955 | PREPARE_DELAYED_WORK(&lu->work, sbp2_login); | 956 | lu->workfn = sbp2_login; |
| 956 | } | 957 | } |
| 957 | sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); | 958 | sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); |
| 958 | 959 | ||
| @@ -972,6 +973,13 @@ static void sbp2_reconnect(struct work_struct *work) | |||
| 972 | sbp2_conditionally_unblock(lu); | 973 | sbp2_conditionally_unblock(lu); |
| 973 | } | 974 | } |
| 974 | 975 | ||
| 976 | static void sbp2_lu_workfn(struct work_struct *work) | ||
| 977 | { | ||
| 978 | struct sbp2_logical_unit *lu = container_of(to_delayed_work(work), | ||
| 979 | struct sbp2_logical_unit, work); | ||
| 980 | lu->workfn(work); | ||
| 981 | } | ||
| 982 | |||
| 975 | static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry) | 983 | static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry) |
| 976 | { | 984 | { |
| 977 | struct sbp2_logical_unit *lu; | 985 | struct sbp2_logical_unit *lu; |
| @@ -998,7 +1006,8 @@ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry) | |||
| 998 | lu->blocked = false; | 1006 | lu->blocked = false; |
| 999 | ++tgt->dont_block; | 1007 | ++tgt->dont_block; |
| 1000 | INIT_LIST_HEAD(&lu->orb_list); | 1008 | INIT_LIST_HEAD(&lu->orb_list); |
| 1001 | INIT_DELAYED_WORK(&lu->work, sbp2_login); | 1009 | lu->workfn = sbp2_login; |
| 1010 | INIT_DELAYED_WORK(&lu->work, sbp2_lu_workfn); | ||
| 1002 | 1011 | ||
| 1003 | list_add_tail(&lu->link, &tgt->lu_list); | 1012 | list_add_tail(&lu->link, &tgt->lu_list); |
| 1004 | return 0; | 1013 | return 0; |
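Note: sbp2.c receives the same conversion as core-device.c above: a work_func_t member is added to struct sbp2_logical_unit, sbp2_lu_workfn() dispatches through it, and login/reconnect are chosen by assigning lu->workfn instead of calling PREPARE_DELAYED_WORK(). Only the wiring at allocation time differs:

    lu->workfn = sbp2_login;                        /* first action for a fresh logical unit */
    INIT_DELAYED_WORK(&lu->work, sbp2_lu_workfn);   /* the trampoline itself never changes */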
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index acf3a36c9ebc..32982da82694 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c | |||
| @@ -68,15 +68,7 @@ void __armada_drm_queue_unref_work(struct drm_device *dev, | |||
| 68 | { | 68 | { |
| 69 | struct armada_private *priv = dev->dev_private; | 69 | struct armada_private *priv = dev->dev_private; |
| 70 | 70 | ||
| 71 | /* | 71 | WARN_ON(!kfifo_put(&priv->fb_unref, fb)); |
| 72 | * Yes, we really must jump through these hoops just to store a | ||
| 73 | * _pointer_ to something into the kfifo. This is utterly insane | ||
| 74 | * and idiotic, because it kfifo requires the _data_ pointed to by | ||
| 75 | * the pointer const, not the pointer itself. Not only that, but | ||
| 76 | * you have to pass a pointer _to_ the pointer you want stored. | ||
| 77 | */ | ||
| 78 | const struct drm_framebuffer *silly_api_alert = fb; | ||
| 79 | WARN_ON(!kfifo_put(&priv->fb_unref, &silly_api_alert)); | ||
| 80 | schedule_work(&priv->fb_unref_work); | 72 | schedule_work(&priv->fb_unref_work); |
| 81 | } | 73 | } |
| 82 | 74 | ||
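Note: the armada hunk can drop the pointer-to-pointer workaround because kfifo_put() in this kernel takes the element by value, and the element type of the fb_unref fifo is itself a struct drm_framebuffer pointer. Hedged sketch of the producer/consumer pair; the fifo size and the consumer call are assumptions for illustration, not taken from this diff:

    struct armada_private {
            /* ... */
            DECLARE_KFIFO(fb_unref, struct drm_framebuffer *, 8);   /* size assumed */
    };

    /* producer: the pointer value is copied into the fifo */
    WARN_ON(!kfifo_put(&priv->fb_unref, fb));

    /* consumer, e.g. in the deferred unref worker */
    struct drm_framebuffer *fb;

    while (kfifo_get(&priv->fb_unref, &fb))
            drm_framebuffer_unreference(fb);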
diff --git a/drivers/gpu/drm/bochs/Kconfig b/drivers/gpu/drm/bochs/Kconfig index c8fcf12019f0..5f8b0c2b9a44 100644 --- a/drivers/gpu/drm/bochs/Kconfig +++ b/drivers/gpu/drm/bochs/Kconfig | |||
| @@ -2,6 +2,7 @@ config DRM_BOCHS | |||
| 2 | tristate "DRM Support for bochs dispi vga interface (qemu stdvga)" | 2 | tristate "DRM Support for bochs dispi vga interface (qemu stdvga)" |
| 3 | depends on DRM && PCI | 3 | depends on DRM && PCI |
| 4 | select DRM_KMS_HELPER | 4 | select DRM_KMS_HELPER |
| 5 | select DRM_KMS_FB_HELPER | ||
| 5 | select FB_SYS_FILLRECT | 6 | select FB_SYS_FILLRECT |
| 6 | select FB_SYS_COPYAREA | 7 | select FB_SYS_COPYAREA |
| 7 | select FB_SYS_IMAGEBLIT | 8 | select FB_SYS_IMAGEBLIT |
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index 5736aaa7e86c..f7af69bcf3f4 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c | |||
| @@ -468,8 +468,8 @@ void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver) | |||
| 468 | } else { | 468 | } else { |
| 469 | list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list, | 469 | list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list, |
| 470 | legacy_dev_list) { | 470 | legacy_dev_list) { |
| 471 | drm_put_dev(dev); | ||
| 472 | list_del(&dev->legacy_dev_list); | 471 | list_del(&dev->legacy_dev_list); |
| 472 | drm_put_dev(dev); | ||
| 473 | } | 473 | } |
| 474 | } | 474 | } |
| 475 | DRM_INFO("Module unloaded\n"); | 475 | DRM_INFO("Module unloaded\n"); |
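Note: the reordering above unlinks the device from driver->legacy_dev_list before drm_put_dev() gets a chance to free it; with the old order the list_del() ran on freed memory, since list_for_each_entry_safe() only protects the iteration cursor, not the entry currently being processed. Minimal shape of the safe teardown:

    struct drm_device *dev, *tmp;

    list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list, legacy_dev_list) {
            list_del(&dev->legacy_dev_list);   /* unlink first ... */
            drm_put_dev(dev);                  /* ... then drop the reference that may free dev */
    }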
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 215131ab1dd2..c204b4e3356e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c | |||
| @@ -172,20 +172,24 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) | |||
| 172 | 172 | ||
| 173 | ret = exynos_drm_subdrv_open(dev, file); | 173 | ret = exynos_drm_subdrv_open(dev, file); |
| 174 | if (ret) | 174 | if (ret) |
| 175 | goto out; | 175 | goto err_file_priv_free; |
| 176 | 176 | ||
| 177 | anon_filp = anon_inode_getfile("exynos_gem", &exynos_drm_gem_fops, | 177 | anon_filp = anon_inode_getfile("exynos_gem", &exynos_drm_gem_fops, |
| 178 | NULL, 0); | 178 | NULL, 0); |
| 179 | if (IS_ERR(anon_filp)) { | 179 | if (IS_ERR(anon_filp)) { |
| 180 | ret = PTR_ERR(anon_filp); | 180 | ret = PTR_ERR(anon_filp); |
| 181 | goto out; | 181 | goto err_subdrv_close; |
| 182 | } | 182 | } |
| 183 | 183 | ||
| 184 | anon_filp->f_mode = FMODE_READ | FMODE_WRITE; | 184 | anon_filp->f_mode = FMODE_READ | FMODE_WRITE; |
| 185 | file_priv->anon_filp = anon_filp; | 185 | file_priv->anon_filp = anon_filp; |
| 186 | 186 | ||
| 187 | return ret; | 187 | return ret; |
| 188 | out: | 188 | |
| 189 | err_subdrv_close: | ||
| 190 | exynos_drm_subdrv_close(dev, file); | ||
| 191 | |||
| 192 | err_file_priv_free: | ||
| 189 | kfree(file_priv); | 193 | kfree(file_priv); |
| 190 | file->driver_priv = NULL; | 194 | file->driver_priv = NULL; |
| 191 | return ret; | 195 | return ret; |
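Note: the exynos open path now unwinds in reverse order of setup: if anon_inode_getfile() fails, the subdrv open is undone before file_priv is released, instead of both failure cases funnelling through one catch-all label and leaking the subdrv state. Condensed goto ladder, names as in the hunk:

    ret = exynos_drm_subdrv_open(dev, file);
    if (ret)
            goto err_file_priv_free;

    anon_filp = anon_inode_getfile("exynos_gem", &exynos_drm_gem_fops, NULL, 0);
    if (IS_ERR(anon_filp)) {
            ret = PTR_ERR(anon_filp);
            goto err_subdrv_close;
    }
    /* success path ... */
    return 0;

    err_subdrv_close:
            exynos_drm_subdrv_close(dev, file);     /* undo in reverse order of setup */
    err_file_priv_free:
            kfree(file_priv);
            file->driver_priv = NULL;
            return ret;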
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 04f1f02c4019..ec7bb0fc71bc 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
| @@ -403,7 +403,7 @@ MODULE_DEVICE_TABLE(pci, pciidlist); | |||
| 403 | void intel_detect_pch(struct drm_device *dev) | 403 | void intel_detect_pch(struct drm_device *dev) |
| 404 | { | 404 | { |
| 405 | struct drm_i915_private *dev_priv = dev->dev_private; | 405 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 406 | struct pci_dev *pch; | 406 | struct pci_dev *pch = NULL; |
| 407 | 407 | ||
| 408 | /* In all current cases, num_pipes is equivalent to the PCH_NOP setting | 408 | /* In all current cases, num_pipes is equivalent to the PCH_NOP setting |
| 409 | * (which really amounts to a PCH but no South Display). | 409 | * (which really amounts to a PCH but no South Display). |
| @@ -424,12 +424,9 @@ void intel_detect_pch(struct drm_device *dev) | |||
| 424 | * all the ISA bridge devices and check for the first match, instead | 424 | * all the ISA bridge devices and check for the first match, instead |
| 425 | * of only checking the first one. | 425 | * of only checking the first one. |
| 426 | */ | 426 | */ |
| 427 | pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL); | 427 | while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) { |
| 428 | while (pch) { | ||
| 429 | struct pci_dev *curr = pch; | ||
| 430 | if (pch->vendor == PCI_VENDOR_ID_INTEL) { | 428 | if (pch->vendor == PCI_VENDOR_ID_INTEL) { |
| 431 | unsigned short id; | 429 | unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK; |
| 432 | id = pch->device & INTEL_PCH_DEVICE_ID_MASK; | ||
| 433 | dev_priv->pch_id = id; | 430 | dev_priv->pch_id = id; |
| 434 | 431 | ||
| 435 | if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) { | 432 | if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) { |
| @@ -461,18 +458,16 @@ void intel_detect_pch(struct drm_device *dev) | |||
| 461 | DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); | 458 | DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); |
| 462 | WARN_ON(!IS_HASWELL(dev)); | 459 | WARN_ON(!IS_HASWELL(dev)); |
| 463 | WARN_ON(!IS_ULT(dev)); | 460 | WARN_ON(!IS_ULT(dev)); |
| 464 | } else { | 461 | } else |
| 465 | goto check_next; | 462 | continue; |
| 466 | } | 463 | |
| 467 | pci_dev_put(pch); | ||
| 468 | break; | 464 | break; |
| 469 | } | 465 | } |
| 470 | check_next: | ||
| 471 | pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, curr); | ||
| 472 | pci_dev_put(curr); | ||
| 473 | } | 466 | } |
| 474 | if (!pch) | 467 | if (!pch) |
| 475 | DRM_DEBUG_KMS("No PCH found?\n"); | 468 | DRM_DEBUG_KMS("No PCH found.\n"); |
| 469 | |||
| 470 | pci_dev_put(pch); | ||
| 476 | } | 471 | } |
| 477 | 472 | ||
| 478 | bool i915_semaphore_is_enabled(struct drm_device *dev) | 473 | bool i915_semaphore_is_enabled(struct drm_device *dev) |
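Note: intel_detect_pch() now uses the standard pci_get_class() iteration: passing the previously returned device back in continues the search, and the PCI core drops the reference on that previous device while returning the next one already referenced. The only explicit pci_dev_put() needed is therefore on whatever the loop leaves in pch, and pci_dev_put(NULL) is a no-op, so the "no PCH found" case needs no special handling. Sketch of the idiom:

    struct pci_dev *pch = NULL;

    while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
            if (pch->vendor != PCI_VENDOR_ID_INTEL)
                    continue;               /* pci_get_class() releases the old reference for us */

            /* ... inspect pch->device and record the PCH type ... */
            break;                          /* keep the reference on the match */
    }

    pci_dev_put(pch);                       /* safe for both a match and NULL */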
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 1a24e84f2315..28d24caa49f3 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c | |||
| @@ -82,9 +82,22 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) | |||
| 82 | r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size, | 82 | r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size, |
| 83 | "Graphics Stolen Memory"); | 83 | "Graphics Stolen Memory"); |
| 84 | if (r == NULL) { | 84 | if (r == NULL) { |
| 85 | DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n", | 85 | /* |
| 86 | base, base + (uint32_t)dev_priv->gtt.stolen_size); | 86 | * One more attempt but this time requesting region from |
| 87 | base = 0; | 87 | * base + 1, as we have seen that this resolves the region |
| 88 | * conflict with the PCI Bus. | ||
| 89 | * This is a BIOS w/a: Some BIOS wrap stolen in the root | ||
| 90 | * PCI bus, but have an off-by-one error. Hence retry the | ||
| 91 | * reservation starting from 1 instead of 0. | ||
| 92 | */ | ||
| 93 | r = devm_request_mem_region(dev->dev, base + 1, | ||
| 94 | dev_priv->gtt.stolen_size - 1, | ||
| 95 | "Graphics Stolen Memory"); | ||
| 96 | if (r == NULL) { | ||
| 97 | DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n", | ||
| 98 | base, base + (uint32_t)dev_priv->gtt.stolen_size); | ||
| 99 | base = 0; | ||
| 100 | } | ||
| 88 | } | 101 | } |
| 89 | 102 | ||
| 90 | return base; | 103 | return base; |
| @@ -201,6 +214,13 @@ int i915_gem_init_stolen(struct drm_device *dev) | |||
| 201 | struct drm_i915_private *dev_priv = dev->dev_private; | 214 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 202 | int bios_reserved = 0; | 215 | int bios_reserved = 0; |
| 203 | 216 | ||
| 217 | #ifdef CONFIG_INTEL_IOMMU | ||
| 218 | if (intel_iommu_gfx_mapped) { | ||
| 219 | DRM_INFO("DMAR active, disabling use of stolen memory\n"); | ||
| 220 | return 0; | ||
| 221 | } | ||
| 222 | #endif | ||
| 223 | |||
| 204 | if (dev_priv->gtt.stolen_size == 0) | 224 | if (dev_priv->gtt.stolen_size == 0) |
| 205 | return 0; | 225 | return 0; |
| 206 | 226 | ||
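Note: two independent guards are added to the stolen-memory setup: it is skipped entirely when the IOMMU is mapping graphics (intel_iommu_gfx_mapped), and the reservation of the stolen range is retried from base + 1 to dodge BIOSes that wrap the region into the root PCI bus resource with an off-by-one. The retry, condensed from the hunk with size standing in for dev_priv->gtt.stolen_size:

    r = devm_request_mem_region(dev->dev, base, size, "Graphics Stolen Memory");
    if (r == NULL) {
            /* BIOS w/a: retry one byte in to avoid the off-by-one conflict */
            r = devm_request_mem_region(dev->dev, base + 1, size - 1,
                                        "Graphics Stolen Memory");
            if (r == NULL)
                    base = 0;               /* give up on using stolen memory */
    }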
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 9fec71175571..d554169ac592 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
| @@ -618,33 +618,25 @@ static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) | |||
| 618 | 618 | ||
| 619 | /* raw reads, only for fast reads of display block, no need for forcewake etc. */ | 619 | /* raw reads, only for fast reads of display block, no need for forcewake etc. */ |
| 620 | #define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__)) | 620 | #define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__)) |
| 621 | #define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__)) | ||
| 622 | 621 | ||
| 623 | static bool ilk_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe) | 622 | static bool ilk_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe) |
| 624 | { | 623 | { |
| 625 | struct drm_i915_private *dev_priv = dev->dev_private; | 624 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 626 | uint32_t status; | 625 | uint32_t status; |
| 627 | 626 | int reg; | |
| 628 | if (INTEL_INFO(dev)->gen < 7) { | 627 | |
| 629 | status = pipe == PIPE_A ? | 628 | if (INTEL_INFO(dev)->gen >= 8) { |
| 630 | DE_PIPEA_VBLANK : | 629 | status = GEN8_PIPE_VBLANK; |
| 631 | DE_PIPEB_VBLANK; | 630 | reg = GEN8_DE_PIPE_ISR(pipe); |
| 631 | } else if (INTEL_INFO(dev)->gen >= 7) { | ||
| 632 | status = DE_PIPE_VBLANK_IVB(pipe); | ||
| 633 | reg = DEISR; | ||
| 632 | } else { | 634 | } else { |
| 633 | switch (pipe) { | 635 | status = DE_PIPE_VBLANK(pipe); |
| 634 | default: | 636 | reg = DEISR; |
| 635 | case PIPE_A: | ||
| 636 | status = DE_PIPEA_VBLANK_IVB; | ||
| 637 | break; | ||
| 638 | case PIPE_B: | ||
| 639 | status = DE_PIPEB_VBLANK_IVB; | ||
| 640 | break; | ||
| 641 | case PIPE_C: | ||
| 642 | status = DE_PIPEC_VBLANK_IVB; | ||
| 643 | break; | ||
| 644 | } | ||
| 645 | } | 637 | } |
| 646 | 638 | ||
| 647 | return __raw_i915_read32(dev_priv, DEISR) & status; | 639 | return __raw_i915_read32(dev_priv, reg) & status; |
| 648 | } | 640 | } |
| 649 | 641 | ||
| 650 | static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, | 642 | static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, |
| @@ -702,7 +694,28 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, | |||
| 702 | else | 694 | else |
| 703 | position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; | 695 | position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; |
| 704 | 696 | ||
| 705 | if (HAS_PCH_SPLIT(dev)) { | 697 | if (HAS_DDI(dev)) { |
| 698 | /* | ||
| 699 | * On HSW HDMI outputs there seems to be a 2 line | ||
| 700 | * difference, whereas eDP has the normal 1 line | ||
| 701 | * difference that earlier platforms have. External | ||
| 702 | * DP is unknown. For now just check for the 2 line | ||
| 703 | * difference case on all output types on HSW+. | ||
| 704 | * | ||
| 705 | * This might misinterpret the scanline counter being | ||
| 706 | * one line too far along on eDP, but that's less | ||
| 707 | * dangerous than the alternative since that would lead | ||
| 708 | * the vblank timestamp code astray when it sees a | ||
| 709 | * scanline count before vblank_start during a vblank | ||
| 710 | * interrupt. | ||
| 711 | */ | ||
| 712 | in_vbl = ilk_pipe_in_vblank_locked(dev, pipe); | ||
| 713 | if ((in_vbl && (position == vbl_start - 2 || | ||
| 714 | position == vbl_start - 1)) || | ||
| 715 | (!in_vbl && (position == vbl_end - 2 || | ||
| 716 | position == vbl_end - 1))) | ||
| 717 | position = (position + 2) % vtotal; | ||
| 718 | } else if (HAS_PCH_SPLIT(dev)) { | ||
| 706 | /* | 719 | /* |
| 707 | * The scanline counter increments at the leading edge | 720 | * The scanline counter increments at the leading edge |
| 708 | * of hsync, ie. it completely misses the active portion | 721 | * of hsync, ie. it completely misses the active portion |
| @@ -2769,10 +2782,9 @@ static void ibx_irq_postinstall(struct drm_device *dev) | |||
| 2769 | return; | 2782 | return; |
| 2770 | 2783 | ||
| 2771 | if (HAS_PCH_IBX(dev)) { | 2784 | if (HAS_PCH_IBX(dev)) { |
| 2772 | mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER | | 2785 | mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; |
| 2773 | SDE_TRANSA_FIFO_UNDER | SDE_POISON; | ||
| 2774 | } else { | 2786 | } else { |
| 2775 | mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT; | 2787 | mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; |
| 2776 | 2788 | ||
| 2777 | I915_WRITE(SERR_INT, I915_READ(SERR_INT)); | 2789 | I915_WRITE(SERR_INT, I915_READ(SERR_INT)); |
| 2778 | } | 2790 | } |
| @@ -2832,20 +2844,19 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
| 2832 | display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | | 2844 | display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | |
| 2833 | DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | | 2845 | DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | |
| 2834 | DE_PLANEB_FLIP_DONE_IVB | | 2846 | DE_PLANEB_FLIP_DONE_IVB | |
| 2835 | DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB | | 2847 | DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); |
| 2836 | DE_ERR_INT_IVB); | ||
| 2837 | extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | | 2848 | extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | |
| 2838 | DE_PIPEA_VBLANK_IVB); | 2849 | DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB); |
| 2839 | 2850 | ||
| 2840 | I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); | 2851 | I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); |
| 2841 | } else { | 2852 | } else { |
| 2842 | display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | | 2853 | display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | |
| 2843 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | | 2854 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | |
| 2844 | DE_AUX_CHANNEL_A | | 2855 | DE_AUX_CHANNEL_A | |
| 2845 | DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | | ||
| 2846 | DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | | 2856 | DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | |
| 2847 | DE_POISON); | 2857 | DE_POISON); |
| 2848 | extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT; | 2858 | extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | |
| 2859 | DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN; | ||
| 2849 | } | 2860 | } |
| 2850 | 2861 | ||
| 2851 | dev_priv->irq_mask = ~display_mask; | 2862 | dev_priv->irq_mask = ~display_mask; |
| @@ -2961,9 +2972,9 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) | |||
| 2961 | struct drm_device *dev = dev_priv->dev; | 2972 | struct drm_device *dev = dev_priv->dev; |
| 2962 | uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE | | 2973 | uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE | |
| 2963 | GEN8_PIPE_CDCLK_CRC_DONE | | 2974 | GEN8_PIPE_CDCLK_CRC_DONE | |
| 2964 | GEN8_PIPE_FIFO_UNDERRUN | | ||
| 2965 | GEN8_DE_PIPE_IRQ_FAULT_ERRORS; | 2975 | GEN8_DE_PIPE_IRQ_FAULT_ERRORS; |
| 2966 | uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK; | 2976 | uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | |
| 2977 | GEN8_PIPE_FIFO_UNDERRUN; | ||
| 2967 | int pipe; | 2978 | int pipe; |
| 2968 | dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; | 2979 | dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; |
| 2969 | dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; | 2980 | dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; |
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index e06b9e017d6b..234ac5f7bc5a 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
| @@ -1244,6 +1244,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder) | |||
| 1244 | if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { | 1244 | if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { |
| 1245 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | 1245 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
| 1246 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); | 1246 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); |
| 1247 | ironlake_edp_panel_vdd_on(intel_dp); | ||
| 1247 | ironlake_edp_panel_off(intel_dp); | 1248 | ironlake_edp_panel_off(intel_dp); |
| 1248 | } | 1249 | } |
| 1249 | 1250 | ||
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 4c1672809493..9b8a7c7ea7fc 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -1092,12 +1092,12 @@ static void assert_cursor(struct drm_i915_private *dev_priv, | |||
| 1092 | struct drm_device *dev = dev_priv->dev; | 1092 | struct drm_device *dev = dev_priv->dev; |
| 1093 | bool cur_state; | 1093 | bool cur_state; |
| 1094 | 1094 | ||
| 1095 | if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) | 1095 | if (IS_845G(dev) || IS_I865G(dev)) |
| 1096 | cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE; | ||
| 1097 | else if (IS_845G(dev) || IS_I865G(dev)) | ||
| 1098 | cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE; | 1096 | cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE; |
| 1099 | else | 1097 | else if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) |
| 1100 | cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; | 1098 | cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; |
| 1099 | else | ||
| 1100 | cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE; | ||
| 1101 | 1101 | ||
| 1102 | WARN(cur_state != state, | 1102 | WARN(cur_state != state, |
| 1103 | "cursor on pipe %c assertion failure (expected %s, current %s)\n", | 1103 | "cursor on pipe %c assertion failure (expected %s, current %s)\n", |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 57552eb386b0..2688f6d64bb9 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
| @@ -1249,17 +1249,24 @@ void ironlake_edp_panel_off(struct intel_dp *intel_dp) | |||
| 1249 | 1249 | ||
| 1250 | DRM_DEBUG_KMS("Turn eDP power off\n"); | 1250 | DRM_DEBUG_KMS("Turn eDP power off\n"); |
| 1251 | 1251 | ||
| 1252 | WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n"); | ||
| 1253 | |||
| 1252 | pp = ironlake_get_pp_control(intel_dp); | 1254 | pp = ironlake_get_pp_control(intel_dp); |
| 1253 | /* We need to switch off panel power _and_ force vdd, for otherwise some | 1255 | /* We need to switch off panel power _and_ force vdd, for otherwise some |
| 1254 | * panels get very unhappy and cease to work. */ | 1256 | * panels get very unhappy and cease to work. */ |
| 1255 | pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE); | 1257 | pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE); |
| 1256 | 1258 | ||
| 1257 | pp_ctrl_reg = _pp_ctrl_reg(intel_dp); | 1259 | pp_ctrl_reg = _pp_ctrl_reg(intel_dp); |
| 1258 | 1260 | ||
| 1259 | I915_WRITE(pp_ctrl_reg, pp); | 1261 | I915_WRITE(pp_ctrl_reg, pp); |
| 1260 | POSTING_READ(pp_ctrl_reg); | 1262 | POSTING_READ(pp_ctrl_reg); |
| 1261 | 1263 | ||
| 1264 | intel_dp->want_panel_vdd = false; | ||
| 1265 | |||
| 1262 | ironlake_wait_panel_off(intel_dp); | 1266 | ironlake_wait_panel_off(intel_dp); |
| 1267 | |||
| 1268 | /* We got a reference when we enabled the VDD. */ | ||
| 1269 | intel_runtime_pm_put(dev_priv); | ||
| 1263 | } | 1270 | } |
| 1264 | 1271 | ||
| 1265 | void ironlake_edp_backlight_on(struct intel_dp *intel_dp) | 1272 | void ironlake_edp_backlight_on(struct intel_dp *intel_dp) |
| @@ -1639,7 +1646,7 @@ static void intel_edp_psr_enable_source(struct intel_dp *intel_dp) | |||
| 1639 | val |= EDP_PSR_LINK_DISABLE; | 1646 | val |= EDP_PSR_LINK_DISABLE; |
| 1640 | 1647 | ||
| 1641 | I915_WRITE(EDP_PSR_CTL(dev), val | | 1648 | I915_WRITE(EDP_PSR_CTL(dev), val | |
| 1642 | IS_BROADWELL(dev) ? 0 : link_entry_time | | 1649 | (IS_BROADWELL(dev) ? 0 : link_entry_time) | |
| 1643 | max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT | | 1650 | max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT | |
| 1644 | idle_frames << EDP_PSR_IDLE_FRAME_SHIFT | | 1651 | idle_frames << EDP_PSR_IDLE_FRAME_SHIFT | |
| 1645 | EDP_PSR_ENABLE); | 1652 | EDP_PSR_ENABLE); |
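Note: the added parentheses matter because | binds tighter than ?:. Without them the expression parsed as (val | IS_BROADWELL(dev)) ? 0 : (...), so the previously accumulated val became part of the ternary condition and, whenever it was non-zero, the register was written with 0 instead of the intended bit mask. A small self-contained illustration of the precedence trap (all values made up; is_bdw stands in for IS_BROADWELL()):

    #include <stdio.h>

    int main(void)
    {
            unsigned val = 0x80000000, link = 0x40, rest = 0x03;
            int is_bdw = 1;

            unsigned wrong = val | is_bdw ? 0 : link | rest;     /* parsed as (val|is_bdw) ? 0 : (link|rest) -> 0 */
            unsigned right = val | (is_bdw ? 0 : link) | rest;   /* -> 0x80000003 */

            printf("wrong=0x%x right=0x%x\n", wrong, right);
            return 0;
    }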
| @@ -1784,6 +1791,7 @@ static void intel_disable_dp(struct intel_encoder *encoder) | |||
| 1784 | 1791 | ||
| 1785 | /* Make sure the panel is off before trying to change the mode. But also | 1792 | /* Make sure the panel is off before trying to change the mode. But also |
| 1786 | * ensure that we have vdd while we switch off the panel. */ | 1793 | * ensure that we have vdd while we switch off the panel. */ |
| 1794 | ironlake_edp_panel_vdd_on(intel_dp); | ||
| 1787 | ironlake_edp_backlight_off(intel_dp); | 1795 | ironlake_edp_backlight_off(intel_dp); |
| 1788 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); | 1796 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); |
| 1789 | ironlake_edp_panel_off(intel_dp); | 1797 | ironlake_edp_panel_off(intel_dp); |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 6db0d9d17f47..ee3181ebcc92 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
| @@ -845,7 +845,7 @@ static int hdmi_portclock_limit(struct intel_hdmi *hdmi) | |||
| 845 | { | 845 | { |
| 846 | struct drm_device *dev = intel_hdmi_to_dev(hdmi); | 846 | struct drm_device *dev = intel_hdmi_to_dev(hdmi); |
| 847 | 847 | ||
| 848 | if (IS_G4X(dev)) | 848 | if (!hdmi->has_hdmi_sink || IS_G4X(dev)) |
| 849 | return 165000; | 849 | return 165000; |
| 850 | else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) | 850 | else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) |
| 851 | return 300000; | 851 | return 300000; |
| @@ -899,8 +899,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, | |||
| 899 | * outputs. We also need to check that the higher clock still fits | 899 | * outputs. We also need to check that the higher clock still fits |
| 900 | * within limits. | 900 | * within limits. |
| 901 | */ | 901 | */ |
| 902 | if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= portclock_limit | 902 | if (pipe_config->pipe_bpp > 8*3 && intel_hdmi->has_hdmi_sink && |
| 903 | && HAS_PCH_SPLIT(dev)) { | 903 | clock_12bpc <= portclock_limit && HAS_PCH_SPLIT(dev)) { |
| 904 | DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n"); | 904 | DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n"); |
| 905 | desired_bpp = 12*3; | 905 | desired_bpp = 12*3; |
| 906 | 906 | ||
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 350de359123a..079ea38f14d9 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
| @@ -698,7 +698,7 @@ static void i9xx_enable_backlight(struct intel_connector *connector) | |||
| 698 | freq /= 0xff; | 698 | freq /= 0xff; |
| 699 | 699 | ||
| 700 | ctl = freq << 17; | 700 | ctl = freq << 17; |
| 701 | if (IS_GEN2(dev) && panel->backlight.combination_mode) | 701 | if (panel->backlight.combination_mode) |
| 702 | ctl |= BLM_LEGACY_MODE; | 702 | ctl |= BLM_LEGACY_MODE; |
| 703 | if (IS_PINEVIEW(dev) && panel->backlight.active_low_pwm) | 703 | if (IS_PINEVIEW(dev) && panel->backlight.active_low_pwm) |
| 704 | ctl |= BLM_POLARITY_PNV; | 704 | ctl |= BLM_POLARITY_PNV; |
| @@ -979,7 +979,7 @@ static int i9xx_setup_backlight(struct intel_connector *connector) | |||
| 979 | 979 | ||
| 980 | ctl = I915_READ(BLC_PWM_CTL); | 980 | ctl = I915_READ(BLC_PWM_CTL); |
| 981 | 981 | ||
| 982 | if (IS_GEN2(dev)) | 982 | if (IS_GEN2(dev) || IS_I915GM(dev) || IS_I945GM(dev)) |
| 983 | panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE; | 983 | panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE; |
| 984 | 984 | ||
| 985 | if (IS_PINEVIEW(dev)) | 985 | if (IS_PINEVIEW(dev)) |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index d77cc81900f9..e1fc35a72656 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
| @@ -3493,6 +3493,8 @@ static void valleyview_setup_pctx(struct drm_device *dev) | |||
| 3493 | u32 pcbr; | 3493 | u32 pcbr; |
| 3494 | int pctx_size = 24*1024; | 3494 | int pctx_size = 24*1024; |
| 3495 | 3495 | ||
| 3496 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | ||
| 3497 | |||
| 3496 | pcbr = I915_READ(VLV_PCBR); | 3498 | pcbr = I915_READ(VLV_PCBR); |
| 3497 | if (pcbr) { | 3499 | if (pcbr) { |
| 3498 | /* BIOS set it up already, grab the pre-alloc'd space */ | 3500 | /* BIOS set it up already, grab the pre-alloc'd space */ |
| @@ -3542,8 +3544,6 @@ static void valleyview_enable_rps(struct drm_device *dev) | |||
| 3542 | I915_WRITE(GTFIFODBG, gtfifodbg); | 3544 | I915_WRITE(GTFIFODBG, gtfifodbg); |
| 3543 | } | 3545 | } |
| 3544 | 3546 | ||
| 3545 | valleyview_setup_pctx(dev); | ||
| 3546 | |||
| 3547 | /* If VLV, Forcewake all wells, else re-direct to regular path */ | 3547 | /* If VLV, Forcewake all wells, else re-direct to regular path */ |
| 3548 | gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); | 3548 | gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); |
| 3549 | 3549 | ||
| @@ -4395,6 +4395,8 @@ void intel_enable_gt_powersave(struct drm_device *dev) | |||
| 4395 | ironlake_enable_rc6(dev); | 4395 | ironlake_enable_rc6(dev); |
| 4396 | intel_init_emon(dev); | 4396 | intel_init_emon(dev); |
| 4397 | } else if (IS_GEN6(dev) || IS_GEN7(dev)) { | 4397 | } else if (IS_GEN6(dev) || IS_GEN7(dev)) { |
| 4398 | if (IS_VALLEYVIEW(dev)) | ||
| 4399 | valleyview_setup_pctx(dev); | ||
| 4398 | /* | 4400 | /* |
| 4399 | * PCU communication is slow and this doesn't need to be | 4401 | * PCU communication is slow and this doesn't need to be |
| 4400 | * done at any specific time, so do this out of our fast path | 4402 | * done at any specific time, so do this out of our fast path |
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index 2cec2ab02f80..607dc14d195e 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c | |||
| @@ -1314,7 +1314,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
| 1314 | } | 1314 | } |
| 1315 | if (is_dp) | 1315 | if (is_dp) |
| 1316 | args.v5.ucLaneNum = dp_lane_count; | 1316 | args.v5.ucLaneNum = dp_lane_count; |
| 1317 | else if (radeon_encoder->pixel_clock > 165000) | 1317 | else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) |
| 1318 | args.v5.ucLaneNum = 8; | 1318 | args.v5.ucLaneNum = 8; |
| 1319 | else | 1319 | else |
| 1320 | args.v5.ucLaneNum = 4; | 1320 | args.v5.ucLaneNum = 4; |
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index e6419ca7cd37..bbb17841a9e5 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c | |||
| @@ -3046,7 +3046,7 @@ static u32 cik_create_bitmask(u32 bit_width) | |||
| 3046 | } | 3046 | } |
| 3047 | 3047 | ||
| 3048 | /** | 3048 | /** |
| 3049 | * cik_select_se_sh - select which SE, SH to address | 3049 | * cik_get_rb_disabled - computes the mask of disabled RBs |
| 3050 | * | 3050 | * |
| 3051 | * @rdev: radeon_device pointer | 3051 | * @rdev: radeon_device pointer |
| 3052 | * @max_rb_num: max RBs (render backends) for the asic | 3052 | * @max_rb_num: max RBs (render backends) for the asic |
| @@ -4134,8 +4134,11 @@ static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable) | |||
| 4134 | { | 4134 | { |
| 4135 | if (enable) | 4135 | if (enable) |
| 4136 | WREG32(CP_MEC_CNTL, 0); | 4136 | WREG32(CP_MEC_CNTL, 0); |
| 4137 | else | 4137 | else { |
| 4138 | WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT)); | 4138 | WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT)); |
| 4139 | rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false; | ||
| 4140 | rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; | ||
| 4141 | } | ||
| 4139 | udelay(50); | 4142 | udelay(50); |
| 4140 | } | 4143 | } |
| 4141 | 4144 | ||
| @@ -7902,7 +7905,8 @@ int cik_resume(struct radeon_device *rdev) | |||
| 7902 | /* init golden registers */ | 7905 | /* init golden registers */ |
| 7903 | cik_init_golden_registers(rdev); | 7906 | cik_init_golden_registers(rdev); |
| 7904 | 7907 | ||
| 7905 | radeon_pm_resume(rdev); | 7908 | if (rdev->pm.pm_method == PM_METHOD_DPM) |
| 7909 | radeon_pm_resume(rdev); | ||
| 7906 | 7910 | ||
| 7907 | rdev->accel_working = true; | 7911 | rdev->accel_working = true; |
| 7908 | r = cik_startup(rdev); | 7912 | r = cik_startup(rdev); |
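Note: cik_resume() now calls radeon_pm_resume() only when the DPM power-management method is active, and the same guard appears in the evergreen, cayman and r600 resume paths below, while the r100/r300/r420/r520 paths drop the call altogether. Judging from the added PM_METHOD_DPM check in radeon_device.c further down, the legacy profile-based PM is presumably resumed later from the shared radeon_resume_kms() path once the rings are working again; that reading is an inference from this diff, not something stated in it. The cik.c hunk also marks the two compute rings as not ready when the MEC is halted, mirroring what cik_sdma.c does below for the DMA rings. The guard itself:

    if (rdev->pm.pm_method == PM_METHOD_DPM)
            radeon_pm_resume(rdev);     /* legacy PM is assumed to resume later in common code */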
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c index 1ecb3f1070e3..94626ea90fa5 100644 --- a/drivers/gpu/drm/radeon/cik_sdma.c +++ b/drivers/gpu/drm/radeon/cik_sdma.c | |||
| @@ -264,6 +264,8 @@ static void cik_sdma_gfx_stop(struct radeon_device *rdev) | |||
| 264 | WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl); | 264 | WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl); |
| 265 | WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0); | 265 | WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0); |
| 266 | } | 266 | } |
| 267 | rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false; | ||
| 268 | rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false; | ||
| 267 | } | 269 | } |
| 268 | 270 | ||
| 269 | /** | 271 | /** |
| @@ -291,6 +293,11 @@ void cik_sdma_enable(struct radeon_device *rdev, bool enable) | |||
| 291 | u32 me_cntl, reg_offset; | 293 | u32 me_cntl, reg_offset; |
| 292 | int i; | 294 | int i; |
| 293 | 295 | ||
| 296 | if (enable == false) { | ||
| 297 | cik_sdma_gfx_stop(rdev); | ||
| 298 | cik_sdma_rlc_stop(rdev); | ||
| 299 | } | ||
| 300 | |||
| 294 | for (i = 0; i < 2; i++) { | 301 | for (i = 0; i < 2; i++) { |
| 295 | if (i == 0) | 302 | if (i == 0) |
| 296 | reg_offset = SDMA0_REGISTER_OFFSET; | 303 | reg_offset = SDMA0_REGISTER_OFFSET; |
| @@ -420,10 +427,6 @@ static int cik_sdma_load_microcode(struct radeon_device *rdev) | |||
| 420 | if (!rdev->sdma_fw) | 427 | if (!rdev->sdma_fw) |
| 421 | return -EINVAL; | 428 | return -EINVAL; |
| 422 | 429 | ||
| 423 | /* stop the gfx rings and rlc compute queues */ | ||
| 424 | cik_sdma_gfx_stop(rdev); | ||
| 425 | cik_sdma_rlc_stop(rdev); | ||
| 426 | |||
| 427 | /* halt the MEs */ | 430 | /* halt the MEs */ |
| 428 | cik_sdma_enable(rdev, false); | 431 | cik_sdma_enable(rdev, false); |
| 429 | 432 | ||
| @@ -492,9 +495,6 @@ int cik_sdma_resume(struct radeon_device *rdev) | |||
| 492 | */ | 495 | */ |
| 493 | void cik_sdma_fini(struct radeon_device *rdev) | 496 | void cik_sdma_fini(struct radeon_device *rdev) |
| 494 | { | 497 | { |
| 495 | /* stop the gfx rings and rlc compute queues */ | ||
| 496 | cik_sdma_gfx_stop(rdev); | ||
| 497 | cik_sdma_rlc_stop(rdev); | ||
| 498 | /* halt the MEs */ | 498 | /* halt the MEs */ |
| 499 | cik_sdma_enable(rdev, false); | 499 | cik_sdma_enable(rdev, false); |
| 500 | radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]); | 500 | radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]); |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 8a2c010b7dc5..27b0ff16082e 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
| @@ -5299,7 +5299,8 @@ int evergreen_resume(struct radeon_device *rdev) | |||
| 5299 | /* init golden registers */ | 5299 | /* init golden registers */ |
| 5300 | evergreen_init_golden_registers(rdev); | 5300 | evergreen_init_golden_registers(rdev); |
| 5301 | 5301 | ||
| 5302 | radeon_pm_resume(rdev); | 5302 | if (rdev->pm.pm_method == PM_METHOD_DPM) |
| 5303 | radeon_pm_resume(rdev); | ||
| 5303 | 5304 | ||
| 5304 | rdev->accel_working = true; | 5305 | rdev->accel_working = true; |
| 5305 | r = evergreen_startup(rdev); | 5306 | r = evergreen_startup(rdev); |
diff --git a/drivers/gpu/drm/radeon/evergreen_smc.h b/drivers/gpu/drm/radeon/evergreen_smc.h index 76ada8cfe902..3a03ba37d043 100644 --- a/drivers/gpu/drm/radeon/evergreen_smc.h +++ b/drivers/gpu/drm/radeon/evergreen_smc.h | |||
| @@ -57,7 +57,7 @@ typedef struct SMC_Evergreen_MCRegisters SMC_Evergreen_MCRegisters; | |||
| 57 | 57 | ||
| 58 | #define EVERGREEN_SMC_FIRMWARE_HEADER_LOCATION 0x100 | 58 | #define EVERGREEN_SMC_FIRMWARE_HEADER_LOCATION 0x100 |
| 59 | 59 | ||
| 60 | #define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters 0x0 | 60 | #define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters 0x8 |
| 61 | #define EVERGREEN_SMC_FIRMWARE_HEADER_stateTable 0xC | 61 | #define EVERGREEN_SMC_FIRMWARE_HEADER_stateTable 0xC |
| 62 | #define EVERGREEN_SMC_FIRMWARE_HEADER_mcRegisterTable 0x20 | 62 | #define EVERGREEN_SMC_FIRMWARE_HEADER_mcRegisterTable 0x20 |
| 63 | 63 | ||
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index ea932ac66fc6..bf6300cfd62d 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c | |||
| @@ -2105,7 +2105,8 @@ int cayman_resume(struct radeon_device *rdev) | |||
| 2105 | /* init golden registers */ | 2105 | /* init golden registers */ |
| 2106 | ni_init_golden_registers(rdev); | 2106 | ni_init_golden_registers(rdev); |
| 2107 | 2107 | ||
| 2108 | radeon_pm_resume(rdev); | 2108 | if (rdev->pm.pm_method == PM_METHOD_DPM) |
| 2109 | radeon_pm_resume(rdev); | ||
| 2109 | 2110 | ||
| 2110 | rdev->accel_working = true; | 2111 | rdev->accel_working = true; |
| 2111 | r = cayman_startup(rdev); | 2112 | r = cayman_startup(rdev); |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index ef024ce3f7cc..3cc78bb66042 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
| @@ -3942,8 +3942,6 @@ int r100_resume(struct radeon_device *rdev) | |||
| 3942 | /* Initialize surface registers */ | 3942 | /* Initialize surface registers */ |
| 3943 | radeon_surface_init(rdev); | 3943 | radeon_surface_init(rdev); |
| 3944 | 3944 | ||
| 3945 | radeon_pm_resume(rdev); | ||
| 3946 | |||
| 3947 | rdev->accel_working = true; | 3945 | rdev->accel_working = true; |
| 3948 | r = r100_startup(rdev); | 3946 | r = r100_startup(rdev); |
| 3949 | if (r) { | 3947 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 7c63ef840e86..0b658b34b33a 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
| @@ -1430,8 +1430,6 @@ int r300_resume(struct radeon_device *rdev) | |||
| 1430 | /* Initialize surface registers */ | 1430 | /* Initialize surface registers */ |
| 1431 | radeon_surface_init(rdev); | 1431 | radeon_surface_init(rdev); |
| 1432 | 1432 | ||
| 1433 | radeon_pm_resume(rdev); | ||
| 1434 | |||
| 1435 | rdev->accel_working = true; | 1433 | rdev->accel_working = true; |
| 1436 | r = r300_startup(rdev); | 1434 | r = r300_startup(rdev); |
| 1437 | if (r) { | 1435 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 3768aab2710b..802b19220a21 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c | |||
| @@ -325,8 +325,6 @@ int r420_resume(struct radeon_device *rdev) | |||
| 325 | /* Initialize surface registers */ | 325 | /* Initialize surface registers */ |
| 326 | radeon_surface_init(rdev); | 326 | radeon_surface_init(rdev); |
| 327 | 327 | ||
| 328 | radeon_pm_resume(rdev); | ||
| 329 | |||
| 330 | rdev->accel_working = true; | 328 | rdev->accel_working = true; |
| 331 | r = r420_startup(rdev); | 329 | r = r420_startup(rdev); |
| 332 | if (r) { | 330 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index e209eb75024f..98d6053c36c6 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c | |||
| @@ -240,8 +240,6 @@ int r520_resume(struct radeon_device *rdev) | |||
| 240 | /* Initialize surface registers */ | 240 | /* Initialize surface registers */ |
| 241 | radeon_surface_init(rdev); | 241 | radeon_surface_init(rdev); |
| 242 | 242 | ||
| 243 | radeon_pm_resume(rdev); | ||
| 244 | |||
| 245 | rdev->accel_working = true; | 243 | rdev->accel_working = true; |
| 246 | r = r520_startup(rdev); | 244 | r = r520_startup(rdev); |
| 247 | if (r) { | 245 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index cdbc4171fe73..647ef4079217 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
| @@ -2968,7 +2968,8 @@ int r600_resume(struct radeon_device *rdev) | |||
| 2968 | /* post card */ | 2968 | /* post card */ |
| 2969 | atom_asic_init(rdev->mode_info.atom_context); | 2969 | atom_asic_init(rdev->mode_info.atom_context); |
| 2970 | 2970 | ||
| 2971 | radeon_pm_resume(rdev); | 2971 | if (rdev->pm.pm_method == PM_METHOD_DPM) |
| 2972 | radeon_pm_resume(rdev); | ||
| 2972 | 2973 | ||
| 2973 | rdev->accel_working = true; | 2974 | rdev->accel_working = true; |
| 2974 | r = r600_startup(rdev); | 2975 | r = r600_startup(rdev); |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index b012cbbc3ed5..044bc98fb459 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
| @@ -1521,13 +1521,16 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) | |||
| 1521 | if (r) | 1521 | if (r) |
| 1522 | DRM_ERROR("ib ring test failed (%d).\n", r); | 1522 | DRM_ERROR("ib ring test failed (%d).\n", r); |
| 1523 | 1523 | ||
| 1524 | if (rdev->pm.dpm_enabled) { | 1524 | if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { |
| 1525 | /* do dpm late init */ | 1525 | /* do dpm late init */ |
| 1526 | r = radeon_pm_late_init(rdev); | 1526 | r = radeon_pm_late_init(rdev); |
| 1527 | if (r) { | 1527 | if (r) { |
| 1528 | rdev->pm.dpm_enabled = false; | 1528 | rdev->pm.dpm_enabled = false; |
| 1529 | DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n"); | 1529 | DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n"); |
| 1530 | } | 1530 | } |
| 1531 | } else { | ||
| 1532 | /* resume old pm late */ | ||
| 1533 | radeon_pm_resume(rdev); | ||
| 1531 | } | 1534 | } |
| 1532 | 1535 | ||
| 1533 | radeon_restore_bios_scratch_regs(rdev); | 1536 | radeon_restore_bios_scratch_regs(rdev); |
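Editorial note: the radeon resume hunks above all follow one pattern — the per-ASIC resume paths call radeon_pm_resume() early only when dynamic power management (DPM) is in use, while the legacy profile-based PM is resumed later from radeon_resume_kms(), after the rings are back up. A minimal sketch of that ordering, assuming the struct radeon_device fields, PM_METHOD_DPM, and the radeon_pm_resume()/radeon_pm_late_init() helpers shown in the diff (the function name here is hypothetical, not driver code):

/* Illustrative sketch only, not the driver's actual resume function. */
static void radeon_resume_pm_sketch(struct radeon_device *rdev)
{
	/* DPM has to be re-enabled before the ASIC starts up again ... */
	if (rdev->pm.pm_method == PM_METHOD_DPM)
		radeon_pm_resume(rdev);

	/* ... ASIC startup and ring/IB tests would run here ... */

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* DPM late init needs working rings, so it runs last */
		if (radeon_pm_late_init(rdev))
			rdev->pm.dpm_enabled = false;
	} else {
		/* the old profile-based PM is only safe to resume this late */
		radeon_pm_resume(rdev);
	}
}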
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 2aecd6dc2610..66ed3ea71440 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
| @@ -33,6 +33,13 @@ | |||
| 33 | #include <linux/vga_switcheroo.h> | 33 | #include <linux/vga_switcheroo.h> |
| 34 | #include <linux/slab.h> | 34 | #include <linux/slab.h> |
| 35 | #include <linux/pm_runtime.h> | 35 | #include <linux/pm_runtime.h> |
| 36 | |||
| 37 | #if defined(CONFIG_VGA_SWITCHEROO) | ||
| 38 | bool radeon_is_px(void); | ||
| 39 | #else | ||
| 40 | static inline bool radeon_is_px(void) { return false; } | ||
| 41 | #endif | ||
| 42 | |||
| 36 | /** | 43 | /** |
| 37 | * radeon_driver_unload_kms - Main unload function for KMS. | 44 | * radeon_driver_unload_kms - Main unload function for KMS. |
| 38 | * | 45 | * |
| @@ -130,7 +137,8 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) | |||
| 130 | "Error during ACPI methods call\n"); | 137 | "Error during ACPI methods call\n"); |
| 131 | } | 138 | } |
| 132 | 139 | ||
| 133 | if (radeon_runtime_pm != 0) { | 140 | if ((radeon_runtime_pm == 1) || |
| 141 | ((radeon_runtime_pm == -1) && radeon_is_px())) { | ||
| 134 | pm_runtime_use_autosuspend(dev->dev); | 142 | pm_runtime_use_autosuspend(dev->dev); |
| 135 | pm_runtime_set_autosuspend_delay(dev->dev, 5000); | 143 | pm_runtime_set_autosuspend_delay(dev->dev, 5000); |
| 136 | pm_runtime_set_active(dev->dev); | 144 | pm_runtime_set_active(dev->dev); |
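Editorial note: the radeon_kms.c hunk gates runtime power management on an explicit module parameter or on PX (hybrid graphics) detection via the radeon_is_px() declaration added above. A hedged sketch of that decision, assuming radeon_runtime_pm follows the usual -1/0/1 (auto/off/on) convention (helper name is hypothetical):

static bool radeon_want_runtime_pm_sketch(int radeon_runtime_pm)
{
	if (radeon_runtime_pm == 1)		/* forced on */
		return true;
	if (radeon_runtime_pm == -1)		/* auto: only on PX laptops */
		return radeon_is_px();
	return false;				/* 0 (or anything else): off */
}

When this returns true, the pm_runtime_use_autosuspend()/pm_runtime_set_autosuspend_delay() calls shown in the hunk would follow.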
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 77f5b0c3edb8..040a2a10ea17 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
| @@ -714,6 +714,9 @@ int radeon_ttm_init(struct radeon_device *rdev) | |||
| 714 | DRM_ERROR("Failed initializing VRAM heap.\n"); | 714 | DRM_ERROR("Failed initializing VRAM heap.\n"); |
| 715 | return r; | 715 | return r; |
| 716 | } | 716 | } |
| 717 | /* Change the size here instead of the init above so only lpfn is affected */ | ||
| 718 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); | ||
| 719 | |||
| 717 | r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true, | 720 | r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true, |
| 718 | RADEON_GEM_DOMAIN_VRAM, | 721 | RADEON_GEM_DOMAIN_VRAM, |
| 719 | NULL, &rdev->stollen_vga_memory); | 722 | NULL, &rdev->stollen_vga_memory); |
| @@ -935,7 +938,7 @@ static ssize_t radeon_ttm_gtt_read(struct file *f, char __user *buf, | |||
| 935 | while (size) { | 938 | while (size) { |
| 936 | loff_t p = *pos / PAGE_SIZE; | 939 | loff_t p = *pos / PAGE_SIZE; |
| 937 | unsigned off = *pos & ~PAGE_MASK; | 940 | unsigned off = *pos & ~PAGE_MASK; |
| 938 | ssize_t cur_size = min(size, PAGE_SIZE - off); | 941 | size_t cur_size = min_t(size_t, size, PAGE_SIZE - off); |
| 939 | struct page *page; | 942 | struct page *page; |
| 940 | void *ptr; | 943 | void *ptr; |
| 941 | 944 | ||
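Editorial note: the debugfs read fix above swaps a signed ssize_t for size_t and uses min_t() so both operands are forced to one explicit unsigned type; the kernel's plain min() warns when its arguments have different types. Reduced to a sketch:

	/* Sketch: pick an explicit common type before comparing two lengths,
	 * so the unsigned (PAGE_SIZE - off) and the remaining byte count are
	 * never compared with mismatched signedness. */
	size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);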
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index b5c2369cda2f..130d5cc50d43 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c | |||
| @@ -474,8 +474,6 @@ int rs400_resume(struct radeon_device *rdev) | |||
| 474 | /* Initialize surface registers */ | 474 | /* Initialize surface registers */ |
| 475 | radeon_surface_init(rdev); | 475 | radeon_surface_init(rdev); |
| 476 | 476 | ||
| 477 | radeon_pm_resume(rdev); | ||
| 478 | |||
| 479 | rdev->accel_working = true; | 477 | rdev->accel_working = true; |
| 480 | r = rs400_startup(rdev); | 478 | r = rs400_startup(rdev); |
| 481 | if (r) { | 479 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index fdcde7693032..72d3616de08e 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
| @@ -1048,8 +1048,6 @@ int rs600_resume(struct radeon_device *rdev) | |||
| 1048 | /* Initialize surface registers */ | 1048 | /* Initialize surface registers */ |
| 1049 | radeon_surface_init(rdev); | 1049 | radeon_surface_init(rdev); |
| 1050 | 1050 | ||
| 1051 | radeon_pm_resume(rdev); | ||
| 1052 | |||
| 1053 | rdev->accel_working = true; | 1051 | rdev->accel_working = true; |
| 1054 | r = rs600_startup(rdev); | 1052 | r = rs600_startup(rdev); |
| 1055 | if (r) { | 1053 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 35950738bd5e..3462b64369bf 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
| @@ -756,8 +756,6 @@ int rs690_resume(struct radeon_device *rdev) | |||
| 756 | /* Initialize surface registers */ | 756 | /* Initialize surface registers */ |
| 757 | radeon_surface_init(rdev); | 757 | radeon_surface_init(rdev); |
| 758 | 758 | ||
| 759 | radeon_pm_resume(rdev); | ||
| 760 | |||
| 761 | rdev->accel_working = true; | 759 | rdev->accel_working = true; |
| 762 | r = rs690_startup(rdev); | 760 | r = rs690_startup(rdev); |
| 763 | if (r) { | 761 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 98e8138ff779..237dd29d9f1c 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c | |||
| @@ -586,8 +586,6 @@ int rv515_resume(struct radeon_device *rdev) | |||
| 586 | /* Initialize surface registers */ | 586 | /* Initialize surface registers */ |
| 587 | radeon_surface_init(rdev); | 587 | radeon_surface_init(rdev); |
| 588 | 588 | ||
| 589 | radeon_pm_resume(rdev); | ||
| 590 | |||
| 591 | rdev->accel_working = true; | 589 | rdev->accel_working = true; |
| 592 | r = rv515_startup(rdev); | 590 | r = rv515_startup(rdev); |
| 593 | if (r) { | 591 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 4e37a42305d8..fef310773aad 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
| @@ -1811,7 +1811,8 @@ int rv770_resume(struct radeon_device *rdev) | |||
| 1811 | /* init golden registers */ | 1811 | /* init golden registers */ |
| 1812 | rv770_init_golden_registers(rdev); | 1812 | rv770_init_golden_registers(rdev); |
| 1813 | 1813 | ||
| 1814 | radeon_pm_resume(rdev); | 1814 | if (rdev->pm.pm_method == PM_METHOD_DPM) |
| 1815 | radeon_pm_resume(rdev); | ||
| 1815 | 1816 | ||
| 1816 | rdev->accel_working = true; | 1817 | rdev->accel_working = true; |
| 1817 | r = rv770_startup(rdev); | 1818 | r = rv770_startup(rdev); |
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 83578324e5d1..9a124d0608b3 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
| @@ -6618,7 +6618,8 @@ int si_resume(struct radeon_device *rdev) | |||
| 6618 | /* init golden registers */ | 6618 | /* init golden registers */ |
| 6619 | si_init_golden_registers(rdev); | 6619 | si_init_golden_registers(rdev); |
| 6620 | 6620 | ||
| 6621 | radeon_pm_resume(rdev); | 6621 | if (rdev->pm.pm_method == PM_METHOD_DPM) |
| 6622 | radeon_pm_resume(rdev); | ||
| 6622 | 6623 | ||
| 6623 | rdev->accel_working = true; | 6624 | rdev->accel_working = true; |
| 6624 | r = si_startup(rdev); | 6625 | r = si_startup(rdev); |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index a06651309388..214b7992a3aa 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
| @@ -351,9 +351,11 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, | |||
| 351 | 351 | ||
| 352 | moved: | 352 | moved: |
| 353 | if (bo->evicted) { | 353 | if (bo->evicted) { |
| 354 | ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement); | 354 | if (bdev->driver->invalidate_caches) { |
| 355 | if (ret) | 355 | ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement); |
| 356 | pr_err("Can not flush read caches\n"); | 356 | if (ret) |
| 357 | pr_err("Can not flush read caches\n"); | ||
| 358 | } | ||
| 357 | bo->evicted = false; | 359 | bo->evicted = false; |
| 358 | } | 360 | } |
| 359 | 361 | ||
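Editorial note: the ttm_bo.c hunk is the standard "optional driver hook" pattern — a driver may leave invalidate_caches NULL, so the core must test the pointer before calling through it. A generic sketch of the pattern with hypothetical names (not TTM's real structures):

struct example_driver_ops {
	int (*invalidate_caches)(void *dev, unsigned long placement);
};

static void example_after_evict(struct example_driver_ops *ops,
				void *dev, unsigned long placement)
{
	/* treat the hook as optional: skip it when the driver left it NULL */
	if (ops->invalidate_caches) {
		if (ops->invalidate_caches(dev, placement))
			pr_err("Can not flush read caches\n");
	}
}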
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 801231c9ae48..0ce48e5a9cb4 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c | |||
| @@ -339,11 +339,13 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, | |||
| 339 | vma->vm_private_data = bo; | 339 | vma->vm_private_data = bo; |
| 340 | 340 | ||
| 341 | /* | 341 | /* |
| 342 | * PFNMAP is faster than MIXEDMAP due to reduced page | 342 | * We'd like to use VM_PFNMAP on shared mappings, where |
| 343 | * administration. So use MIXEDMAP only if private VMA, where | 343 | * (vma->vm_flags & VM_SHARED) != 0, for performance reasons, |
| 344 | * we need to support COW. | 344 | * but for some reason VM_PFNMAP + x86 PAT + write-combine is very |
| 345 | * bad for performance. Until that has been sorted out, use | ||
| 346 | * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719 | ||
| 345 | */ | 347 | */ |
| 346 | vma->vm_flags |= (vma->vm_flags & VM_SHARED) ? VM_PFNMAP : VM_MIXEDMAP; | 348 | vma->vm_flags |= VM_MIXEDMAP; |
| 347 | vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; | 349 | vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; |
| 348 | return 0; | 350 | return 0; |
| 349 | out_unref: | 351 | out_unref: |
| @@ -359,7 +361,7 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo) | |||
| 359 | 361 | ||
| 360 | vma->vm_ops = &ttm_bo_vm_ops; | 362 | vma->vm_ops = &ttm_bo_vm_ops; |
| 361 | vma->vm_private_data = ttm_bo_reference(bo); | 363 | vma->vm_private_data = ttm_bo_reference(bo); |
| 362 | vma->vm_flags |= (vma->vm_flags & VM_SHARED) ? VM_PFNMAP : VM_MIXEDMAP; | 364 | vma->vm_flags |= VM_MIXEDMAP; |
| 363 | vma->vm_flags |= VM_IO | VM_DONTEXPAND; | 365 | vma->vm_flags |= VM_IO | VM_DONTEXPAND; |
| 364 | return 0; | 366 | return 0; |
| 365 | } | 367 | } |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 82468d902915..e7af580ab977 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | |||
| @@ -830,6 +830,24 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
| 830 | if (unlikely(ret != 0)) | 830 | if (unlikely(ret != 0)) |
| 831 | goto out_unlock; | 831 | goto out_unlock; |
| 832 | 832 | ||
| 833 | /* | ||
| 834 | * A gb-aware client referencing a shared surface will | ||
| 835 | * expect a backup buffer to be present. | ||
| 836 | */ | ||
| 837 | if (dev_priv->has_mob && req->shareable) { | ||
| 838 | uint32_t backup_handle; | ||
| 839 | |||
| 840 | ret = vmw_user_dmabuf_alloc(dev_priv, tfile, | ||
| 841 | res->backup_size, | ||
| 842 | true, | ||
| 843 | &backup_handle, | ||
| 844 | &res->backup); | ||
| 845 | if (unlikely(ret != 0)) { | ||
| 846 | vmw_resource_unreference(&res); | ||
| 847 | goto out_unlock; | ||
| 848 | } | ||
| 849 | } | ||
| 850 | |||
| 833 | tmp = vmw_resource_reference(&srf->res); | 851 | tmp = vmw_resource_reference(&srf->res); |
| 834 | ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, | 852 | ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, |
| 835 | req->shareable, VMW_RES_SURFACE, | 853 | req->shareable, VMW_RES_SURFACE, |
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c index befe0e336471..24883b4d1a49 100644 --- a/drivers/hid/hid-lg4ff.c +++ b/drivers/hid/hid-lg4ff.c | |||
| @@ -43,6 +43,7 @@ | |||
| 43 | #define G25_REV_MIN 0x22 | 43 | #define G25_REV_MIN 0x22 |
| 44 | #define G27_REV_MAJ 0x12 | 44 | #define G27_REV_MAJ 0x12 |
| 45 | #define G27_REV_MIN 0x38 | 45 | #define G27_REV_MIN 0x38 |
| 46 | #define G27_2_REV_MIN 0x39 | ||
| 46 | 47 | ||
| 47 | #define to_hid_device(pdev) container_of(pdev, struct hid_device, dev) | 48 | #define to_hid_device(pdev) container_of(pdev, struct hid_device, dev) |
| 48 | 49 | ||
| @@ -130,6 +131,7 @@ static const struct lg4ff_usb_revision lg4ff_revs[] = { | |||
| 130 | {DFP_REV_MAJ, DFP_REV_MIN, &native_dfp}, /* Driving Force Pro */ | 131 | {DFP_REV_MAJ, DFP_REV_MIN, &native_dfp}, /* Driving Force Pro */ |
| 131 | {G25_REV_MAJ, G25_REV_MIN, &native_g25}, /* G25 */ | 132 | {G25_REV_MAJ, G25_REV_MIN, &native_g25}, /* G25 */ |
| 132 | {G27_REV_MAJ, G27_REV_MIN, &native_g27}, /* G27 */ | 133 | {G27_REV_MAJ, G27_REV_MIN, &native_g27}, /* G27 */ |
| 134 | {G27_REV_MAJ, G27_2_REV_MIN, &native_g27}, /* G27 v2 */ | ||
| 133 | }; | 135 | }; |
| 134 | 136 | ||
| 135 | /* Recalculates X axis value accordingly to currently selected range */ | 137 | /* Recalculates X axis value accordingly to currently selected range */ |
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c index 12354055d474..2f19b15f47f2 100644 --- a/drivers/hid/hid-sony.c +++ b/drivers/hid/hid-sony.c | |||
| @@ -42,6 +42,7 @@ | |||
| 42 | #define DUALSHOCK4_CONTROLLER_BT BIT(6) | 42 | #define DUALSHOCK4_CONTROLLER_BT BIT(6) |
| 43 | 43 | ||
| 44 | #define SONY_LED_SUPPORT (SIXAXIS_CONTROLLER_USB | BUZZ_CONTROLLER | DUALSHOCK4_CONTROLLER_USB) | 44 | #define SONY_LED_SUPPORT (SIXAXIS_CONTROLLER_USB | BUZZ_CONTROLLER | DUALSHOCK4_CONTROLLER_USB) |
| 45 | #define SONY_FF_SUPPORT (SIXAXIS_CONTROLLER_USB | DUALSHOCK4_CONTROLLER_USB) | ||
| 45 | 46 | ||
| 46 | #define MAX_LEDS 4 | 47 | #define MAX_LEDS 4 |
| 47 | 48 | ||
| @@ -499,6 +500,7 @@ struct sony_sc { | |||
| 499 | __u8 right; | 500 | __u8 right; |
| 500 | #endif | 501 | #endif |
| 501 | 502 | ||
| 503 | __u8 worker_initialized; | ||
| 502 | __u8 led_state[MAX_LEDS]; | 504 | __u8 led_state[MAX_LEDS]; |
| 503 | __u8 led_count; | 505 | __u8 led_count; |
| 504 | }; | 506 | }; |
| @@ -993,22 +995,11 @@ static int sony_init_ff(struct hid_device *hdev) | |||
| 993 | return input_ff_create_memless(input_dev, NULL, sony_play_effect); | 995 | return input_ff_create_memless(input_dev, NULL, sony_play_effect); |
| 994 | } | 996 | } |
| 995 | 997 | ||
| 996 | static void sony_destroy_ff(struct hid_device *hdev) | ||
| 997 | { | ||
| 998 | struct sony_sc *sc = hid_get_drvdata(hdev); | ||
| 999 | |||
| 1000 | cancel_work_sync(&sc->state_worker); | ||
| 1001 | } | ||
| 1002 | |||
| 1003 | #else | 998 | #else |
| 1004 | static int sony_init_ff(struct hid_device *hdev) | 999 | static int sony_init_ff(struct hid_device *hdev) |
| 1005 | { | 1000 | { |
| 1006 | return 0; | 1001 | return 0; |
| 1007 | } | 1002 | } |
| 1008 | |||
| 1009 | static void sony_destroy_ff(struct hid_device *hdev) | ||
| 1010 | { | ||
| 1011 | } | ||
| 1012 | #endif | 1003 | #endif |
| 1013 | 1004 | ||
| 1014 | static int sony_set_output_report(struct sony_sc *sc, int req_id, int req_size) | 1005 | static int sony_set_output_report(struct sony_sc *sc, int req_id, int req_size) |
| @@ -1077,6 +1068,8 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) | |||
| 1077 | if (sc->quirks & SIXAXIS_CONTROLLER_USB) { | 1068 | if (sc->quirks & SIXAXIS_CONTROLLER_USB) { |
| 1078 | hdev->hid_output_raw_report = sixaxis_usb_output_raw_report; | 1069 | hdev->hid_output_raw_report = sixaxis_usb_output_raw_report; |
| 1079 | ret = sixaxis_set_operational_usb(hdev); | 1070 | ret = sixaxis_set_operational_usb(hdev); |
| 1071 | |||
| 1072 | sc->worker_initialized = 1; | ||
| 1080 | INIT_WORK(&sc->state_worker, sixaxis_state_worker); | 1073 | INIT_WORK(&sc->state_worker, sixaxis_state_worker); |
| 1081 | } | 1074 | } |
| 1082 | else if (sc->quirks & SIXAXIS_CONTROLLER_BT) | 1075 | else if (sc->quirks & SIXAXIS_CONTROLLER_BT) |
| @@ -1087,6 +1080,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) | |||
| 1087 | if (ret < 0) | 1080 | if (ret < 0) |
| 1088 | goto err_stop; | 1081 | goto err_stop; |
| 1089 | 1082 | ||
| 1083 | sc->worker_initialized = 1; | ||
| 1090 | INIT_WORK(&sc->state_worker, dualshock4_state_worker); | 1084 | INIT_WORK(&sc->state_worker, dualshock4_state_worker); |
| 1091 | } else { | 1085 | } else { |
| 1092 | ret = 0; | 1086 | ret = 0; |
| @@ -1101,9 +1095,11 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) | |||
| 1101 | goto err_stop; | 1095 | goto err_stop; |
| 1102 | } | 1096 | } |
| 1103 | 1097 | ||
| 1104 | ret = sony_init_ff(hdev); | 1098 | if (sc->quirks & SONY_FF_SUPPORT) { |
| 1105 | if (ret < 0) | 1099 | ret = sony_init_ff(hdev); |
| 1106 | goto err_stop; | 1100 | if (ret < 0) |
| 1101 | goto err_stop; | ||
| 1102 | } | ||
| 1107 | 1103 | ||
| 1108 | return 0; | 1104 | return 0; |
| 1109 | err_stop: | 1105 | err_stop: |
| @@ -1120,7 +1116,8 @@ static void sony_remove(struct hid_device *hdev) | |||
| 1120 | if (sc->quirks & SONY_LED_SUPPORT) | 1116 | if (sc->quirks & SONY_LED_SUPPORT) |
| 1121 | sony_leds_remove(hdev); | 1117 | sony_leds_remove(hdev); |
| 1122 | 1118 | ||
| 1123 | sony_destroy_ff(hdev); | 1119 | if (sc->worker_initialized) |
| 1120 | cancel_work_sync(&sc->state_worker); | ||
| 1124 | 1121 | ||
| 1125 | hid_hw_stop(hdev); | 1122 | hid_hw_stop(hdev); |
| 1126 | } | 1123 | } |
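Editorial note: the hid-sony changes replace the force-feedback-specific destroy path with a worker_initialized flag, so remove() only cancels work that was actually set up, and FF init is gated on the new SONY_FF_SUPPORT mask. The general shape of that flag-plus-work pattern, sketched with hypothetical names:

struct example_dev {
	struct work_struct state_worker;
	u8 worker_initialized;
};

static void example_worker(struct work_struct *work)
{
	/* the device-specific output report would be sent here */
}

static void example_probe_sketch(struct example_dev *sc, bool needs_worker)
{
	if (needs_worker) {
		/* set the flag alongside INIT_WORK so the remove path knows
		 * cancel_work_sync() is safe to call */
		sc->worker_initialized = 1;
		INIT_WORK(&sc->state_worker, example_worker);
	}
}

static void example_remove_sketch(struct example_dev *sc)
{
	/* never cancel a work item that was never initialized */
	if (sc->worker_initialized)
		cancel_work_sync(&sc->state_worker);
}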
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index cb0137b3718d..ab24ce2eb28f 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c | |||
| @@ -320,13 +320,13 @@ static void drop_ref(struct hidraw *hidraw, int exists_bit) | |||
| 320 | hid_hw_close(hidraw->hid); | 320 | hid_hw_close(hidraw->hid); |
| 321 | wake_up_interruptible(&hidraw->wait); | 321 | wake_up_interruptible(&hidraw->wait); |
| 322 | } | 322 | } |
| 323 | device_destroy(hidraw_class, | ||
| 324 | MKDEV(hidraw_major, hidraw->minor)); | ||
| 323 | } else { | 325 | } else { |
| 324 | --hidraw->open; | 326 | --hidraw->open; |
| 325 | } | 327 | } |
| 326 | if (!hidraw->open) { | 328 | if (!hidraw->open) { |
| 327 | if (!hidraw->exist) { | 329 | if (!hidraw->exist) { |
| 328 | device_destroy(hidraw_class, | ||
| 329 | MKDEV(hidraw_major, hidraw->minor)); | ||
| 330 | hidraw_table[hidraw->minor] = NULL; | 330 | hidraw_table[hidraw->minor] = NULL; |
| 331 | kfree(hidraw); | 331 | kfree(hidraw); |
| 332 | } else { | 332 | } else { |
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index f5ed03164d86..de17c5593d97 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig | |||
| @@ -387,7 +387,7 @@ config I2C_CBUS_GPIO | |||
| 387 | 387 | ||
| 388 | config I2C_CPM | 388 | config I2C_CPM |
| 389 | tristate "Freescale CPM1 or CPM2 (MPC8xx/826x)" | 389 | tristate "Freescale CPM1 or CPM2 (MPC8xx/826x)" |
| 390 | depends on (CPM1 || CPM2) && OF_I2C | 390 | depends on CPM1 || CPM2 |
| 391 | help | 391 | help |
| 392 | This supports the use of the I2C interface on Freescale | 392 | This supports the use of the I2C interface on Freescale |
| 393 | processors with CPM1 or CPM2. | 393 | processors with CPM1 or CPM2. |
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index e81c5547e647..f9c12e92fdd6 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
| @@ -53,8 +53,8 @@ | |||
| 53 | #include "user.h" | 53 | #include "user.h" |
| 54 | 54 | ||
| 55 | #define DRV_NAME MLX4_IB_DRV_NAME | 55 | #define DRV_NAME MLX4_IB_DRV_NAME |
| 56 | #define DRV_VERSION "1.0" | 56 | #define DRV_VERSION "2.2-1" |
| 57 | #define DRV_RELDATE "April 4, 2008" | 57 | #define DRV_RELDATE "Feb 2014" |
| 58 | 58 | ||
| 59 | #define MLX4_IB_FLOW_MAX_PRIO 0xFFF | 59 | #define MLX4_IB_FLOW_MAX_PRIO 0xFFF |
| 60 | #define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF | 60 | #define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF |
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index aa03e732b6a8..bf900579ac08 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c | |||
| @@ -46,8 +46,8 @@ | |||
| 46 | #include "mlx5_ib.h" | 46 | #include "mlx5_ib.h" |
| 47 | 47 | ||
| 48 | #define DRIVER_NAME "mlx5_ib" | 48 | #define DRIVER_NAME "mlx5_ib" |
| 49 | #define DRIVER_VERSION "1.0" | 49 | #define DRIVER_VERSION "2.2-1" |
| 50 | #define DRIVER_RELDATE "June 2013" | 50 | #define DRIVER_RELDATE "Feb 2014" |
| 51 | 51 | ||
| 52 | MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>"); | 52 | MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>"); |
| 53 | MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver"); | 53 | MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver"); |
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index d18d08a076e8..8ee228e9ab5a 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c | |||
| @@ -492,12 +492,11 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) | |||
| 492 | isert_conn->state = ISER_CONN_INIT; | 492 | isert_conn->state = ISER_CONN_INIT; |
| 493 | INIT_LIST_HEAD(&isert_conn->conn_accept_node); | 493 | INIT_LIST_HEAD(&isert_conn->conn_accept_node); |
| 494 | init_completion(&isert_conn->conn_login_comp); | 494 | init_completion(&isert_conn->conn_login_comp); |
| 495 | init_waitqueue_head(&isert_conn->conn_wait); | 495 | init_completion(&isert_conn->conn_wait); |
| 496 | init_waitqueue_head(&isert_conn->conn_wait_comp_err); | 496 | init_completion(&isert_conn->conn_wait_comp_err); |
| 497 | kref_init(&isert_conn->conn_kref); | 497 | kref_init(&isert_conn->conn_kref); |
| 498 | kref_get(&isert_conn->conn_kref); | 498 | kref_get(&isert_conn->conn_kref); |
| 499 | mutex_init(&isert_conn->conn_mutex); | 499 | mutex_init(&isert_conn->conn_mutex); |
| 500 | mutex_init(&isert_conn->conn_comp_mutex); | ||
| 501 | spin_lock_init(&isert_conn->conn_lock); | 500 | spin_lock_init(&isert_conn->conn_lock); |
| 502 | 501 | ||
| 503 | cma_id->context = isert_conn; | 502 | cma_id->context = isert_conn; |
| @@ -688,11 +687,11 @@ isert_disconnect_work(struct work_struct *work) | |||
| 688 | 687 | ||
| 689 | pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); | 688 | pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); |
| 690 | mutex_lock(&isert_conn->conn_mutex); | 689 | mutex_lock(&isert_conn->conn_mutex); |
| 691 | isert_conn->state = ISER_CONN_DOWN; | 690 | if (isert_conn->state == ISER_CONN_UP) |
| 691 | isert_conn->state = ISER_CONN_TERMINATING; | ||
| 692 | 692 | ||
| 693 | if (isert_conn->post_recv_buf_count == 0 && | 693 | if (isert_conn->post_recv_buf_count == 0 && |
| 694 | atomic_read(&isert_conn->post_send_buf_count) == 0) { | 694 | atomic_read(&isert_conn->post_send_buf_count) == 0) { |
| 695 | pr_debug("Calling wake_up(&isert_conn->conn_wait);\n"); | ||
| 696 | mutex_unlock(&isert_conn->conn_mutex); | 695 | mutex_unlock(&isert_conn->conn_mutex); |
| 697 | goto wake_up; | 696 | goto wake_up; |
| 698 | } | 697 | } |
| @@ -712,7 +711,7 @@ isert_disconnect_work(struct work_struct *work) | |||
| 712 | mutex_unlock(&isert_conn->conn_mutex); | 711 | mutex_unlock(&isert_conn->conn_mutex); |
| 713 | 712 | ||
| 714 | wake_up: | 713 | wake_up: |
| 715 | wake_up(&isert_conn->conn_wait); | 714 | complete(&isert_conn->conn_wait); |
| 716 | isert_put_conn(isert_conn); | 715 | isert_put_conn(isert_conn); |
| 717 | } | 716 | } |
| 718 | 717 | ||
| @@ -888,16 +887,17 @@ isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, | |||
| 888 | * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED | 887 | * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED |
| 889 | * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls. | 888 | * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls. |
| 890 | */ | 889 | */ |
| 891 | mutex_lock(&isert_conn->conn_comp_mutex); | 890 | mutex_lock(&isert_conn->conn_mutex); |
| 892 | if (coalesce && | 891 | if (coalesce && isert_conn->state == ISER_CONN_UP && |
| 893 | ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) { | 892 | ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) { |
| 893 | tx_desc->llnode_active = true; | ||
| 894 | llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist); | 894 | llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist); |
| 895 | mutex_unlock(&isert_conn->conn_comp_mutex); | 895 | mutex_unlock(&isert_conn->conn_mutex); |
| 896 | return; | 896 | return; |
| 897 | } | 897 | } |
| 898 | isert_conn->conn_comp_batch = 0; | 898 | isert_conn->conn_comp_batch = 0; |
| 899 | tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist); | 899 | tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist); |
| 900 | mutex_unlock(&isert_conn->conn_comp_mutex); | 900 | mutex_unlock(&isert_conn->conn_mutex); |
| 901 | 901 | ||
| 902 | send_wr->send_flags = IB_SEND_SIGNALED; | 902 | send_wr->send_flags = IB_SEND_SIGNALED; |
| 903 | } | 903 | } |
| @@ -1464,7 +1464,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd) | |||
| 1464 | case ISCSI_OP_SCSI_CMD: | 1464 | case ISCSI_OP_SCSI_CMD: |
| 1465 | spin_lock_bh(&conn->cmd_lock); | 1465 | spin_lock_bh(&conn->cmd_lock); |
| 1466 | if (!list_empty(&cmd->i_conn_node)) | 1466 | if (!list_empty(&cmd->i_conn_node)) |
| 1467 | list_del(&cmd->i_conn_node); | 1467 | list_del_init(&cmd->i_conn_node); |
| 1468 | spin_unlock_bh(&conn->cmd_lock); | 1468 | spin_unlock_bh(&conn->cmd_lock); |
| 1469 | 1469 | ||
| 1470 | if (cmd->data_direction == DMA_TO_DEVICE) | 1470 | if (cmd->data_direction == DMA_TO_DEVICE) |
| @@ -1476,7 +1476,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd) | |||
| 1476 | case ISCSI_OP_SCSI_TMFUNC: | 1476 | case ISCSI_OP_SCSI_TMFUNC: |
| 1477 | spin_lock_bh(&conn->cmd_lock); | 1477 | spin_lock_bh(&conn->cmd_lock); |
| 1478 | if (!list_empty(&cmd->i_conn_node)) | 1478 | if (!list_empty(&cmd->i_conn_node)) |
| 1479 | list_del(&cmd->i_conn_node); | 1479 | list_del_init(&cmd->i_conn_node); |
| 1480 | spin_unlock_bh(&conn->cmd_lock); | 1480 | spin_unlock_bh(&conn->cmd_lock); |
| 1481 | 1481 | ||
| 1482 | transport_generic_free_cmd(&cmd->se_cmd, 0); | 1482 | transport_generic_free_cmd(&cmd->se_cmd, 0); |
| @@ -1486,7 +1486,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd) | |||
| 1486 | case ISCSI_OP_TEXT: | 1486 | case ISCSI_OP_TEXT: |
| 1487 | spin_lock_bh(&conn->cmd_lock); | 1487 | spin_lock_bh(&conn->cmd_lock); |
| 1488 | if (!list_empty(&cmd->i_conn_node)) | 1488 | if (!list_empty(&cmd->i_conn_node)) |
| 1489 | list_del(&cmd->i_conn_node); | 1489 | list_del_init(&cmd->i_conn_node); |
| 1490 | spin_unlock_bh(&conn->cmd_lock); | 1490 | spin_unlock_bh(&conn->cmd_lock); |
| 1491 | 1491 | ||
| 1492 | /* | 1492 | /* |
| @@ -1549,6 +1549,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc, | |||
| 1549 | iscsit_stop_dataout_timer(cmd); | 1549 | iscsit_stop_dataout_timer(cmd); |
| 1550 | device->unreg_rdma_mem(isert_cmd, isert_conn); | 1550 | device->unreg_rdma_mem(isert_cmd, isert_conn); |
| 1551 | cmd->write_data_done = wr->cur_rdma_length; | 1551 | cmd->write_data_done = wr->cur_rdma_length; |
| 1552 | wr->send_wr_num = 0; | ||
| 1552 | 1553 | ||
| 1553 | pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd); | 1554 | pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd); |
| 1554 | spin_lock_bh(&cmd->istate_lock); | 1555 | spin_lock_bh(&cmd->istate_lock); |
| @@ -1589,7 +1590,7 @@ isert_do_control_comp(struct work_struct *work) | |||
| 1589 | pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n"); | 1590 | pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n"); |
| 1590 | /* | 1591 | /* |
| 1591 | * Call atomic_dec(&isert_conn->post_send_buf_count) | 1592 | * Call atomic_dec(&isert_conn->post_send_buf_count) |
| 1592 | * from isert_free_conn() | 1593 | * from isert_wait_conn() |
| 1593 | */ | 1594 | */ |
| 1594 | isert_conn->logout_posted = true; | 1595 | isert_conn->logout_posted = true; |
| 1595 | iscsit_logout_post_handler(cmd, cmd->conn); | 1596 | iscsit_logout_post_handler(cmd, cmd->conn); |
| @@ -1613,6 +1614,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc, | |||
| 1613 | struct ib_device *ib_dev) | 1614 | struct ib_device *ib_dev) |
| 1614 | { | 1615 | { |
| 1615 | struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; | 1616 | struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; |
| 1617 | struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; | ||
| 1616 | 1618 | ||
| 1617 | if (cmd->i_state == ISTATE_SEND_TASKMGTRSP || | 1619 | if (cmd->i_state == ISTATE_SEND_TASKMGTRSP || |
| 1618 | cmd->i_state == ISTATE_SEND_LOGOUTRSP || | 1620 | cmd->i_state == ISTATE_SEND_LOGOUTRSP || |
| @@ -1624,7 +1626,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc, | |||
| 1624 | queue_work(isert_comp_wq, &isert_cmd->comp_work); | 1626 | queue_work(isert_comp_wq, &isert_cmd->comp_work); |
| 1625 | return; | 1627 | return; |
| 1626 | } | 1628 | } |
| 1627 | atomic_dec(&isert_conn->post_send_buf_count); | 1629 | atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); |
| 1628 | 1630 | ||
| 1629 | cmd->i_state = ISTATE_SENT_STATUS; | 1631 | cmd->i_state = ISTATE_SENT_STATUS; |
| 1630 | isert_completion_put(tx_desc, isert_cmd, ib_dev); | 1632 | isert_completion_put(tx_desc, isert_cmd, ib_dev); |
| @@ -1662,7 +1664,7 @@ __isert_send_completion(struct iser_tx_desc *tx_desc, | |||
| 1662 | case ISER_IB_RDMA_READ: | 1664 | case ISER_IB_RDMA_READ: |
| 1663 | pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n"); | 1665 | pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n"); |
| 1664 | 1666 | ||
| 1665 | atomic_dec(&isert_conn->post_send_buf_count); | 1667 | atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count); |
| 1666 | isert_completion_rdma_read(tx_desc, isert_cmd); | 1668 | isert_completion_rdma_read(tx_desc, isert_cmd); |
| 1667 | break; | 1669 | break; |
| 1668 | default: | 1670 | default: |
| @@ -1691,31 +1693,76 @@ isert_send_completion(struct iser_tx_desc *tx_desc, | |||
| 1691 | } | 1693 | } |
| 1692 | 1694 | ||
| 1693 | static void | 1695 | static void |
| 1694 | isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn) | 1696 | isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_dev) |
| 1697 | { | ||
| 1698 | struct llist_node *llnode; | ||
| 1699 | struct isert_rdma_wr *wr; | ||
| 1700 | struct iser_tx_desc *t; | ||
| 1701 | |||
| 1702 | mutex_lock(&isert_conn->conn_mutex); | ||
| 1703 | llnode = llist_del_all(&isert_conn->conn_comp_llist); | ||
| 1704 | isert_conn->conn_comp_batch = 0; | ||
| 1705 | mutex_unlock(&isert_conn->conn_mutex); | ||
| 1706 | |||
| 1707 | while (llnode) { | ||
| 1708 | t = llist_entry(llnode, struct iser_tx_desc, comp_llnode); | ||
| 1709 | llnode = llist_next(llnode); | ||
| 1710 | wr = &t->isert_cmd->rdma_wr; | ||
| 1711 | |||
| 1712 | atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); | ||
| 1713 | isert_completion_put(t, t->isert_cmd, ib_dev); | ||
| 1714 | } | ||
| 1715 | } | ||
| 1716 | |||
| 1717 | static void | ||
| 1718 | isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn) | ||
| 1695 | { | 1719 | { |
| 1696 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; | 1720 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; |
| 1721 | struct isert_cmd *isert_cmd = tx_desc->isert_cmd; | ||
| 1722 | struct llist_node *llnode = tx_desc->comp_llnode_batch; | ||
| 1723 | struct isert_rdma_wr *wr; | ||
| 1724 | struct iser_tx_desc *t; | ||
| 1697 | 1725 | ||
| 1698 | if (tx_desc) { | 1726 | while (llnode) { |
| 1699 | struct isert_cmd *isert_cmd = tx_desc->isert_cmd; | 1727 | t = llist_entry(llnode, struct iser_tx_desc, comp_llnode); |
| 1728 | llnode = llist_next(llnode); | ||
| 1729 | wr = &t->isert_cmd->rdma_wr; | ||
| 1700 | 1730 | ||
| 1701 | if (!isert_cmd) | 1731 | atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); |
| 1702 | isert_unmap_tx_desc(tx_desc, ib_dev); | 1732 | isert_completion_put(t, t->isert_cmd, ib_dev); |
| 1703 | else | ||
| 1704 | isert_completion_put(tx_desc, isert_cmd, ib_dev); | ||
| 1705 | } | 1733 | } |
| 1734 | tx_desc->comp_llnode_batch = NULL; | ||
| 1706 | 1735 | ||
| 1707 | if (isert_conn->post_recv_buf_count == 0 && | 1736 | if (!isert_cmd) |
| 1708 | atomic_read(&isert_conn->post_send_buf_count) == 0) { | 1737 | isert_unmap_tx_desc(tx_desc, ib_dev); |
| 1709 | pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); | 1738 | else |
| 1710 | pr_debug("Calling wake_up from isert_cq_comp_err\n"); | 1739 | isert_completion_put(tx_desc, isert_cmd, ib_dev); |
| 1740 | } | ||
| 1711 | 1741 | ||
| 1712 | mutex_lock(&isert_conn->conn_mutex); | 1742 | static void |
| 1713 | if (isert_conn->state != ISER_CONN_DOWN) | 1743 | isert_cq_rx_comp_err(struct isert_conn *isert_conn) |
| 1714 | isert_conn->state = ISER_CONN_TERMINATING; | 1744 | { |
| 1715 | mutex_unlock(&isert_conn->conn_mutex); | 1745 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; |
| 1746 | struct iscsi_conn *conn = isert_conn->conn; | ||
| 1716 | 1747 | ||
| 1717 | wake_up(&isert_conn->conn_wait_comp_err); | 1748 | if (isert_conn->post_recv_buf_count) |
| 1749 | return; | ||
| 1750 | |||
| 1751 | isert_cq_drain_comp_llist(isert_conn, ib_dev); | ||
| 1752 | |||
| 1753 | if (conn->sess) { | ||
| 1754 | target_sess_cmd_list_set_waiting(conn->sess->se_sess); | ||
| 1755 | target_wait_for_sess_cmds(conn->sess->se_sess); | ||
| 1718 | } | 1756 | } |
| 1757 | |||
| 1758 | while (atomic_read(&isert_conn->post_send_buf_count)) | ||
| 1759 | msleep(3000); | ||
| 1760 | |||
| 1761 | mutex_lock(&isert_conn->conn_mutex); | ||
| 1762 | isert_conn->state = ISER_CONN_DOWN; | ||
| 1763 | mutex_unlock(&isert_conn->conn_mutex); | ||
| 1764 | |||
| 1765 | complete(&isert_conn->conn_wait_comp_err); | ||
| 1719 | } | 1766 | } |
| 1720 | 1767 | ||
| 1721 | static void | 1768 | static void |
| @@ -1740,8 +1787,14 @@ isert_cq_tx_work(struct work_struct *work) | |||
| 1740 | pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n"); | 1787 | pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n"); |
| 1741 | pr_debug("TX wc.status: 0x%08x\n", wc.status); | 1788 | pr_debug("TX wc.status: 0x%08x\n", wc.status); |
| 1742 | pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err); | 1789 | pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err); |
| 1743 | atomic_dec(&isert_conn->post_send_buf_count); | 1790 | |
| 1744 | isert_cq_comp_err(tx_desc, isert_conn); | 1791 | if (wc.wr_id != ISER_FASTREG_LI_WRID) { |
| 1792 | if (tx_desc->llnode_active) | ||
| 1793 | continue; | ||
| 1794 | |||
| 1795 | atomic_dec(&isert_conn->post_send_buf_count); | ||
| 1796 | isert_cq_tx_comp_err(tx_desc, isert_conn); | ||
| 1797 | } | ||
| 1745 | } | 1798 | } |
| 1746 | } | 1799 | } |
| 1747 | 1800 | ||
| @@ -1784,7 +1837,7 @@ isert_cq_rx_work(struct work_struct *work) | |||
| 1784 | wc.vendor_err); | 1837 | wc.vendor_err); |
| 1785 | } | 1838 | } |
| 1786 | isert_conn->post_recv_buf_count--; | 1839 | isert_conn->post_recv_buf_count--; |
| 1787 | isert_cq_comp_err(NULL, isert_conn); | 1840 | isert_cq_rx_comp_err(isert_conn); |
| 1788 | } | 1841 | } |
| 1789 | } | 1842 | } |
| 1790 | 1843 | ||
| @@ -2202,6 +2255,7 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc, | |||
| 2202 | 2255 | ||
| 2203 | if (!fr_desc->valid) { | 2256 | if (!fr_desc->valid) { |
| 2204 | memset(&inv_wr, 0, sizeof(inv_wr)); | 2257 | memset(&inv_wr, 0, sizeof(inv_wr)); |
| 2258 | inv_wr.wr_id = ISER_FASTREG_LI_WRID; | ||
| 2205 | inv_wr.opcode = IB_WR_LOCAL_INV; | 2259 | inv_wr.opcode = IB_WR_LOCAL_INV; |
| 2206 | inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey; | 2260 | inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey; |
| 2207 | wr = &inv_wr; | 2261 | wr = &inv_wr; |
| @@ -2212,6 +2266,7 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc, | |||
| 2212 | 2266 | ||
| 2213 | /* Prepare FASTREG WR */ | 2267 | /* Prepare FASTREG WR */ |
| 2214 | memset(&fr_wr, 0, sizeof(fr_wr)); | 2268 | memset(&fr_wr, 0, sizeof(fr_wr)); |
| 2269 | fr_wr.wr_id = ISER_FASTREG_LI_WRID; | ||
| 2215 | fr_wr.opcode = IB_WR_FAST_REG_MR; | 2270 | fr_wr.opcode = IB_WR_FAST_REG_MR; |
| 2216 | fr_wr.wr.fast_reg.iova_start = | 2271 | fr_wr.wr.fast_reg.iova_start = |
| 2217 | fr_desc->data_frpl->page_list[0] + page_off; | 2272 | fr_desc->data_frpl->page_list[0] + page_off; |
| @@ -2377,12 +2432,12 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) | |||
| 2377 | isert_init_send_wr(isert_conn, isert_cmd, | 2432 | isert_init_send_wr(isert_conn, isert_cmd, |
| 2378 | &isert_cmd->tx_desc.send_wr, true); | 2433 | &isert_cmd->tx_desc.send_wr, true); |
| 2379 | 2434 | ||
| 2380 | atomic_inc(&isert_conn->post_send_buf_count); | 2435 | atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); |
| 2381 | 2436 | ||
| 2382 | rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); | 2437 | rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); |
| 2383 | if (rc) { | 2438 | if (rc) { |
| 2384 | pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n"); | 2439 | pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n"); |
| 2385 | atomic_dec(&isert_conn->post_send_buf_count); | 2440 | atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); |
| 2386 | } | 2441 | } |
| 2387 | pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n", | 2442 | pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n", |
| 2388 | isert_cmd); | 2443 | isert_cmd); |
| @@ -2410,12 +2465,12 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery) | |||
| 2410 | return rc; | 2465 | return rc; |
| 2411 | } | 2466 | } |
| 2412 | 2467 | ||
| 2413 | atomic_inc(&isert_conn->post_send_buf_count); | 2468 | atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count); |
| 2414 | 2469 | ||
| 2415 | rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); | 2470 | rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); |
| 2416 | if (rc) { | 2471 | if (rc) { |
| 2417 | pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n"); | 2472 | pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n"); |
| 2418 | atomic_dec(&isert_conn->post_send_buf_count); | 2473 | atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count); |
| 2419 | } | 2474 | } |
| 2420 | pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n", | 2475 | pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n", |
| 2421 | isert_cmd); | 2476 | isert_cmd); |
| @@ -2702,22 +2757,11 @@ isert_free_np(struct iscsi_np *np) | |||
| 2702 | kfree(isert_np); | 2757 | kfree(isert_np); |
| 2703 | } | 2758 | } |
| 2704 | 2759 | ||
| 2705 | static int isert_check_state(struct isert_conn *isert_conn, int state) | 2760 | static void isert_wait_conn(struct iscsi_conn *conn) |
| 2706 | { | ||
| 2707 | int ret; | ||
| 2708 | |||
| 2709 | mutex_lock(&isert_conn->conn_mutex); | ||
| 2710 | ret = (isert_conn->state == state); | ||
| 2711 | mutex_unlock(&isert_conn->conn_mutex); | ||
| 2712 | |||
| 2713 | return ret; | ||
| 2714 | } | ||
| 2715 | |||
| 2716 | static void isert_free_conn(struct iscsi_conn *conn) | ||
| 2717 | { | 2761 | { |
| 2718 | struct isert_conn *isert_conn = conn->context; | 2762 | struct isert_conn *isert_conn = conn->context; |
| 2719 | 2763 | ||
| 2720 | pr_debug("isert_free_conn: Starting \n"); | 2764 | pr_debug("isert_wait_conn: Starting \n"); |
| 2721 | /* | 2765 | /* |
| 2722 | * Decrement post_send_buf_count for special case when called | 2766 | * Decrement post_send_buf_count for special case when called |
| 2723 | * from isert_do_control_comp() -> iscsit_logout_post_handler() | 2767 | * from isert_do_control_comp() -> iscsit_logout_post_handler() |
| @@ -2727,38 +2771,29 @@ static void isert_free_conn(struct iscsi_conn *conn) | |||
| 2727 | atomic_dec(&isert_conn->post_send_buf_count); | 2771 | atomic_dec(&isert_conn->post_send_buf_count); |
| 2728 | 2772 | ||
| 2729 | if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) { | 2773 | if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) { |
| 2730 | pr_debug("Calling rdma_disconnect from isert_free_conn\n"); | 2774 | pr_debug("Calling rdma_disconnect from isert_wait_conn\n"); |
| 2731 | rdma_disconnect(isert_conn->conn_cm_id); | 2775 | rdma_disconnect(isert_conn->conn_cm_id); |
| 2732 | } | 2776 | } |
| 2733 | /* | 2777 | /* |
| 2734 | * Only wait for conn_wait_comp_err if the isert_conn made it | 2778 | * Only wait for conn_wait_comp_err if the isert_conn made it |
| 2735 | * into full feature phase.. | 2779 | * into full feature phase.. |
| 2736 | */ | 2780 | */ |
| 2737 | if (isert_conn->state == ISER_CONN_UP) { | ||
| 2738 | pr_debug("isert_free_conn: Before wait_event comp_err %d\n", | ||
| 2739 | isert_conn->state); | ||
| 2740 | mutex_unlock(&isert_conn->conn_mutex); | ||
| 2741 | |||
| 2742 | wait_event(isert_conn->conn_wait_comp_err, | ||
| 2743 | (isert_check_state(isert_conn, ISER_CONN_TERMINATING))); | ||
| 2744 | |||
| 2745 | wait_event(isert_conn->conn_wait, | ||
| 2746 | (isert_check_state(isert_conn, ISER_CONN_DOWN))); | ||
| 2747 | |||
| 2748 | isert_put_conn(isert_conn); | ||
| 2749 | return; | ||
| 2750 | } | ||
| 2751 | if (isert_conn->state == ISER_CONN_INIT) { | 2781 | if (isert_conn->state == ISER_CONN_INIT) { |
| 2752 | mutex_unlock(&isert_conn->conn_mutex); | 2782 | mutex_unlock(&isert_conn->conn_mutex); |
| 2753 | isert_put_conn(isert_conn); | ||
| 2754 | return; | 2783 | return; |
| 2755 | } | 2784 | } |
| 2756 | pr_debug("isert_free_conn: wait_event conn_wait %d\n", | 2785 | if (isert_conn->state == ISER_CONN_UP) |
| 2757 | isert_conn->state); | 2786 | isert_conn->state = ISER_CONN_TERMINATING; |
| 2758 | mutex_unlock(&isert_conn->conn_mutex); | 2787 | mutex_unlock(&isert_conn->conn_mutex); |
| 2759 | 2788 | ||
| 2760 | wait_event(isert_conn->conn_wait, | 2789 | wait_for_completion(&isert_conn->conn_wait_comp_err); |
| 2761 | (isert_check_state(isert_conn, ISER_CONN_DOWN))); | 2790 | |
| 2791 | wait_for_completion(&isert_conn->conn_wait); | ||
| 2792 | } | ||
| 2793 | |||
| 2794 | static void isert_free_conn(struct iscsi_conn *conn) | ||
| 2795 | { | ||
| 2796 | struct isert_conn *isert_conn = conn->context; | ||
| 2762 | 2797 | ||
| 2763 | isert_put_conn(isert_conn); | 2798 | isert_put_conn(isert_conn); |
| 2764 | } | 2799 | } |
| @@ -2771,6 +2806,7 @@ static struct iscsit_transport iser_target_transport = { | |||
| 2771 | .iscsit_setup_np = isert_setup_np, | 2806 | .iscsit_setup_np = isert_setup_np, |
| 2772 | .iscsit_accept_np = isert_accept_np, | 2807 | .iscsit_accept_np = isert_accept_np, |
| 2773 | .iscsit_free_np = isert_free_np, | 2808 | .iscsit_free_np = isert_free_np, |
| 2809 | .iscsit_wait_conn = isert_wait_conn, | ||
| 2774 | .iscsit_free_conn = isert_free_conn, | 2810 | .iscsit_free_conn = isert_free_conn, |
| 2775 | .iscsit_get_login_rx = isert_get_login_rx, | 2811 | .iscsit_get_login_rx = isert_get_login_rx, |
| 2776 | .iscsit_put_login_tx = isert_put_login_tx, | 2812 | .iscsit_put_login_tx = isert_put_login_tx, |
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index 708a069002f3..f6ae7f5dd408 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h | |||
| @@ -6,6 +6,7 @@ | |||
| 6 | 6 | ||
| 7 | #define ISERT_RDMA_LISTEN_BACKLOG 10 | 7 | #define ISERT_RDMA_LISTEN_BACKLOG 10 |
| 8 | #define ISCSI_ISER_SG_TABLESIZE 256 | 8 | #define ISCSI_ISER_SG_TABLESIZE 256 |
| 9 | #define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL | ||
| 9 | 10 | ||
| 10 | enum isert_desc_type { | 11 | enum isert_desc_type { |
| 11 | ISCSI_TX_CONTROL, | 12 | ISCSI_TX_CONTROL, |
| @@ -45,6 +46,7 @@ struct iser_tx_desc { | |||
| 45 | struct isert_cmd *isert_cmd; | 46 | struct isert_cmd *isert_cmd; |
| 46 | struct llist_node *comp_llnode_batch; | 47 | struct llist_node *comp_llnode_batch; |
| 47 | struct llist_node comp_llnode; | 48 | struct llist_node comp_llnode; |
| 49 | bool llnode_active; | ||
| 48 | struct ib_send_wr send_wr; | 50 | struct ib_send_wr send_wr; |
| 49 | } __packed; | 51 | } __packed; |
| 50 | 52 | ||
| @@ -116,8 +118,8 @@ struct isert_conn { | |||
| 116 | struct isert_device *conn_device; | 118 | struct isert_device *conn_device; |
| 117 | struct work_struct conn_logout_work; | 119 | struct work_struct conn_logout_work; |
| 118 | struct mutex conn_mutex; | 120 | struct mutex conn_mutex; |
| 119 | wait_queue_head_t conn_wait; | 121 | struct completion conn_wait; |
| 120 | wait_queue_head_t conn_wait_comp_err; | 122 | struct completion conn_wait_comp_err; |
| 121 | struct kref conn_kref; | 123 | struct kref conn_kref; |
| 122 | struct list_head conn_fr_pool; | 124 | struct list_head conn_fr_pool; |
| 123 | int conn_fr_pool_size; | 125 | int conn_fr_pool_size; |
| @@ -126,7 +128,6 @@ struct isert_conn { | |||
| 126 | #define ISERT_COMP_BATCH_COUNT 8 | 128 | #define ISERT_COMP_BATCH_COUNT 8 |
| 127 | int conn_comp_batch; | 129 | int conn_comp_batch; |
| 128 | struct llist_head conn_comp_llist; | 130 | struct llist_head conn_comp_llist; |
| 129 | struct mutex conn_comp_mutex; | ||
| 130 | }; | 131 | }; |
| 131 | 132 | ||
| 132 | #define ISERT_MAX_CQ 64 | 133 | #define ISERT_MAX_CQ 64 |
diff --git a/drivers/isdn/capi/Kconfig b/drivers/isdn/capi/Kconfig index f04686580040..9816c51eb5c2 100644 --- a/drivers/isdn/capi/Kconfig +++ b/drivers/isdn/capi/Kconfig | |||
| @@ -16,9 +16,17 @@ config CAPI_TRACE | |||
| 16 | This will increase the size of the kernelcapi module by 20 KB. | 16 | This will increase the size of the kernelcapi module by 20 KB. |
| 17 | If unsure, say Y. | 17 | If unsure, say Y. |
| 18 | 18 | ||
| 19 | config ISDN_CAPI_CAPI20 | ||
| 20 | tristate "CAPI2.0 /dev/capi support" | ||
| 21 | help | ||
| 22 | This option will provide the CAPI 2.0 interface to userspace | ||
| 23 | applications via /dev/capi20. Applications should use the | ||
| 24 | standardized libcapi20 to access this functionality. You should say | ||
| 25 | Y/M here. | ||
| 26 | |||
| 19 | config ISDN_CAPI_MIDDLEWARE | 27 | config ISDN_CAPI_MIDDLEWARE |
| 20 | bool "CAPI2.0 Middleware support" | 28 | bool "CAPI2.0 Middleware support" |
| 21 | depends on TTY | 29 | depends on ISDN_CAPI_CAPI20 && TTY |
| 22 | help | 30 | help |
| 23 | This option will enhance the capabilities of the /dev/capi20 | 31 | This option will enhance the capabilities of the /dev/capi20 |
| 24 | interface. It will provide a means of moving a data connection, | 32 | interface. It will provide a means of moving a data connection, |
| @@ -26,14 +34,6 @@ config ISDN_CAPI_MIDDLEWARE | |||
| 26 | device. If you want to use pppd with pppdcapiplugin to dial up to | 34 | device. If you want to use pppd with pppdcapiplugin to dial up to |
| 27 | your ISP, say Y here. | 35 | your ISP, say Y here. |
| 28 | 36 | ||
| 29 | config ISDN_CAPI_CAPI20 | ||
| 30 | tristate "CAPI2.0 /dev/capi support" | ||
| 31 | help | ||
| 32 | This option will provide the CAPI 2.0 interface to userspace | ||
| 33 | applications via /dev/capi20. Applications should use the | ||
| 34 | standardized libcapi20 to access this functionality. You should say | ||
| 35 | Y/M here. | ||
| 36 | |||
| 37 | config ISDN_CAPI_CAPIDRV | 37 | config ISDN_CAPI_CAPIDRV |
| 38 | tristate "CAPI2.0 capidrv interface support" | 38 | tristate "CAPI2.0 capidrv interface support" |
| 39 | depends on ISDN_I4L | 39 | depends on ISDN_I4L |
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index 9a06fe883766..95ad936e6048 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig | |||
| @@ -254,16 +254,6 @@ config DM_THIN_PROVISIONING | |||
| 254 | ---help--- | 254 | ---help--- |
| 255 | Provides thin provisioning and snapshots that share a data store. | 255 | Provides thin provisioning and snapshots that share a data store. |
| 256 | 256 | ||
| 257 | config DM_DEBUG_BLOCK_STACK_TRACING | ||
| 258 | boolean "Keep stack trace of persistent data block lock holders" | ||
| 259 | depends on STACKTRACE_SUPPORT && DM_PERSISTENT_DATA | ||
| 260 | select STACKTRACE | ||
| 261 | ---help--- | ||
| 262 | Enable this for messages that may help debug problems with the | ||
| 263 | block manager locking used by thin provisioning and caching. | ||
| 264 | |||
| 265 | If unsure, say N. | ||
| 266 | |||
| 267 | config DM_CACHE | 257 | config DM_CACHE |
| 268 | tristate "Cache target (EXPERIMENTAL)" | 258 | tristate "Cache target (EXPERIMENTAL)" |
| 269 | depends on BLK_DEV_DM | 259 | depends on BLK_DEV_DM |
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c index 1e018e986610..0e385e40909e 100644 --- a/drivers/md/dm-cache-policy-mq.c +++ b/drivers/md/dm-cache-policy-mq.c | |||
| @@ -872,7 +872,7 @@ static void mq_destroy(struct dm_cache_policy *p) | |||
| 872 | { | 872 | { |
| 873 | struct mq_policy *mq = to_mq_policy(p); | 873 | struct mq_policy *mq = to_mq_policy(p); |
| 874 | 874 | ||
| 875 | kfree(mq->table); | 875 | vfree(mq->table); |
| 876 | epool_exit(&mq->cache_pool); | 876 | epool_exit(&mq->cache_pool); |
| 877 | epool_exit(&mq->pre_cache_pool); | 877 | epool_exit(&mq->pre_cache_pool); |
| 878 | kfree(mq); | 878 | kfree(mq); |
| @@ -1245,7 +1245,7 @@ static struct dm_cache_policy *mq_create(dm_cblock_t cache_size, | |||
| 1245 | 1245 | ||
| 1246 | mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16); | 1246 | mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16); |
| 1247 | mq->hash_bits = ffs(mq->nr_buckets) - 1; | 1247 | mq->hash_bits = ffs(mq->nr_buckets) - 1; |
| 1248 | mq->table = kzalloc(sizeof(*mq->table) * mq->nr_buckets, GFP_KERNEL); | 1248 | mq->table = vzalloc(sizeof(*mq->table) * mq->nr_buckets); |
| 1249 | if (!mq->table) | 1249 | if (!mq->table) |
| 1250 | goto bad_alloc_table; | 1250 | goto bad_alloc_table; |
| 1251 | 1251 | ||
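Editorial note: the mq policy switches its hash table from kzalloc() to vzalloc() because the bucket array can exceed what the page allocator will hand out as one physically contiguous chunk; the matching free must then become vfree(). Sketched with a hypothetical table:

#include <linux/list.h>
#include <linux/vmalloc.h>

/* Sketch: large, possibly multi-megabyte tables want vzalloc()/vfree(),
 * which only need virtually contiguous memory. */
static struct hlist_head *example_alloc_buckets(unsigned int nr_buckets)
{
	return vzalloc(sizeof(struct hlist_head) * nr_buckets);
}

static void example_free_buckets(struct hlist_head *table)
{
	vfree(table);	/* never kfree() memory that came from vzalloc() */
}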
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 1af70145fab9..074b9c8e4cf0 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c | |||
| @@ -979,12 +979,13 @@ static void issue_copy_real(struct dm_cache_migration *mg) | |||
| 979 | int r; | 979 | int r; |
| 980 | struct dm_io_region o_region, c_region; | 980 | struct dm_io_region o_region, c_region; |
| 981 | struct cache *cache = mg->cache; | 981 | struct cache *cache = mg->cache; |
| 982 | sector_t cblock = from_cblock(mg->cblock); | ||
| 982 | 983 | ||
| 983 | o_region.bdev = cache->origin_dev->bdev; | 984 | o_region.bdev = cache->origin_dev->bdev; |
| 984 | o_region.count = cache->sectors_per_block; | 985 | o_region.count = cache->sectors_per_block; |
| 985 | 986 | ||
| 986 | c_region.bdev = cache->cache_dev->bdev; | 987 | c_region.bdev = cache->cache_dev->bdev; |
| 987 | c_region.sector = from_cblock(mg->cblock) * cache->sectors_per_block; | 988 | c_region.sector = cblock * cache->sectors_per_block; |
| 988 | c_region.count = cache->sectors_per_block; | 989 | c_region.count = cache->sectors_per_block; |
| 989 | 990 | ||
| 990 | if (mg->writeback || mg->demote) { | 991 | if (mg->writeback || mg->demote) { |
| @@ -2464,20 +2465,18 @@ static int cache_map(struct dm_target *ti, struct bio *bio) | |||
| 2464 | bool discarded_block; | 2465 | bool discarded_block; |
| 2465 | struct dm_bio_prison_cell *cell; | 2466 | struct dm_bio_prison_cell *cell; |
| 2466 | struct policy_result lookup_result; | 2467 | struct policy_result lookup_result; |
| 2467 | struct per_bio_data *pb; | 2468 | struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size); |
| 2468 | 2469 | ||
| 2469 | if (from_oblock(block) > from_oblock(cache->origin_blocks)) { | 2470 | if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) { |
| 2470 | /* | 2471 | /* |
| 2471 | * This can only occur if the io goes to a partial block at | 2472 | * This can only occur if the io goes to a partial block at |
| 2472 | * the end of the origin device. We don't cache these. | 2473 | * the end of the origin device. We don't cache these. |
| 2473 | * Just remap to the origin and carry on. | 2474 | * Just remap to the origin and carry on. |
| 2474 | */ | 2475 | */ |
| 2475 | remap_to_origin_clear_discard(cache, bio, block); | 2476 | remap_to_origin(cache, bio); |
| 2476 | return DM_MAPIO_REMAPPED; | 2477 | return DM_MAPIO_REMAPPED; |
| 2477 | } | 2478 | } |
| 2478 | 2479 | ||
| 2479 | pb = init_per_bio_data(bio, pb_data_size); | ||
| 2480 | |||
| 2481 | if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) { | 2480 | if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) { |
| 2482 | defer_bio(cache, bio); | 2481 | defer_bio(cache, bio); |
| 2483 | return DM_MAPIO_SUBMITTED; | 2482 | return DM_MAPIO_SUBMITTED; |
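Editorial note: the cache_map() fix tightens the bounds check from '>' to '>=' (block numbers are zero-based, so block == origin_blocks is already past the end) and remaps such a bio straight to the origin without touching discard state. The bounds idea in isolation, as a sketch:

/* Sketch: a device of N blocks has valid indices 0 .. N-1, so anything
 * >= N lies past the end and must bypass the cache. */
static bool example_block_past_end(u64 block, u64 origin_blocks)
{
	return block >= origin_blocks;
}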
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index afc3d017de4c..d6e88178d22c 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c | |||
| @@ -546,6 +546,9 @@ static int read_exceptions(struct pstore *ps, | |||
| 546 | r = insert_exceptions(ps, area, callback, callback_context, | 546 | r = insert_exceptions(ps, area, callback, callback_context, |
| 547 | &full); | 547 | &full); |
| 548 | 548 | ||
| 549 | if (!full) | ||
| 550 | memcpy(ps->area, area, ps->store->chunk_size << SECTOR_SHIFT); | ||
| 551 | |||
| 549 | dm_bufio_release(bp); | 552 | dm_bufio_release(bp); |
| 550 | 553 | ||
| 551 | dm_bufio_forget(client, chunk); | 554 | dm_bufio_forget(client, chunk); |
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index baa87ff12816..fb9efc829182 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c | |||
| @@ -76,7 +76,7 @@ | |||
| 76 | 76 | ||
| 77 | #define THIN_SUPERBLOCK_MAGIC 27022010 | 77 | #define THIN_SUPERBLOCK_MAGIC 27022010 |
| 78 | #define THIN_SUPERBLOCK_LOCATION 0 | 78 | #define THIN_SUPERBLOCK_LOCATION 0 |
| 79 | #define THIN_VERSION 1 | 79 | #define THIN_VERSION 2 |
| 80 | #define THIN_METADATA_CACHE_SIZE 64 | 80 | #define THIN_METADATA_CACHE_SIZE 64 |
| 81 | #define SECTOR_TO_BLOCK_SHIFT 3 | 81 | #define SECTOR_TO_BLOCK_SHIFT 3 |
| 82 | 82 | ||
| @@ -1755,3 +1755,38 @@ int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd, | |||
| 1755 | 1755 | ||
| 1756 | return r; | 1756 | return r; |
| 1757 | } | 1757 | } |
| 1758 | |||
| 1759 | int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd) | ||
| 1760 | { | ||
| 1761 | int r; | ||
| 1762 | struct dm_block *sblock; | ||
| 1763 | struct thin_disk_superblock *disk_super; | ||
| 1764 | |||
| 1765 | down_write(&pmd->root_lock); | ||
| 1766 | pmd->flags |= THIN_METADATA_NEEDS_CHECK_FLAG; | ||
| 1767 | |||
| 1768 | r = superblock_lock(pmd, &sblock); | ||
| 1769 | if (r) { | ||
| 1770 | DMERR("couldn't read superblock"); | ||
| 1771 | goto out; | ||
| 1772 | } | ||
| 1773 | |||
| 1774 | disk_super = dm_block_data(sblock); | ||
| 1775 | disk_super->flags = cpu_to_le32(pmd->flags); | ||
| 1776 | |||
| 1777 | dm_bm_unlock(sblock); | ||
| 1778 | out: | ||
| 1779 | up_write(&pmd->root_lock); | ||
| 1780 | return r; | ||
| 1781 | } | ||
| 1782 | |||
| 1783 | bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd) | ||
| 1784 | { | ||
| 1785 | bool needs_check; | ||
| 1786 | |||
| 1787 | down_read(&pmd->root_lock); | ||
| 1788 | needs_check = pmd->flags & THIN_METADATA_NEEDS_CHECK_FLAG; | ||
| 1789 | up_read(&pmd->root_lock); | ||
| 1790 | |||
| 1791 | return needs_check; | ||
| 1792 | } | ||
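The new helpers above record a persistent "needs check" flag in the superblock under the metadata root lock, so the condition survives a reboot and later code can refuse risky transitions until a repair has run. A rough userspace approximation of the lock-and-flag pattern, with pthreads standing in for the kernel rwsem and every name a stand-in rather than the real API:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NEEDS_CHECK_FLAG (1 << 0)

struct pool_metadata {
        pthread_rwlock_t root_lock;
        unsigned flags;
};

static int metadata_set_needs_check(struct pool_metadata *pmd)
{
        pthread_rwlock_wrlock(&pmd->root_lock);
        pmd->flags |= NEEDS_CHECK_FLAG;
        /* The real code also writes the flag into the on-disk superblock
         * here, which is what makes the condition persistent. */
        pthread_rwlock_unlock(&pmd->root_lock);
        return 0;
}

static bool metadata_needs_check(struct pool_metadata *pmd)
{
        bool needs_check;

        pthread_rwlock_rdlock(&pmd->root_lock);
        needs_check = pmd->flags & NEEDS_CHECK_FLAG;
        pthread_rwlock_unlock(&pmd->root_lock);
        return needs_check;
}

int main(void)
{
        struct pool_metadata pmd = { .flags = 0 };

        pthread_rwlock_init(&pmd.root_lock, NULL);
        printf("before: %d\n", metadata_needs_check(&pmd));    /* 0 */
        metadata_set_needs_check(&pmd);
        printf("after:  %d\n", metadata_needs_check(&pmd));    /* 1 */
        pthread_rwlock_destroy(&pmd.root_lock);
        return 0;
}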
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h index 82ea384d36ff..e3c857db195a 100644 --- a/drivers/md/dm-thin-metadata.h +++ b/drivers/md/dm-thin-metadata.h | |||
| @@ -25,6 +25,11 @@ | |||
| 25 | 25 | ||
| 26 | /*----------------------------------------------------------------*/ | 26 | /*----------------------------------------------------------------*/ |
| 27 | 27 | ||
| 28 | /* | ||
| 29 | * Thin metadata superblock flags. | ||
| 30 | */ | ||
| 31 | #define THIN_METADATA_NEEDS_CHECK_FLAG (1 << 0) | ||
| 32 | |||
| 28 | struct dm_pool_metadata; | 33 | struct dm_pool_metadata; |
| 29 | struct dm_thin_device; | 34 | struct dm_thin_device; |
| 30 | 35 | ||
| @@ -202,6 +207,12 @@ int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd, | |||
| 202 | dm_sm_threshold_fn fn, | 207 | dm_sm_threshold_fn fn, |
| 203 | void *context); | 208 | void *context); |
| 204 | 209 | ||
| 210 | /* | ||
| 211 | * Updates the superblock immediately. | ||
| 212 | */ | ||
| 213 | int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd); | ||
| 214 | bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd); | ||
| 215 | |||
| 205 | /*----------------------------------------------------------------*/ | 216 | /*----------------------------------------------------------------*/ |
| 206 | 217 | ||
| 207 | #endif | 218 | #endif |
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 7e84baccf0ad..be70d38745f7 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c | |||
| @@ -130,10 +130,11 @@ static void build_virtual_key(struct dm_thin_device *td, dm_block_t b, | |||
| 130 | struct dm_thin_new_mapping; | 130 | struct dm_thin_new_mapping; |
| 131 | 131 | ||
| 132 | /* | 132 | /* |
| 133 | * The pool runs in 3 modes. Ordered in degraded order for comparisons. | 133 | * The pool runs in 4 modes. Ordered in degraded order for comparisons. |
| 134 | */ | 134 | */ |
| 135 | enum pool_mode { | 135 | enum pool_mode { |
| 136 | PM_WRITE, /* metadata may be changed */ | 136 | PM_WRITE, /* metadata may be changed */ |
| 137 | PM_OUT_OF_DATA_SPACE, /* metadata may be changed, though data may not be allocated */ | ||
| 137 | PM_READ_ONLY, /* metadata may not be changed */ | 138 | PM_READ_ONLY, /* metadata may not be changed */ |
| 138 | PM_FAIL, /* all I/O fails */ | 139 | PM_FAIL, /* all I/O fails */ |
| 139 | }; | 140 | }; |
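Keeping the enum "in degraded order" is what lets callers compare modes numerically, and it is why the new PM_OUT_OF_DATA_SPACE slots in between PM_WRITE and PM_READ_ONLY: per the comments, metadata is still writable there even though data allocation is not. An illustrative sketch of that comparison idiom (not the kernel code itself):

#include <stdio.h>

enum pool_mode {
        PM_WRITE,               /* fully functional */
        PM_OUT_OF_DATA_SPACE,   /* metadata writable, no data allocation */
        PM_READ_ONLY,           /* metadata may not be changed */
        PM_FAIL,                /* all I/O fails */
};

/* Because the values run from least to most degraded, "is the pool at
 * most this degraded?" is a plain comparison. */
static int metadata_still_writable(enum pool_mode m)
{
        return m <= PM_OUT_OF_DATA_SPACE;
}

int main(void)
{
        printf("PM_OUT_OF_DATA_SPACE: %d\n",
               metadata_still_writable(PM_OUT_OF_DATA_SPACE));  /* 1 */
        printf("PM_READ_ONLY:         %d\n",
               metadata_still_writable(PM_READ_ONLY));          /* 0 */
        return 0;
}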
| @@ -198,7 +199,6 @@ struct pool { | |||
| 198 | }; | 199 | }; |
| 199 | 200 | ||
| 200 | static enum pool_mode get_pool_mode(struct pool *pool); | 201 | static enum pool_mode get_pool_mode(struct pool *pool); |
| 201 | static void out_of_data_space(struct pool *pool); | ||
| 202 | static void metadata_operation_failed(struct pool *pool, const char *op, int r); | 202 | static void metadata_operation_failed(struct pool *pool, const char *op, int r); |
| 203 | 203 | ||
| 204 | /* | 204 | /* |
| @@ -226,6 +226,7 @@ struct thin_c { | |||
| 226 | 226 | ||
| 227 | struct pool *pool; | 227 | struct pool *pool; |
| 228 | struct dm_thin_device *td; | 228 | struct dm_thin_device *td; |
| 229 | bool requeue_mode:1; | ||
| 229 | }; | 230 | }; |
| 230 | 231 | ||
| 231 | /*----------------------------------------------------------------*/ | 232 | /*----------------------------------------------------------------*/ |
| @@ -369,14 +370,18 @@ struct dm_thin_endio_hook { | |||
| 369 | struct dm_thin_new_mapping *overwrite_mapping; | 370 | struct dm_thin_new_mapping *overwrite_mapping; |
| 370 | }; | 371 | }; |
| 371 | 372 | ||
| 372 | static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master) | 373 | static void requeue_bio_list(struct thin_c *tc, struct bio_list *master) |
| 373 | { | 374 | { |
| 374 | struct bio *bio; | 375 | struct bio *bio; |
| 375 | struct bio_list bios; | 376 | struct bio_list bios; |
| 377 | unsigned long flags; | ||
| 376 | 378 | ||
| 377 | bio_list_init(&bios); | 379 | bio_list_init(&bios); |
| 380 | |||
| 381 | spin_lock_irqsave(&tc->pool->lock, flags); | ||
| 378 | bio_list_merge(&bios, master); | 382 | bio_list_merge(&bios, master); |
| 379 | bio_list_init(master); | 383 | bio_list_init(master); |
| 384 | spin_unlock_irqrestore(&tc->pool->lock, flags); | ||
| 380 | 385 | ||
| 381 | while ((bio = bio_list_pop(&bios))) { | 386 | while ((bio = bio_list_pop(&bios))) { |
| 382 | struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); | 387 | struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); |
| @@ -391,12 +396,26 @@ static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master) | |||
| 391 | static void requeue_io(struct thin_c *tc) | 396 | static void requeue_io(struct thin_c *tc) |
| 392 | { | 397 | { |
| 393 | struct pool *pool = tc->pool; | 398 | struct pool *pool = tc->pool; |
| 399 | |||
| 400 | requeue_bio_list(tc, &pool->deferred_bios); | ||
| 401 | requeue_bio_list(tc, &pool->retry_on_resume_list); | ||
| 402 | } | ||
| 403 | |||
| 404 | static void error_retry_list(struct pool *pool) | ||
| 405 | { | ||
| 406 | struct bio *bio; | ||
| 394 | unsigned long flags; | 407 | unsigned long flags; |
| 408 | struct bio_list bios; | ||
| 409 | |||
| 410 | bio_list_init(&bios); | ||
| 395 | 411 | ||
| 396 | spin_lock_irqsave(&pool->lock, flags); | 412 | spin_lock_irqsave(&pool->lock, flags); |
| 397 | __requeue_bio_list(tc, &pool->deferred_bios); | 413 | bio_list_merge(&bios, &pool->retry_on_resume_list); |
| 398 | __requeue_bio_list(tc, &pool->retry_on_resume_list); | 414 | bio_list_init(&pool->retry_on_resume_list); |
| 399 | spin_unlock_irqrestore(&pool->lock, flags); | 415 | spin_unlock_irqrestore(&pool->lock, flags); |
| 416 | |||
| 417 | while ((bio = bio_list_pop(&bios))) | ||
| 418 | bio_io_error(bio); | ||
| 400 | } | 419 | } |
| 401 | 420 | ||
| 402 | /* | 421 | /* |
| @@ -925,13 +944,15 @@ static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks) | |||
| 925 | } | 944 | } |
| 926 | } | 945 | } |
| 927 | 946 | ||
| 947 | static void set_pool_mode(struct pool *pool, enum pool_mode new_mode); | ||
| 948 | |||
| 928 | static int alloc_data_block(struct thin_c *tc, dm_block_t *result) | 949 | static int alloc_data_block(struct thin_c *tc, dm_block_t *result) |
| 929 | { | 950 | { |
| 930 | int r; | 951 | int r; |
| 931 | dm_block_t free_blocks; | 952 | dm_block_t free_blocks; |
| 932 | struct pool *pool = tc->pool; | 953 | struct pool *pool = tc->pool; |
| 933 | 954 | ||
| 934 | if (get_pool_mode(pool) != PM_WRITE) | 955 | if (WARN_ON(get_pool_mode(pool) != PM_WRITE)) |
| 935 | return -EINVAL; | 956 | return -EINVAL; |
| 936 | 957 | ||
| 937 | r = dm_pool_get_free_block_count(pool->pmd, &free_blocks); | 958 | r = dm_pool_get_free_block_count(pool->pmd, &free_blocks); |
| @@ -958,7 +979,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result) | |||
| 958 | } | 979 | } |
| 959 | 980 | ||
| 960 | if (!free_blocks) { | 981 | if (!free_blocks) { |
| 961 | out_of_data_space(pool); | 982 | set_pool_mode(pool, PM_OUT_OF_DATA_SPACE); |
| 962 | return -ENOSPC; | 983 | return -ENOSPC; |
| 963 | } | 984 | } |
| 964 | } | 985 | } |
| @@ -988,15 +1009,32 @@ static void retry_on_resume(struct bio *bio) | |||
| 988 | spin_unlock_irqrestore(&pool->lock, flags); | 1009 | spin_unlock_irqrestore(&pool->lock, flags); |
| 989 | } | 1010 | } |
| 990 | 1011 | ||
| 991 | static void handle_unserviceable_bio(struct pool *pool, struct bio *bio) | 1012 | static bool should_error_unserviceable_bio(struct pool *pool) |
| 992 | { | 1013 | { |
| 993 | /* | 1014 | enum pool_mode m = get_pool_mode(pool); |
| 994 | * When pool is read-only, no cell locking is needed because | 1015 | |
| 995 | * nothing is changing. | 1016 | switch (m) { |
| 996 | */ | 1017 | case PM_WRITE: |
| 997 | WARN_ON_ONCE(get_pool_mode(pool) != PM_READ_ONLY); | 1018 | /* Shouldn't get here */ |
| 1019 | DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode"); | ||
| 1020 | return true; | ||
| 1021 | |||
| 1022 | case PM_OUT_OF_DATA_SPACE: | ||
| 1023 | return pool->pf.error_if_no_space; | ||
| 998 | 1024 | ||
| 999 | if (pool->pf.error_if_no_space) | 1025 | case PM_READ_ONLY: |
| 1026 | case PM_FAIL: | ||
| 1027 | return true; | ||
| 1028 | default: | ||
| 1029 | /* Shouldn't get here */ | ||
| 1030 | DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode"); | ||
| 1031 | return true; | ||
| 1032 | } | ||
| 1033 | } | ||
| 1034 | |||
| 1035 | static void handle_unserviceable_bio(struct pool *pool, struct bio *bio) | ||
| 1036 | { | ||
| 1037 | if (should_error_unserviceable_bio(pool)) | ||
| 1000 | bio_io_error(bio); | 1038 | bio_io_error(bio); |
| 1001 | else | 1039 | else |
| 1002 | retry_on_resume(bio); | 1040 | retry_on_resume(bio); |
| @@ -1007,11 +1045,20 @@ static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *c | |||
| 1007 | struct bio *bio; | 1045 | struct bio *bio; |
| 1008 | struct bio_list bios; | 1046 | struct bio_list bios; |
| 1009 | 1047 | ||
| 1048 | if (should_error_unserviceable_bio(pool)) { | ||
| 1049 | cell_error(pool, cell); | ||
| 1050 | return; | ||
| 1051 | } | ||
| 1052 | |||
| 1010 | bio_list_init(&bios); | 1053 | bio_list_init(&bios); |
| 1011 | cell_release(pool, cell, &bios); | 1054 | cell_release(pool, cell, &bios); |
| 1012 | 1055 | ||
| 1013 | while ((bio = bio_list_pop(&bios))) | 1056 | if (should_error_unserviceable_bio(pool)) |
| 1014 | handle_unserviceable_bio(pool, bio); | 1057 | while ((bio = bio_list_pop(&bios))) |
| 1058 | bio_io_error(bio); | ||
| 1059 | else | ||
| 1060 | while ((bio = bio_list_pop(&bios))) | ||
| 1061 | retry_on_resume(bio); | ||
| 1015 | } | 1062 | } |
| 1016 | 1063 | ||
| 1017 | static void process_discard(struct thin_c *tc, struct bio *bio) | 1064 | static void process_discard(struct thin_c *tc, struct bio *bio) |
| @@ -1296,6 +1343,11 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio) | |||
| 1296 | } | 1343 | } |
| 1297 | } | 1344 | } |
| 1298 | 1345 | ||
| 1346 | static void process_bio_success(struct thin_c *tc, struct bio *bio) | ||
| 1347 | { | ||
| 1348 | bio_endio(bio, 0); | ||
| 1349 | } | ||
| 1350 | |||
| 1299 | static void process_bio_fail(struct thin_c *tc, struct bio *bio) | 1351 | static void process_bio_fail(struct thin_c *tc, struct bio *bio) |
| 1300 | { | 1352 | { |
| 1301 | bio_io_error(bio); | 1353 | bio_io_error(bio); |
| @@ -1328,6 +1380,11 @@ static void process_deferred_bios(struct pool *pool) | |||
| 1328 | struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); | 1380 | struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); |
| 1329 | struct thin_c *tc = h->tc; | 1381 | struct thin_c *tc = h->tc; |
| 1330 | 1382 | ||
| 1383 | if (tc->requeue_mode) { | ||
| 1384 | bio_endio(bio, DM_ENDIO_REQUEUE); | ||
| 1385 | continue; | ||
| 1386 | } | ||
| 1387 | |||
| 1331 | /* | 1388 | /* |
| 1332 | * If we've got no free new_mapping structs, and processing | 1389 | * If we've got no free new_mapping structs, and processing |
| 1333 | * this bio might require one, we pause until there are some | 1390 | * this bio might require one, we pause until there are some |
| @@ -1394,51 +1451,134 @@ static void do_waker(struct work_struct *ws) | |||
| 1394 | 1451 | ||
| 1395 | /*----------------------------------------------------------------*/ | 1452 | /*----------------------------------------------------------------*/ |
| 1396 | 1453 | ||
| 1454 | struct noflush_work { | ||
| 1455 | struct work_struct worker; | ||
| 1456 | struct thin_c *tc; | ||
| 1457 | |||
| 1458 | atomic_t complete; | ||
| 1459 | wait_queue_head_t wait; | ||
| 1460 | }; | ||
| 1461 | |||
| 1462 | static void complete_noflush_work(struct noflush_work *w) | ||
| 1463 | { | ||
| 1464 | atomic_set(&w->complete, 1); | ||
| 1465 | wake_up(&w->wait); | ||
| 1466 | } | ||
| 1467 | |||
| 1468 | static void do_noflush_start(struct work_struct *ws) | ||
| 1469 | { | ||
| 1470 | struct noflush_work *w = container_of(ws, struct noflush_work, worker); | ||
| 1471 | w->tc->requeue_mode = true; | ||
| 1472 | requeue_io(w->tc); | ||
| 1473 | complete_noflush_work(w); | ||
| 1474 | } | ||
| 1475 | |||
| 1476 | static void do_noflush_stop(struct work_struct *ws) | ||
| 1477 | { | ||
| 1478 | struct noflush_work *w = container_of(ws, struct noflush_work, worker); | ||
| 1479 | w->tc->requeue_mode = false; | ||
| 1480 | complete_noflush_work(w); | ||
| 1481 | } | ||
| 1482 | |||
| 1483 | static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *)) | ||
| 1484 | { | ||
| 1485 | struct noflush_work w; | ||
| 1486 | |||
| 1487 | INIT_WORK(&w.worker, fn); | ||
| 1488 | w.tc = tc; | ||
| 1489 | atomic_set(&w.complete, 0); | ||
| 1490 | init_waitqueue_head(&w.wait); | ||
| 1491 | |||
| 1492 | queue_work(tc->pool->wq, &w.worker); | ||
| 1493 | |||
| 1494 | wait_event(w.wait, atomic_read(&w.complete)); | ||
| 1495 | } | ||
| 1496 | |||
| 1497 | /*----------------------------------------------------------------*/ | ||
| 1498 | |||
| 1397 | static enum pool_mode get_pool_mode(struct pool *pool) | 1499 | static enum pool_mode get_pool_mode(struct pool *pool) |
| 1398 | { | 1500 | { |
| 1399 | return pool->pf.mode; | 1501 | return pool->pf.mode; |
| 1400 | } | 1502 | } |
| 1401 | 1503 | ||
| 1504 | static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode) | ||
| 1505 | { | ||
| 1506 | dm_table_event(pool->ti->table); | ||
| 1507 | DMINFO("%s: switching pool to %s mode", | ||
| 1508 | dm_device_name(pool->pool_md), new_mode); | ||
| 1509 | } | ||
| 1510 | |||
| 1402 | static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) | 1511 | static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) |
| 1403 | { | 1512 | { |
| 1404 | int r; | 1513 | struct pool_c *pt = pool->ti->private; |
| 1405 | enum pool_mode old_mode = pool->pf.mode; | 1514 | bool needs_check = dm_pool_metadata_needs_check(pool->pmd); |
| 1515 | enum pool_mode old_mode = get_pool_mode(pool); | ||
| 1516 | |||
| 1517 | /* | ||
| 1518 | * Never allow the pool to transition to PM_WRITE mode if user | ||
| 1519 | * intervention is required to verify metadata and data consistency. | ||
| 1520 | */ | ||
| 1521 | if (new_mode == PM_WRITE && needs_check) { | ||
| 1522 | DMERR("%s: unable to switch pool to write mode until repaired.", | ||
| 1523 | dm_device_name(pool->pool_md)); | ||
| 1524 | if (old_mode != new_mode) | ||
| 1525 | new_mode = old_mode; | ||
| 1526 | else | ||
| 1527 | new_mode = PM_READ_ONLY; | ||
| 1528 | } | ||
| 1529 | /* | ||
| 1530 | * If we were in PM_FAIL mode, rollback of metadata failed. We're | ||
| 1531 | * not going to recover without a thin_repair. So we never let the | ||
| 1532 | * pool move out of the old mode. | ||
| 1533 | */ | ||
| 1534 | if (old_mode == PM_FAIL) | ||
| 1535 | new_mode = old_mode; | ||
| 1406 | 1536 | ||
| 1407 | switch (new_mode) { | 1537 | switch (new_mode) { |
| 1408 | case PM_FAIL: | 1538 | case PM_FAIL: |
| 1409 | if (old_mode != new_mode) | 1539 | if (old_mode != new_mode) |
| 1410 | DMERR("%s: switching pool to failure mode", | 1540 | notify_of_pool_mode_change(pool, "failure"); |
| 1411 | dm_device_name(pool->pool_md)); | ||
| 1412 | dm_pool_metadata_read_only(pool->pmd); | 1541 | dm_pool_metadata_read_only(pool->pmd); |
| 1413 | pool->process_bio = process_bio_fail; | 1542 | pool->process_bio = process_bio_fail; |
| 1414 | pool->process_discard = process_bio_fail; | 1543 | pool->process_discard = process_bio_fail; |
| 1415 | pool->process_prepared_mapping = process_prepared_mapping_fail; | 1544 | pool->process_prepared_mapping = process_prepared_mapping_fail; |
| 1416 | pool->process_prepared_discard = process_prepared_discard_fail; | 1545 | pool->process_prepared_discard = process_prepared_discard_fail; |
| 1546 | |||
| 1547 | error_retry_list(pool); | ||
| 1417 | break; | 1548 | break; |
| 1418 | 1549 | ||
| 1419 | case PM_READ_ONLY: | 1550 | case PM_READ_ONLY: |
| 1420 | if (old_mode != new_mode) | 1551 | if (old_mode != new_mode) |
| 1421 | DMERR("%s: switching pool to read-only mode", | 1552 | notify_of_pool_mode_change(pool, "read-only"); |
| 1422 | dm_device_name(pool->pool_md)); | 1553 | dm_pool_metadata_read_only(pool->pmd); |
| 1423 | r = dm_pool_abort_metadata(pool->pmd); | 1554 | pool->process_bio = process_bio_read_only; |
| 1424 | if (r) { | 1555 | pool->process_discard = process_bio_success; |
| 1425 | DMERR("%s: aborting transaction failed", | 1556 | pool->process_prepared_mapping = process_prepared_mapping_fail; |
| 1426 | dm_device_name(pool->pool_md)); | 1557 | pool->process_prepared_discard = process_prepared_discard_passdown; |
| 1427 | new_mode = PM_FAIL; | 1558 | |
| 1428 | set_pool_mode(pool, new_mode); | 1559 | error_retry_list(pool); |
| 1429 | } else { | 1560 | break; |
| 1430 | dm_pool_metadata_read_only(pool->pmd); | 1561 | |
| 1431 | pool->process_bio = process_bio_read_only; | 1562 | case PM_OUT_OF_DATA_SPACE: |
| 1432 | pool->process_discard = process_discard; | 1563 | /* |
| 1433 | pool->process_prepared_mapping = process_prepared_mapping_fail; | 1564 | * Ideally we'd never hit this state; the low water mark |
| 1434 | pool->process_prepared_discard = process_prepared_discard_passdown; | 1565 | * would trigger userland to extend the pool before we |
| 1435 | } | 1566 | * completely run out of data space. However, many small |
| 1567 | * IOs to unprovisioned space can consume data space at an | ||
| 1568 | * alarming rate. Adjust your low water mark if you're | ||
| 1569 | * frequently seeing this mode. | ||
| 1570 | */ | ||
| 1571 | if (old_mode != new_mode) | ||
| 1572 | notify_of_pool_mode_change(pool, "out-of-data-space"); | ||
| 1573 | pool->process_bio = process_bio_read_only; | ||
| 1574 | pool->process_discard = process_discard; | ||
| 1575 | pool->process_prepared_mapping = process_prepared_mapping; | ||
| 1576 | pool->process_prepared_discard = process_prepared_discard_passdown; | ||
| 1436 | break; | 1577 | break; |
| 1437 | 1578 | ||
| 1438 | case PM_WRITE: | 1579 | case PM_WRITE: |
| 1439 | if (old_mode != new_mode) | 1580 | if (old_mode != new_mode) |
| 1440 | DMINFO("%s: switching pool to write mode", | 1581 | notify_of_pool_mode_change(pool, "write"); |
| 1441 | dm_device_name(pool->pool_md)); | ||
| 1442 | dm_pool_metadata_read_write(pool->pmd); | 1582 | dm_pool_metadata_read_write(pool->pmd); |
| 1443 | pool->process_bio = process_bio; | 1583 | pool->process_bio = process_bio; |
| 1444 | pool->process_discard = process_discard; | 1584 | pool->process_discard = process_discard; |
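noflush_work() in the hunk above runs a function on the pool workqueue and blocks the caller until it has completed, using an on-stack work item plus an atomic flag and waitqueue. The sketch below mimics that synchronous hand-off in userspace with a thread and a condition variable; it is an analogy for the pattern, not the kernel implementation:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct noflush_work {
        pthread_mutex_t lock;
        pthread_cond_t wait;
        bool complete;
        bool *requeue_mode;     /* the state the work item flips */
        bool new_value;
};

static void *worker(void *arg)
{
        struct noflush_work *w = arg;

        *w->requeue_mode = w->new_value;        /* do_noflush_start()/stop() */

        pthread_mutex_lock(&w->lock);
        w->complete = true;                     /* complete_noflush_work() */
        pthread_cond_signal(&w->wait);
        pthread_mutex_unlock(&w->lock);
        return NULL;
}

static void run_noflush_work(bool *requeue_mode, bool new_value)
{
        struct noflush_work w;
        pthread_t t;

        pthread_mutex_init(&w.lock, NULL);
        pthread_cond_init(&w.wait, NULL);
        w.complete = false;
        w.requeue_mode = requeue_mode;
        w.new_value = new_value;

        pthread_create(&t, NULL, worker, &w);   /* queue_work() */

        pthread_mutex_lock(&w.lock);            /* wait_event() */
        while (!w.complete)
                pthread_cond_wait(&w.wait, &w.lock);
        pthread_mutex_unlock(&w.lock);

        pthread_join(t, NULL);
        pthread_cond_destroy(&w.wait);
        pthread_mutex_destroy(&w.lock);
}

int main(void)
{
        bool requeue_mode = false;

        run_noflush_work(&requeue_mode, true);   /* presuspend path */
        printf("requeue_mode = %d\n", requeue_mode);
        run_noflush_work(&requeue_mode, false);  /* postsuspend path */
        printf("requeue_mode = %d\n", requeue_mode);
        return 0;
}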
| @@ -1448,32 +1588,35 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) | |||
| 1448 | } | 1588 | } |
| 1449 | 1589 | ||
| 1450 | pool->pf.mode = new_mode; | 1590 | pool->pf.mode = new_mode; |
| 1591 | /* | ||
| 1592 | * The pool mode may have changed, sync it so bind_control_target() | ||
| 1593 | * doesn't cause an unexpected mode transition on resume. | ||
| 1594 | */ | ||
| 1595 | pt->adjusted_pf.mode = new_mode; | ||
| 1451 | } | 1596 | } |
| 1452 | 1597 | ||
| 1453 | /* | 1598 | static void abort_transaction(struct pool *pool) |
| 1454 | * Rather than calling set_pool_mode directly, use these which describe the | ||
| 1455 | * reason for mode degradation. | ||
| 1456 | */ | ||
| 1457 | static void out_of_data_space(struct pool *pool) | ||
| 1458 | { | 1599 | { |
| 1459 | DMERR_LIMIT("%s: no free data space available.", | 1600 | const char *dev_name = dm_device_name(pool->pool_md); |
| 1460 | dm_device_name(pool->pool_md)); | 1601 | |
| 1461 | set_pool_mode(pool, PM_READ_ONLY); | 1602 | DMERR_LIMIT("%s: aborting current metadata transaction", dev_name); |
| 1603 | if (dm_pool_abort_metadata(pool->pmd)) { | ||
| 1604 | DMERR("%s: failed to abort metadata transaction", dev_name); | ||
| 1605 | set_pool_mode(pool, PM_FAIL); | ||
| 1606 | } | ||
| 1607 | |||
| 1608 | if (dm_pool_metadata_set_needs_check(pool->pmd)) { | ||
| 1609 | DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name); | ||
| 1610 | set_pool_mode(pool, PM_FAIL); | ||
| 1611 | } | ||
| 1462 | } | 1612 | } |
| 1463 | 1613 | ||
| 1464 | static void metadata_operation_failed(struct pool *pool, const char *op, int r) | 1614 | static void metadata_operation_failed(struct pool *pool, const char *op, int r) |
| 1465 | { | 1615 | { |
| 1466 | dm_block_t free_blocks; | ||
| 1467 | |||
| 1468 | DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d", | 1616 | DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d", |
| 1469 | dm_device_name(pool->pool_md), op, r); | 1617 | dm_device_name(pool->pool_md), op, r); |
| 1470 | 1618 | ||
| 1471 | if (r == -ENOSPC && | 1619 | abort_transaction(pool); |
| 1472 | !dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks) && | ||
| 1473 | !free_blocks) | ||
| 1474 | DMERR_LIMIT("%s: no free metadata space available.", | ||
| 1475 | dm_device_name(pool->pool_md)); | ||
| 1476 | |||
| 1477 | set_pool_mode(pool, PM_READ_ONLY); | 1620 | set_pool_mode(pool, PM_READ_ONLY); |
| 1478 | } | 1621 | } |
| 1479 | 1622 | ||
| @@ -1524,6 +1667,11 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio) | |||
| 1524 | 1667 | ||
| 1525 | thin_hook_bio(tc, bio); | 1668 | thin_hook_bio(tc, bio); |
| 1526 | 1669 | ||
| 1670 | if (tc->requeue_mode) { | ||
| 1671 | bio_endio(bio, DM_ENDIO_REQUEUE); | ||
| 1672 | return DM_MAPIO_SUBMITTED; | ||
| 1673 | } | ||
| 1674 | |||
| 1527 | if (get_pool_mode(tc->pool) == PM_FAIL) { | 1675 | if (get_pool_mode(tc->pool) == PM_FAIL) { |
| 1528 | bio_io_error(bio); | 1676 | bio_io_error(bio); |
| 1529 | return DM_MAPIO_SUBMITTED; | 1677 | return DM_MAPIO_SUBMITTED; |
| @@ -1687,7 +1835,7 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti) | |||
| 1687 | /* | 1835 | /* |
| 1688 | * We want to make sure that a pool in PM_FAIL mode is never upgraded. | 1836 | * We want to make sure that a pool in PM_FAIL mode is never upgraded. |
| 1689 | */ | 1837 | */ |
| 1690 | enum pool_mode old_mode = pool->pf.mode; | 1838 | enum pool_mode old_mode = get_pool_mode(pool); |
| 1691 | enum pool_mode new_mode = pt->adjusted_pf.mode; | 1839 | enum pool_mode new_mode = pt->adjusted_pf.mode; |
| 1692 | 1840 | ||
| 1693 | /* | 1841 | /* |
| @@ -1701,16 +1849,6 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti) | |||
| 1701 | pool->pf = pt->adjusted_pf; | 1849 | pool->pf = pt->adjusted_pf; |
| 1702 | pool->low_water_blocks = pt->low_water_blocks; | 1850 | pool->low_water_blocks = pt->low_water_blocks; |
| 1703 | 1851 | ||
| 1704 | /* | ||
| 1705 | * If we were in PM_FAIL mode, rollback of metadata failed. We're | ||
| 1706 | * not going to recover without a thin_repair. So we never let the | ||
| 1707 | * pool move out of the old mode. On the other hand a PM_READ_ONLY | ||
| 1708 | * may have been due to a lack of metadata or data space, and may | ||
| 1709 | * now work (ie. if the underlying devices have been resized). | ||
| 1710 | */ | ||
| 1711 | if (old_mode == PM_FAIL) | ||
| 1712 | new_mode = old_mode; | ||
| 1713 | |||
| 1714 | set_pool_mode(pool, new_mode); | 1852 | set_pool_mode(pool, new_mode); |
| 1715 | 1853 | ||
| 1716 | return 0; | 1854 | return 0; |
| @@ -2253,6 +2391,12 @@ static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit) | |||
| 2253 | return -EINVAL; | 2391 | return -EINVAL; |
| 2254 | 2392 | ||
| 2255 | } else if (data_size > sb_data_size) { | 2393 | } else if (data_size > sb_data_size) { |
| 2394 | if (dm_pool_metadata_needs_check(pool->pmd)) { | ||
| 2395 | DMERR("%s: unable to grow the data device until repaired.", | ||
| 2396 | dm_device_name(pool->pool_md)); | ||
| 2397 | return 0; | ||
| 2398 | } | ||
| 2399 | |||
| 2256 | if (sb_data_size) | 2400 | if (sb_data_size) |
| 2257 | DMINFO("%s: growing the data device from %llu to %llu blocks", | 2401 | DMINFO("%s: growing the data device from %llu to %llu blocks", |
| 2258 | dm_device_name(pool->pool_md), | 2402 | dm_device_name(pool->pool_md), |
| @@ -2294,6 +2438,12 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit) | |||
| 2294 | return -EINVAL; | 2438 | return -EINVAL; |
| 2295 | 2439 | ||
| 2296 | } else if (metadata_dev_size > sb_metadata_dev_size) { | 2440 | } else if (metadata_dev_size > sb_metadata_dev_size) { |
| 2441 | if (dm_pool_metadata_needs_check(pool->pmd)) { | ||
| 2442 | DMERR("%s: unable to grow the metadata device until repaired.", | ||
| 2443 | dm_device_name(pool->pool_md)); | ||
| 2444 | return 0; | ||
| 2445 | } | ||
| 2446 | |||
| 2297 | warn_if_metadata_device_too_big(pool->md_dev); | 2447 | warn_if_metadata_device_too_big(pool->md_dev); |
| 2298 | DMINFO("%s: growing the metadata device from %llu to %llu blocks", | 2448 | DMINFO("%s: growing the metadata device from %llu to %llu blocks", |
| 2299 | dm_device_name(pool->pool_md), | 2449 | dm_device_name(pool->pool_md), |
| @@ -2681,7 +2831,9 @@ static void pool_status(struct dm_target *ti, status_type_t type, | |||
| 2681 | else | 2831 | else |
| 2682 | DMEMIT("- "); | 2832 | DMEMIT("- "); |
| 2683 | 2833 | ||
| 2684 | if (pool->pf.mode == PM_READ_ONLY) | 2834 | if (pool->pf.mode == PM_OUT_OF_DATA_SPACE) |
| 2835 | DMEMIT("out_of_data_space "); | ||
| 2836 | else if (pool->pf.mode == PM_READ_ONLY) | ||
| 2685 | DMEMIT("ro "); | 2837 | DMEMIT("ro "); |
| 2686 | else | 2838 | else |
| 2687 | DMEMIT("rw "); | 2839 | DMEMIT("rw "); |
| @@ -2795,7 +2947,7 @@ static struct target_type pool_target = { | |||
| 2795 | .name = "thin-pool", | 2947 | .name = "thin-pool", |
| 2796 | .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | | 2948 | .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | |
| 2797 | DM_TARGET_IMMUTABLE, | 2949 | DM_TARGET_IMMUTABLE, |
| 2798 | .version = {1, 10, 0}, | 2950 | .version = {1, 11, 0}, |
| 2799 | .module = THIS_MODULE, | 2951 | .module = THIS_MODULE, |
| 2800 | .ctr = pool_ctr, | 2952 | .ctr = pool_ctr, |
| 2801 | .dtr = pool_dtr, | 2953 | .dtr = pool_dtr, |
| @@ -2997,10 +3149,23 @@ static int thin_endio(struct dm_target *ti, struct bio *bio, int err) | |||
| 2997 | return 0; | 3149 | return 0; |
| 2998 | } | 3150 | } |
| 2999 | 3151 | ||
| 3000 | static void thin_postsuspend(struct dm_target *ti) | 3152 | static void thin_presuspend(struct dm_target *ti) |
| 3001 | { | 3153 | { |
| 3154 | struct thin_c *tc = ti->private; | ||
| 3155 | |||
| 3002 | if (dm_noflush_suspending(ti)) | 3156 | if (dm_noflush_suspending(ti)) |
| 3003 | requeue_io((struct thin_c *)ti->private); | 3157 | noflush_work(tc, do_noflush_start); |
| 3158 | } | ||
| 3159 | |||
| 3160 | static void thin_postsuspend(struct dm_target *ti) | ||
| 3161 | { | ||
| 3162 | struct thin_c *tc = ti->private; | ||
| 3163 | |||
| 3164 | /* | ||
| 3165 | * The dm_noflush_suspending flag has been cleared by now, so | ||
| 3166 | * unfortunately we must always run this. | ||
| 3167 | */ | ||
| 3168 | noflush_work(tc, do_noflush_stop); | ||
| 3004 | } | 3169 | } |
| 3005 | 3170 | ||
| 3006 | /* | 3171 | /* |
| @@ -3085,12 +3250,13 @@ static int thin_iterate_devices(struct dm_target *ti, | |||
| 3085 | 3250 | ||
| 3086 | static struct target_type thin_target = { | 3251 | static struct target_type thin_target = { |
| 3087 | .name = "thin", | 3252 | .name = "thin", |
| 3088 | .version = {1, 10, 0}, | 3253 | .version = {1, 11, 0}, |
| 3089 | .module = THIS_MODULE, | 3254 | .module = THIS_MODULE, |
| 3090 | .ctr = thin_ctr, | 3255 | .ctr = thin_ctr, |
| 3091 | .dtr = thin_dtr, | 3256 | .dtr = thin_dtr, |
| 3092 | .map = thin_map, | 3257 | .map = thin_map, |
| 3093 | .end_io = thin_endio, | 3258 | .end_io = thin_endio, |
| 3259 | .presuspend = thin_presuspend, | ||
| 3094 | .postsuspend = thin_postsuspend, | 3260 | .postsuspend = thin_postsuspend, |
| 3095 | .status = thin_status, | 3261 | .status = thin_status, |
| 3096 | .iterate_devices = thin_iterate_devices, | 3262 | .iterate_devices = thin_iterate_devices, |
diff --git a/drivers/md/persistent-data/Kconfig b/drivers/md/persistent-data/Kconfig index 19b268795415..0c2dec7aec20 100644 --- a/drivers/md/persistent-data/Kconfig +++ b/drivers/md/persistent-data/Kconfig | |||
| @@ -6,3 +6,13 @@ config DM_PERSISTENT_DATA | |||
| 6 | ---help--- | 6 | ---help--- |
| 7 | Library providing immutable on-disk data structure support for | 7 | Library providing immutable on-disk data structure support for |
| 8 | device-mapper targets such as the thin provisioning target. | 8 | device-mapper targets such as the thin provisioning target. |
| 9 | |||
| 10 | config DM_DEBUG_BLOCK_STACK_TRACING | ||
| 11 | boolean "Keep stack trace of persistent data block lock holders" | ||
| 12 | depends on STACKTRACE_SUPPORT && DM_PERSISTENT_DATA | ||
| 13 | select STACKTRACE | ||
| 14 | ---help--- | ||
| 15 | Enable this for messages that may help debug problems with the | ||
| 16 | block manager locking used by thin provisioning and caching. | ||
| 17 | |||
| 18 | If unsure, say N. | ||
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c index e9bdd462f4f5..786b689bdfc7 100644 --- a/drivers/md/persistent-data/dm-space-map-metadata.c +++ b/drivers/md/persistent-data/dm-space-map-metadata.c | |||
| @@ -91,6 +91,69 @@ struct block_op { | |||
| 91 | dm_block_t block; | 91 | dm_block_t block; |
| 92 | }; | 92 | }; |
| 93 | 93 | ||
| 94 | struct bop_ring_buffer { | ||
| 95 | unsigned begin; | ||
| 96 | unsigned end; | ||
| 97 | struct block_op bops[MAX_RECURSIVE_ALLOCATIONS + 1]; | ||
| 98 | }; | ||
| 99 | |||
| 100 | static void brb_init(struct bop_ring_buffer *brb) | ||
| 101 | { | ||
| 102 | brb->begin = 0; | ||
| 103 | brb->end = 0; | ||
| 104 | } | ||
| 105 | |||
| 106 | static bool brb_empty(struct bop_ring_buffer *brb) | ||
| 107 | { | ||
| 108 | return brb->begin == brb->end; | ||
| 109 | } | ||
| 110 | |||
| 111 | static unsigned brb_next(struct bop_ring_buffer *brb, unsigned old) | ||
| 112 | { | ||
| 113 | unsigned r = old + 1; | ||
| 114 | return (r >= (sizeof(brb->bops) / sizeof(*brb->bops))) ? 0 : r; | ||
| 115 | } | ||
| 116 | |||
| 117 | static int brb_push(struct bop_ring_buffer *brb, | ||
| 118 | enum block_op_type type, dm_block_t b) | ||
| 119 | { | ||
| 120 | struct block_op *bop; | ||
| 121 | unsigned next = brb_next(brb, brb->end); | ||
| 122 | |||
| 123 | /* | ||
| 124 | * We don't allow the last bop to be filled, this way we can | ||
| 125 | * differentiate between full and empty. | ||
| 126 | */ | ||
| 127 | if (next == brb->begin) | ||
| 128 | return -ENOMEM; | ||
| 129 | |||
| 130 | bop = brb->bops + brb->end; | ||
| 131 | bop->type = type; | ||
| 132 | bop->block = b; | ||
| 133 | |||
| 134 | brb->end = next; | ||
| 135 | |||
| 136 | return 0; | ||
| 137 | } | ||
| 138 | |||
| 139 | static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result) | ||
| 140 | { | ||
| 141 | struct block_op *bop; | ||
| 142 | |||
| 143 | if (brb_empty(brb)) | ||
| 144 | return -ENODATA; | ||
| 145 | |||
| 146 | bop = brb->bops + brb->begin; | ||
| 147 | result->type = bop->type; | ||
| 148 | result->block = bop->block; | ||
| 149 | |||
| 150 | brb->begin = brb_next(brb, brb->begin); | ||
| 151 | |||
| 152 | return 0; | ||
| 153 | } | ||
| 154 | |||
| 155 | /*----------------------------------------------------------------*/ | ||
| 156 | |||
| 94 | struct sm_metadata { | 157 | struct sm_metadata { |
| 95 | struct dm_space_map sm; | 158 | struct dm_space_map sm; |
| 96 | 159 | ||
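The bop_ring_buffer added above deliberately leaves one slot unused: "begin == end" means empty and "next(end) == begin" means full, so no separate element count is needed. A small standalone C version of the same discipline (size and element type are illustrative):

#include <stdio.h>

#define RING_SLOTS 4                    /* usable capacity is RING_SLOTS - 1 */

struct ring {
        unsigned begin, end;
        int slots[RING_SLOTS];
};

static unsigned ring_next(unsigned i)
{
        return (i + 1 >= RING_SLOTS) ? 0 : i + 1;
}

static int ring_push(struct ring *r, int v)
{
        unsigned next = ring_next(r->end);

        if (next == r->begin)
                return -1;              /* full: the last slot is never filled */
        r->slots[r->end] = v;
        r->end = next;
        return 0;
}

static int ring_pop(struct ring *r, int *v)
{
        if (r->begin == r->end)
                return -1;              /* empty */
        *v = r->slots[r->begin];
        r->begin = ring_next(r->begin);
        return 0;
}

int main(void)
{
        struct ring r = { 0, 0, { 0 } };
        int v;

        for (v = 0; ring_push(&r, v) == 0; v++)
                ;                       /* accepts 3 entries, then reports full */
        while (ring_pop(&r, &v) == 0)
                printf("popped %d\n", v);
        return 0;
}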
| @@ -101,25 +164,20 @@ struct sm_metadata { | |||
| 101 | 164 | ||
| 102 | unsigned recursion_count; | 165 | unsigned recursion_count; |
| 103 | unsigned allocated_this_transaction; | 166 | unsigned allocated_this_transaction; |
| 104 | unsigned nr_uncommitted; | 167 | struct bop_ring_buffer uncommitted; |
| 105 | struct block_op uncommitted[MAX_RECURSIVE_ALLOCATIONS]; | ||
| 106 | 168 | ||
| 107 | struct threshold threshold; | 169 | struct threshold threshold; |
| 108 | }; | 170 | }; |
| 109 | 171 | ||
| 110 | static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b) | 172 | static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b) |
| 111 | { | 173 | { |
| 112 | struct block_op *op; | 174 | int r = brb_push(&smm->uncommitted, type, b); |
| 113 | 175 | ||
| 114 | if (smm->nr_uncommitted == MAX_RECURSIVE_ALLOCATIONS) { | 176 | if (r) { |
| 115 | DMERR("too many recursive allocations"); | 177 | DMERR("too many recursive allocations"); |
| 116 | return -ENOMEM; | 178 | return -ENOMEM; |
| 117 | } | 179 | } |
| 118 | 180 | ||
| 119 | op = smm->uncommitted + smm->nr_uncommitted++; | ||
| 120 | op->type = type; | ||
| 121 | op->block = b; | ||
| 122 | |||
| 123 | return 0; | 181 | return 0; |
| 124 | } | 182 | } |
| 125 | 183 | ||
| @@ -158,11 +216,17 @@ static int out(struct sm_metadata *smm) | |||
| 158 | return -ENOMEM; | 216 | return -ENOMEM; |
| 159 | } | 217 | } |
| 160 | 218 | ||
| 161 | if (smm->recursion_count == 1 && smm->nr_uncommitted) { | 219 | if (smm->recursion_count == 1) { |
| 162 | while (smm->nr_uncommitted && !r) { | 220 | while (!brb_empty(&smm->uncommitted)) { |
| 163 | smm->nr_uncommitted--; | 221 | struct block_op bop; |
| 164 | r = commit_bop(smm, smm->uncommitted + | 222 | |
| 165 | smm->nr_uncommitted); | 223 | r = brb_pop(&smm->uncommitted, &bop); |
| 224 | if (r) { | ||
| 225 | DMERR("bug in bop ring buffer"); | ||
| 226 | break; | ||
| 227 | } | ||
| 228 | |||
| 229 | r = commit_bop(smm, &bop); | ||
| 166 | if (r) | 230 | if (r) |
| 167 | break; | 231 | break; |
| 168 | } | 232 | } |
| @@ -217,7 +281,8 @@ static int sm_metadata_get_nr_free(struct dm_space_map *sm, dm_block_t *count) | |||
| 217 | static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b, | 281 | static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b, |
| 218 | uint32_t *result) | 282 | uint32_t *result) |
| 219 | { | 283 | { |
| 220 | int r, i; | 284 | int r; |
| 285 | unsigned i; | ||
| 221 | struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); | 286 | struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); |
| 222 | unsigned adjustment = 0; | 287 | unsigned adjustment = 0; |
| 223 | 288 | ||
| @@ -225,8 +290,10 @@ static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b, | |||
| 225 | * We may have some uncommitted adjustments to add. This list | 290 | * We may have some uncommitted adjustments to add. This list |
| 226 | * should always be really short. | 291 | * should always be really short. |
| 227 | */ | 292 | */ |
| 228 | for (i = 0; i < smm->nr_uncommitted; i++) { | 293 | for (i = smm->uncommitted.begin; |
| 229 | struct block_op *op = smm->uncommitted + i; | 294 | i != smm->uncommitted.end; |
| 295 | i = brb_next(&smm->uncommitted, i)) { | ||
| 296 | struct block_op *op = smm->uncommitted.bops + i; | ||
| 230 | 297 | ||
| 231 | if (op->block != b) | 298 | if (op->block != b) |
| 232 | continue; | 299 | continue; |
| @@ -254,7 +321,8 @@ static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b, | |||
| 254 | static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm, | 321 | static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm, |
| 255 | dm_block_t b, int *result) | 322 | dm_block_t b, int *result) |
| 256 | { | 323 | { |
| 257 | int r, i, adjustment = 0; | 324 | int r, adjustment = 0; |
| 325 | unsigned i; | ||
| 258 | struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); | 326 | struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); |
| 259 | uint32_t rc; | 327 | uint32_t rc; |
| 260 | 328 | ||
| @@ -262,8 +330,11 @@ static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm, | |||
| 262 | * We may have some uncommitted adjustments to add. This list | 330 | * We may have some uncommitted adjustments to add. This list |
| 263 | * should always be really short. | 331 | * should always be really short. |
| 264 | */ | 332 | */ |
| 265 | for (i = 0; i < smm->nr_uncommitted; i++) { | 333 | for (i = smm->uncommitted.begin; |
| 266 | struct block_op *op = smm->uncommitted + i; | 334 | i != smm->uncommitted.end; |
| 335 | i = brb_next(&smm->uncommitted, i)) { | ||
| 336 | |||
| 337 | struct block_op *op = smm->uncommitted.bops + i; | ||
| 267 | 338 | ||
| 268 | if (op->block != b) | 339 | if (op->block != b) |
| 269 | continue; | 340 | continue; |
| @@ -671,7 +742,7 @@ int dm_sm_metadata_create(struct dm_space_map *sm, | |||
| 671 | smm->begin = superblock + 1; | 742 | smm->begin = superblock + 1; |
| 672 | smm->recursion_count = 0; | 743 | smm->recursion_count = 0; |
| 673 | smm->allocated_this_transaction = 0; | 744 | smm->allocated_this_transaction = 0; |
| 674 | smm->nr_uncommitted = 0; | 745 | brb_init(&smm->uncommitted); |
| 675 | threshold_init(&smm->threshold); | 746 | threshold_init(&smm->threshold); |
| 676 | 747 | ||
| 677 | memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm)); | 748 | memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm)); |
| @@ -715,7 +786,7 @@ int dm_sm_metadata_open(struct dm_space_map *sm, | |||
| 715 | smm->begin = 0; | 786 | smm->begin = 0; |
| 716 | smm->recursion_count = 0; | 787 | smm->recursion_count = 0; |
| 717 | smm->allocated_this_transaction = 0; | 788 | smm->allocated_this_transaction = 0; |
| 718 | smm->nr_uncommitted = 0; | 789 | brb_init(&smm->uncommitted); |
| 719 | threshold_init(&smm->threshold); | 790 | threshold_init(&smm->threshold); |
| 720 | 791 | ||
| 721 | memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll)); | 792 | memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll)); |
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c index b9e2000969f0..95c894482fdd 100644 --- a/drivers/misc/sgi-xp/xpc_uv.c +++ b/drivers/misc/sgi-xp/xpc_uv.c | |||
| @@ -240,7 +240,7 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name, | |||
| 240 | 240 | ||
| 241 | nid = cpu_to_node(cpu); | 241 | nid = cpu_to_node(cpu); |
| 242 | page = alloc_pages_exact_node(nid, | 242 | page = alloc_pages_exact_node(nid, |
| 243 | GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, | 243 | GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE, |
| 244 | pg_order); | 244 | pg_order); |
| 245 | if (page == NULL) { | 245 | if (page == NULL) { |
| 246 | dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d " | 246 | dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d " |
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 6d20fbde8d43..dcde56057fe1 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c | |||
| @@ -181,7 +181,7 @@ static inline int __agg_has_partner(struct aggregator *agg) | |||
| 181 | */ | 181 | */ |
| 182 | static inline void __disable_port(struct port *port) | 182 | static inline void __disable_port(struct port *port) |
| 183 | { | 183 | { |
| 184 | bond_set_slave_inactive_flags(port->slave); | 184 | bond_set_slave_inactive_flags(port->slave, BOND_SLAVE_NOTIFY_LATER); |
| 185 | } | 185 | } |
| 186 | 186 | ||
| 187 | /** | 187 | /** |
| @@ -193,7 +193,7 @@ static inline void __enable_port(struct port *port) | |||
| 193 | struct slave *slave = port->slave; | 193 | struct slave *slave = port->slave; |
| 194 | 194 | ||
| 195 | if ((slave->link == BOND_LINK_UP) && IS_UP(slave->dev)) | 195 | if ((slave->link == BOND_LINK_UP) && IS_UP(slave->dev)) |
| 196 | bond_set_slave_active_flags(slave); | 196 | bond_set_slave_active_flags(slave, BOND_SLAVE_NOTIFY_LATER); |
| 197 | } | 197 | } |
| 198 | 198 | ||
| 199 | /** | 199 | /** |
| @@ -2062,6 +2062,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work) | |||
| 2062 | struct list_head *iter; | 2062 | struct list_head *iter; |
| 2063 | struct slave *slave; | 2063 | struct slave *slave; |
| 2064 | struct port *port; | 2064 | struct port *port; |
| 2065 | bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER; | ||
| 2065 | 2066 | ||
| 2066 | read_lock(&bond->lock); | 2067 | read_lock(&bond->lock); |
| 2067 | rcu_read_lock(); | 2068 | rcu_read_lock(); |
| @@ -2119,8 +2120,19 @@ void bond_3ad_state_machine_handler(struct work_struct *work) | |||
| 2119 | } | 2120 | } |
| 2120 | 2121 | ||
| 2121 | re_arm: | 2122 | re_arm: |
| 2123 | bond_for_each_slave_rcu(bond, slave, iter) { | ||
| 2124 | if (slave->should_notify) { | ||
| 2125 | should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW; | ||
| 2126 | break; | ||
| 2127 | } | ||
| 2128 | } | ||
| 2122 | rcu_read_unlock(); | 2129 | rcu_read_unlock(); |
| 2123 | read_unlock(&bond->lock); | 2130 | read_unlock(&bond->lock); |
| 2131 | |||
| 2132 | if (should_notify_rtnl && rtnl_trylock()) { | ||
| 2133 | bond_slave_state_notify(bond); | ||
| 2134 | rtnl_unlock(); | ||
| 2135 | } | ||
| 2124 | queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks); | 2136 | queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks); |
| 2125 | } | 2137 | } |
| 2126 | 2138 | ||
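The 802.3ad changes above defer slave-state notifications: the state machine, which runs under bond->lock and RCU and therefore must not take rtnl, only marks slaves as needing notification, and rtnl is then taken opportunistically with rtnl_trylock() once the locks are dropped. A loose userspace sketch of that defer-then-trylock pattern, with pthread mutexes standing in for rtnl:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;    /* stand-in for rtnl_lock */
static bool should_notify;              /* stand-in for slave->should_notify */

static void state_machine_pass(void)
{
        /* The real state machine does its per-slave work here under
         * bond->lock/RCU; it only records that a notification is pending. */
        should_notify = true;

        /* Flush the pending notification only if the heavier lock is free
         * right now; otherwise it is picked up on a later pass. */
        if (should_notify && pthread_mutex_trylock(&rtnl) == 0) {
                printf("notifying slave state change under rtnl\n");
                should_notify = false;
                pthread_mutex_unlock(&rtnl);
        }
}

int main(void)
{
        state_machine_pass();
        return 0;
}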
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index a2c47476804d..e8f133e926aa 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c | |||
| @@ -730,7 +730,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon | |||
| 730 | client_info->ntt = 0; | 730 | client_info->ntt = 0; |
| 731 | } | 731 | } |
| 732 | 732 | ||
| 733 | if (!vlan_get_tag(skb, &client_info->vlan_id)) | 733 | if (vlan_get_tag(skb, &client_info->vlan_id)) |
| 734 | client_info->vlan_id = 0; | 734 | client_info->vlan_id = 0; |
| 735 | 735 | ||
| 736 | if (!client_info->assigned) { | 736 | if (!client_info->assigned) { |
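The one-line bond_alb fix above exists because vlan_get_tag() returns 0 on success, so the old negated test zeroed client_info->vlan_id exactly when a tag had been found. A tiny sketch of the 0-on-success convention the fix restores; get_tag() here is a made-up stand-in, not the kernel helper:

#include <stdio.h>

/* Stand-in for vlan_get_tag(): fills *tag and returns 0 when the frame
 * carries a VLAN tag, non-zero otherwise. */
static int get_tag(int frame_has_tag, unsigned short *tag)
{
        if (!frame_has_tag)
                return -1;
        *tag = 42;
        return 0;
}

int main(void)
{
        unsigned short vlan_id;

        /* Fixed logic: reset the id only when no tag could be read. */
        vlan_id = 7;
        if (get_tag(0, &vlan_id))
                vlan_id = 0;
        printf("untagged frame -> vlan_id %u\n", vlan_id);      /* 0 */

        vlan_id = 7;
        if (get_tag(1, &vlan_id))
                vlan_id = 0;
        printf("tagged frame   -> vlan_id %u\n", vlan_id);      /* 42 */
        return 0;
}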
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 1c6104d3501d..e5628fc725c3 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
| @@ -829,21 +829,25 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) | |||
| 829 | if (bond_is_lb(bond)) { | 829 | if (bond_is_lb(bond)) { |
| 830 | bond_alb_handle_active_change(bond, new_active); | 830 | bond_alb_handle_active_change(bond, new_active); |
| 831 | if (old_active) | 831 | if (old_active) |
| 832 | bond_set_slave_inactive_flags(old_active); | 832 | bond_set_slave_inactive_flags(old_active, |
| 833 | BOND_SLAVE_NOTIFY_NOW); | ||
| 833 | if (new_active) | 834 | if (new_active) |
| 834 | bond_set_slave_active_flags(new_active); | 835 | bond_set_slave_active_flags(new_active, |
| 836 | BOND_SLAVE_NOTIFY_NOW); | ||
| 835 | } else { | 837 | } else { |
| 836 | rcu_assign_pointer(bond->curr_active_slave, new_active); | 838 | rcu_assign_pointer(bond->curr_active_slave, new_active); |
| 837 | } | 839 | } |
| 838 | 840 | ||
| 839 | if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) { | 841 | if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) { |
| 840 | if (old_active) | 842 | if (old_active) |
| 841 | bond_set_slave_inactive_flags(old_active); | 843 | bond_set_slave_inactive_flags(old_active, |
| 844 | BOND_SLAVE_NOTIFY_NOW); | ||
| 842 | 845 | ||
| 843 | if (new_active) { | 846 | if (new_active) { |
| 844 | bool should_notify_peers = false; | 847 | bool should_notify_peers = false; |
| 845 | 848 | ||
| 846 | bond_set_slave_active_flags(new_active); | 849 | bond_set_slave_active_flags(new_active, |
| 850 | BOND_SLAVE_NOTIFY_NOW); | ||
| 847 | 851 | ||
| 848 | if (bond->params.fail_over_mac) | 852 | if (bond->params.fail_over_mac) |
| 849 | bond_do_fail_over_mac(bond, new_active, | 853 | bond_do_fail_over_mac(bond, new_active, |
| @@ -1193,6 +1197,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
| 1193 | return -EBUSY; | 1197 | return -EBUSY; |
| 1194 | } | 1198 | } |
| 1195 | 1199 | ||
| 1200 | if (bond_dev == slave_dev) { | ||
| 1201 | pr_err("%s: cannot enslave bond to itself.\n", bond_dev->name); | ||
| 1202 | return -EPERM; | ||
| 1203 | } | ||
| 1204 | |||
| 1196 | /* vlan challenged mutual exclusion */ | 1205 | /* vlan challenged mutual exclusion */ |
| 1197 | /* no need to lock since we're protected by rtnl_lock */ | 1206 | /* no need to lock since we're protected by rtnl_lock */ |
| 1198 | if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) { | 1207 | if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) { |
| @@ -1463,14 +1472,15 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
| 1463 | 1472 | ||
| 1464 | switch (bond->params.mode) { | 1473 | switch (bond->params.mode) { |
| 1465 | case BOND_MODE_ACTIVEBACKUP: | 1474 | case BOND_MODE_ACTIVEBACKUP: |
| 1466 | bond_set_slave_inactive_flags(new_slave); | 1475 | bond_set_slave_inactive_flags(new_slave, |
| 1476 | BOND_SLAVE_NOTIFY_NOW); | ||
| 1467 | break; | 1477 | break; |
| 1468 | case BOND_MODE_8023AD: | 1478 | case BOND_MODE_8023AD: |
| 1469 | /* in 802.3ad mode, the internal mechanism | 1479 | /* in 802.3ad mode, the internal mechanism |
| 1470 | * will activate the slaves in the selected | 1480 | * will activate the slaves in the selected |
| 1471 | * aggregator | 1481 | * aggregator |
| 1472 | */ | 1482 | */ |
| 1473 | bond_set_slave_inactive_flags(new_slave); | 1483 | bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW); |
| 1474 | /* if this is the first slave */ | 1484 | /* if this is the first slave */ |
| 1475 | if (!prev_slave) { | 1485 | if (!prev_slave) { |
| 1476 | SLAVE_AD_INFO(new_slave).id = 1; | 1486 | SLAVE_AD_INFO(new_slave).id = 1; |
| @@ -1488,7 +1498,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
| 1488 | case BOND_MODE_TLB: | 1498 | case BOND_MODE_TLB: |
| 1489 | case BOND_MODE_ALB: | 1499 | case BOND_MODE_ALB: |
| 1490 | bond_set_active_slave(new_slave); | 1500 | bond_set_active_slave(new_slave); |
| 1491 | bond_set_slave_inactive_flags(new_slave); | 1501 | bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW); |
| 1492 | break; | 1502 | break; |
| 1493 | default: | 1503 | default: |
| 1494 | pr_debug("This slave is always active in trunk mode\n"); | 1504 | pr_debug("This slave is always active in trunk mode\n"); |
| @@ -1654,9 +1664,6 @@ static int __bond_release_one(struct net_device *bond_dev, | |||
| 1654 | return -EINVAL; | 1664 | return -EINVAL; |
| 1655 | } | 1665 | } |
| 1656 | 1666 | ||
| 1657 | /* release the slave from its bond */ | ||
| 1658 | bond->slave_cnt--; | ||
| 1659 | |||
| 1660 | bond_sysfs_slave_del(slave); | 1667 | bond_sysfs_slave_del(slave); |
| 1661 | 1668 | ||
| 1662 | bond_upper_dev_unlink(bond_dev, slave_dev); | 1669 | bond_upper_dev_unlink(bond_dev, slave_dev); |
| @@ -1738,6 +1745,7 @@ static int __bond_release_one(struct net_device *bond_dev, | |||
| 1738 | 1745 | ||
| 1739 | unblock_netpoll_tx(); | 1746 | unblock_netpoll_tx(); |
| 1740 | synchronize_rcu(); | 1747 | synchronize_rcu(); |
| 1748 | bond->slave_cnt--; | ||
| 1741 | 1749 | ||
| 1742 | if (!bond_has_slaves(bond)) { | 1750 | if (!bond_has_slaves(bond)) { |
| 1743 | call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev); | 1751 | call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev); |
| @@ -2015,7 +2023,8 @@ static void bond_miimon_commit(struct bonding *bond) | |||
| 2015 | 2023 | ||
| 2016 | if (bond->params.mode == BOND_MODE_ACTIVEBACKUP || | 2024 | if (bond->params.mode == BOND_MODE_ACTIVEBACKUP || |
| 2017 | bond->params.mode == BOND_MODE_8023AD) | 2025 | bond->params.mode == BOND_MODE_8023AD) |
| 2018 | bond_set_slave_inactive_flags(slave); | 2026 | bond_set_slave_inactive_flags(slave, |
| 2027 | BOND_SLAVE_NOTIFY_NOW); | ||
| 2019 | 2028 | ||
| 2020 | pr_info("%s: link status definitely down for interface %s, disabling it\n", | 2029 | pr_info("%s: link status definitely down for interface %s, disabling it\n", |
| 2021 | bond->dev->name, slave->dev->name); | 2030 | bond->dev->name, slave->dev->name); |
| @@ -2562,7 +2571,8 @@ static void bond_ab_arp_commit(struct bonding *bond) | |||
| 2562 | slave->link = BOND_LINK_UP; | 2571 | slave->link = BOND_LINK_UP; |
| 2563 | if (bond->current_arp_slave) { | 2572 | if (bond->current_arp_slave) { |
| 2564 | bond_set_slave_inactive_flags( | 2573 | bond_set_slave_inactive_flags( |
| 2565 | bond->current_arp_slave); | 2574 | bond->current_arp_slave, |
| 2575 | BOND_SLAVE_NOTIFY_NOW); | ||
| 2566 | bond->current_arp_slave = NULL; | 2576 | bond->current_arp_slave = NULL; |
| 2567 | } | 2577 | } |
| 2568 | 2578 | ||
| @@ -2582,7 +2592,8 @@ static void bond_ab_arp_commit(struct bonding *bond) | |||
| 2582 | slave->link_failure_count++; | 2592 | slave->link_failure_count++; |
| 2583 | 2593 | ||
| 2584 | slave->link = BOND_LINK_DOWN; | 2594 | slave->link = BOND_LINK_DOWN; |
| 2585 | bond_set_slave_inactive_flags(slave); | 2595 | bond_set_slave_inactive_flags(slave, |
| 2596 | BOND_SLAVE_NOTIFY_NOW); | ||
| 2586 | 2597 | ||
| 2587 | pr_info("%s: link status definitely down for interface %s, disabling it\n", | 2598 | pr_info("%s: link status definitely down for interface %s, disabling it\n", |
| 2588 | bond->dev->name, slave->dev->name); | 2599 | bond->dev->name, slave->dev->name); |
| @@ -2615,17 +2626,17 @@ do_failover: | |||
| 2615 | 2626 | ||
| 2616 | /* | 2627 | /* |
| 2617 | * Send ARP probes for active-backup mode ARP monitor. | 2628 | * Send ARP probes for active-backup mode ARP monitor. |
| 2629 | * | ||
| 2630 | * Called with rcu_read_lock held. | ||
| 2618 | */ | 2631 | */ |
| 2619 | static bool bond_ab_arp_probe(struct bonding *bond) | 2632 | static bool bond_ab_arp_probe(struct bonding *bond) |
| 2620 | { | 2633 | { |
| 2621 | struct slave *slave, *before = NULL, *new_slave = NULL, | 2634 | struct slave *slave, *before = NULL, *new_slave = NULL, |
| 2622 | *curr_arp_slave, *curr_active_slave; | 2635 | *curr_arp_slave = rcu_dereference(bond->current_arp_slave), |
| 2636 | *curr_active_slave = rcu_dereference(bond->curr_active_slave); | ||
| 2623 | struct list_head *iter; | 2637 | struct list_head *iter; |
| 2624 | bool found = false; | 2638 | bool found = false; |
| 2625 | 2639 | bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER; | |
| 2626 | rcu_read_lock(); | ||
| 2627 | curr_arp_slave = rcu_dereference(bond->current_arp_slave); | ||
| 2628 | curr_active_slave = rcu_dereference(bond->curr_active_slave); | ||
| 2629 | 2640 | ||
| 2630 | if (curr_arp_slave && curr_active_slave) | 2641 | if (curr_arp_slave && curr_active_slave) |
| 2631 | pr_info("PROBE: c_arp %s && cas %s BAD\n", | 2642 | pr_info("PROBE: c_arp %s && cas %s BAD\n", |
| @@ -2634,32 +2645,23 @@ static bool bond_ab_arp_probe(struct bonding *bond) | |||
| 2634 | 2645 | ||
| 2635 | if (curr_active_slave) { | 2646 | if (curr_active_slave) { |
| 2636 | bond_arp_send_all(bond, curr_active_slave); | 2647 | bond_arp_send_all(bond, curr_active_slave); |
| 2637 | rcu_read_unlock(); | 2648 | return should_notify_rtnl; |
| 2638 | return true; | ||
| 2639 | } | 2649 | } |
| 2640 | rcu_read_unlock(); | ||
| 2641 | 2650 | ||
| 2642 | /* if we don't have a curr_active_slave, search for the next available | 2651 | /* if we don't have a curr_active_slave, search for the next available |
| 2643 | * backup slave from the current_arp_slave and make it the candidate | 2652 | * backup slave from the current_arp_slave and make it the candidate |
| 2644 | * for becoming the curr_active_slave | 2653 | * for becoming the curr_active_slave |
| 2645 | */ | 2654 | */ |
| 2646 | 2655 | ||
| 2647 | if (!rtnl_trylock()) | ||
| 2648 | return false; | ||
| 2649 | /* curr_arp_slave might have gone away */ | ||
| 2650 | curr_arp_slave = ACCESS_ONCE(bond->current_arp_slave); | ||
| 2651 | |||
| 2652 | if (!curr_arp_slave) { | 2656 | if (!curr_arp_slave) { |
| 2653 | curr_arp_slave = bond_first_slave(bond); | 2657 | curr_arp_slave = bond_first_slave_rcu(bond); |
| 2654 | if (!curr_arp_slave) { | 2658 | if (!curr_arp_slave) |
| 2655 | rtnl_unlock(); | 2659 | return should_notify_rtnl; |
| 2656 | return true; | ||
| 2657 | } | ||
| 2658 | } | 2660 | } |
| 2659 | 2661 | ||
| 2660 | bond_set_slave_inactive_flags(curr_arp_slave); | 2662 | bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER); |
| 2661 | 2663 | ||
| 2662 | bond_for_each_slave(bond, slave, iter) { | 2664 | bond_for_each_slave_rcu(bond, slave, iter) { |
| 2663 | if (!found && !before && IS_UP(slave->dev)) | 2665 | if (!found && !before && IS_UP(slave->dev)) |
| 2664 | before = slave; | 2666 | before = slave; |
| 2665 | 2667 | ||
| @@ -2677,7 +2679,8 @@ static bool bond_ab_arp_probe(struct bonding *bond) | |||
| 2677 | if (slave->link_failure_count < UINT_MAX) | 2679 | if (slave->link_failure_count < UINT_MAX) |
| 2678 | slave->link_failure_count++; | 2680 | slave->link_failure_count++; |
| 2679 | 2681 | ||
| 2680 | bond_set_slave_inactive_flags(slave); | 2682 | bond_set_slave_inactive_flags(slave, |
| 2683 | BOND_SLAVE_NOTIFY_LATER); | ||
| 2681 | 2684 | ||
| 2682 | pr_info("%s: backup interface %s is now down.\n", | 2685 | pr_info("%s: backup interface %s is now down.\n", |
| 2683 | bond->dev->name, slave->dev->name); | 2686 | bond->dev->name, slave->dev->name); |
| @@ -2689,26 +2692,31 @@ static bool bond_ab_arp_probe(struct bonding *bond) | |||
| 2689 | if (!new_slave && before) | 2692 | if (!new_slave && before) |
| 2690 | new_slave = before; | 2693 | new_slave = before; |
| 2691 | 2694 | ||
| 2692 | if (!new_slave) { | 2695 | if (!new_slave) |
| 2693 | rtnl_unlock(); | 2696 | goto check_state; |
| 2694 | return true; | ||
| 2695 | } | ||
| 2696 | 2697 | ||
| 2697 | new_slave->link = BOND_LINK_BACK; | 2698 | new_slave->link = BOND_LINK_BACK; |
| 2698 | bond_set_slave_active_flags(new_slave); | 2699 | bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER); |
| 2699 | bond_arp_send_all(bond, new_slave); | 2700 | bond_arp_send_all(bond, new_slave); |
| 2700 | new_slave->jiffies = jiffies; | 2701 | new_slave->jiffies = jiffies; |
| 2701 | rcu_assign_pointer(bond->current_arp_slave, new_slave); | 2702 | rcu_assign_pointer(bond->current_arp_slave, new_slave); |
| 2702 | rtnl_unlock(); | ||
| 2703 | 2703 | ||
| 2704 | return true; | 2704 | check_state: |
| 2705 | bond_for_each_slave_rcu(bond, slave, iter) { | ||
| 2706 | if (slave->should_notify) { | ||
| 2707 | should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW; | ||
| 2708 | break; | ||
| 2709 | } | ||
| 2710 | } | ||
| 2711 | return should_notify_rtnl; | ||
| 2705 | } | 2712 | } |
| 2706 | 2713 | ||
| 2707 | static void bond_activebackup_arp_mon(struct work_struct *work) | 2714 | static void bond_activebackup_arp_mon(struct work_struct *work) |
| 2708 | { | 2715 | { |
| 2709 | struct bonding *bond = container_of(work, struct bonding, | 2716 | struct bonding *bond = container_of(work, struct bonding, |
| 2710 | arp_work.work); | 2717 | arp_work.work); |
| 2711 | bool should_notify_peers = false, should_commit = false; | 2718 | bool should_notify_peers = false; |
| 2719 | bool should_notify_rtnl = false; | ||
| 2712 | int delta_in_ticks; | 2720 | int delta_in_ticks; |
| 2713 | 2721 | ||
| 2714 | delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval); | 2722 | delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval); |
| @@ -2717,11 +2725,12 @@ static void bond_activebackup_arp_mon(struct work_struct *work) | |||
| 2717 | goto re_arm; | 2725 | goto re_arm; |
| 2718 | 2726 | ||
| 2719 | rcu_read_lock(); | 2727 | rcu_read_lock(); |
| 2728 | |||
| 2720 | should_notify_peers = bond_should_notify_peers(bond); | 2729 | should_notify_peers = bond_should_notify_peers(bond); |
| 2721 | should_commit = bond_ab_arp_inspect(bond); | ||
| 2722 | rcu_read_unlock(); | ||
| 2723 | 2730 | ||
| 2724 | if (should_commit) { | 2731 | if (bond_ab_arp_inspect(bond)) { |
| 2732 | rcu_read_unlock(); | ||
| 2733 | |||
| 2725 | /* Race avoidance with bond_close flush of workqueue */ | 2734 | /* Race avoidance with bond_close flush of workqueue */ |
| 2726 | if (!rtnl_trylock()) { | 2735 | if (!rtnl_trylock()) { |
| 2727 | delta_in_ticks = 1; | 2736 | delta_in_ticks = 1; |
| @@ -2730,23 +2739,28 @@ static void bond_activebackup_arp_mon(struct work_struct *work) | |||
| 2730 | } | 2739 | } |
| 2731 | 2740 | ||
| 2732 | bond_ab_arp_commit(bond); | 2741 | bond_ab_arp_commit(bond); |
| 2742 | |||
| 2733 | rtnl_unlock(); | 2743 | rtnl_unlock(); |
| 2744 | rcu_read_lock(); | ||
| 2734 | } | 2745 | } |
| 2735 | 2746 | ||
| 2736 | if (!bond_ab_arp_probe(bond)) { | 2747 | should_notify_rtnl = bond_ab_arp_probe(bond); |
| 2737 | /* rtnl locking failed, re-arm */ | 2748 | rcu_read_unlock(); |
| 2738 | delta_in_ticks = 1; | ||
| 2739 | should_notify_peers = false; | ||
| 2740 | } | ||
| 2741 | 2749 | ||
| 2742 | re_arm: | 2750 | re_arm: |
| 2743 | if (bond->params.arp_interval) | 2751 | if (bond->params.arp_interval) |
| 2744 | queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); | 2752 | queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); |
| 2745 | 2753 | ||
| 2746 | if (should_notify_peers) { | 2754 | if (should_notify_peers || should_notify_rtnl) { |
| 2747 | if (!rtnl_trylock()) | 2755 | if (!rtnl_trylock()) |
| 2748 | return; | 2756 | return; |
| 2749 | call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev); | 2757 | |
| 2758 | if (should_notify_peers) | ||
| 2759 | call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, | ||
| 2760 | bond->dev); | ||
| 2761 | if (should_notify_rtnl) | ||
| 2762 | bond_slave_state_notify(bond); | ||
| 2763 | |||
| 2750 | rtnl_unlock(); | 2764 | rtnl_unlock(); |
| 2751 | } | 2765 | } |
| 2752 | } | 2766 | } |
| @@ -3046,9 +3060,11 @@ static int bond_open(struct net_device *bond_dev) | |||
| 3046 | bond_for_each_slave(bond, slave, iter) { | 3060 | bond_for_each_slave(bond, slave, iter) { |
| 3047 | if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP) | 3061 | if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP) |
| 3048 | && (slave != bond->curr_active_slave)) { | 3062 | && (slave != bond->curr_active_slave)) { |
| 3049 | bond_set_slave_inactive_flags(slave); | 3063 | bond_set_slave_inactive_flags(slave, |
| 3064 | BOND_SLAVE_NOTIFY_NOW); | ||
| 3050 | } else { | 3065 | } else { |
| 3051 | bond_set_slave_active_flags(slave); | 3066 | bond_set_slave_active_flags(slave, |
| 3067 | BOND_SLAVE_NOTIFY_NOW); | ||
| 3052 | } | 3068 | } |
| 3053 | } | 3069 | } |
| 3054 | read_unlock(&bond->curr_slave_lock); | 3070 | read_unlock(&bond->curr_slave_lock); |
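
The bond_main.c hunks above all serve one pattern: the active-backup ARP monitor now runs its inspect and probe passes entirely under rcu_read_lock(), takes RTNL only for the commit phase, and defers the per-slave RTM_NEWLINK messages to the end of the cycle. A minimal sketch of that locking shape, assuming the bonding.h helpers shown further down; monitor_inspect/monitor_commit/monitor_probe are placeholder names, not bonding API:

    static void monitor_work_sketch(struct work_struct *work)
    {
            struct bonding *bond = container_of(work, struct bonding,
                                                arp_work.work);
            bool notify_rtnl;

            rcu_read_lock();
            if (monitor_inspect(bond)) {            /* read-only pass, RCU only */
                    rcu_read_unlock();
                    if (!rtnl_trylock()) {          /* avoid racing bond_close() */
                            queue_delayed_work(bond->wq, &bond->arp_work, 1);
                            return;
                    }
                    monitor_commit(bond);           /* link changes need RTNL */
                    rtnl_unlock();
                    rcu_read_lock();
            }
            notify_rtnl = monitor_probe(bond);      /* flags with NOTIFY_LATER */
            rcu_read_unlock();

            queue_delayed_work(bond->wq, &bond->arp_work,
                               msecs_to_jiffies(bond->params.arp_interval));

            if (notify_rtnl && rtnl_trylock()) {
                    bond_slave_state_notify(bond);  /* flush deferred RTM_NEWLINK */
                    rtnl_unlock();
            }
    }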
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index c37878432717..298c26509095 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c | |||
| @@ -121,6 +121,7 @@ static struct bond_opt_value bond_resend_igmp_tbl[] = { | |||
| 121 | static struct bond_opt_value bond_lp_interval_tbl[] = { | 121 | static struct bond_opt_value bond_lp_interval_tbl[] = { |
| 122 | { "minval", 1, BOND_VALFLAG_MIN | BOND_VALFLAG_DEFAULT}, | 122 | { "minval", 1, BOND_VALFLAG_MIN | BOND_VALFLAG_DEFAULT}, |
| 123 | { "maxval", INT_MAX, BOND_VALFLAG_MAX}, | 123 | { "maxval", INT_MAX, BOND_VALFLAG_MAX}, |
| 124 | { NULL, -1, 0}, | ||
| 124 | }; | 125 | }; |
| 125 | 126 | ||
| 126 | static struct bond_option bond_opts[] = { | 127 | static struct bond_option bond_opts[] = { |
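
The one-line bond_lp_interval_tbl fix matters because the bond_opt_value tables are walked until an entry with a NULL string, as the added sentinel suggests; without it the lookup runs past the end of the array. A hypothetical walker illustrating why the terminator is required (the real lookup lives elsewhere in bond_options.c and may differ in detail):

    /* Assumes struct bond_opt_value exposes .string and .value as the
     * initializers above imply; sketch only. */
    static struct bond_opt_value *opt_tbl_find(struct bond_opt_value *tbl, u64 val)
    {
            struct bond_opt_value *v;

            for (v = tbl; v->string; v++)   /* stops at the { NULL, -1, 0 } entry */
                    if (v->value == val)
                            return v;
            return NULL;
    }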
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index 86ccfb9f71cc..2b0fdec695f7 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h | |||
| @@ -195,7 +195,8 @@ struct slave { | |||
| 195 | s8 new_link; | 195 | s8 new_link; |
| 196 | u8 backup:1, /* indicates backup slave. Value corresponds with | 196 | u8 backup:1, /* indicates backup slave. Value corresponds with |
| 197 | BOND_STATE_ACTIVE and BOND_STATE_BACKUP */ | 197 | BOND_STATE_ACTIVE and BOND_STATE_BACKUP */ |
| 198 | inactive:1; /* indicates inactive slave */ | 198 | inactive:1, /* indicates inactive slave */ |
| 199 | should_notify:1; /* indicates whether the state changed */ | ||
| 199 | u8 duplex; | 200 | u8 duplex; |
| 200 | u32 original_mtu; | 201 | u32 original_mtu; |
| 201 | u32 link_failure_count; | 202 | u32 link_failure_count; |
| @@ -303,6 +304,24 @@ static inline void bond_set_backup_slave(struct slave *slave) | |||
| 303 | } | 304 | } |
| 304 | } | 305 | } |
| 305 | 306 | ||
| 307 | static inline void bond_set_slave_state(struct slave *slave, | ||
| 308 | int slave_state, bool notify) | ||
| 309 | { | ||
| 310 | if (slave->backup == slave_state) | ||
| 311 | return; | ||
| 312 | |||
| 313 | slave->backup = slave_state; | ||
| 314 | if (notify) { | ||
| 315 | rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_KERNEL); | ||
| 316 | slave->should_notify = 0; | ||
| 317 | } else { | ||
| 318 | if (slave->should_notify) | ||
| 319 | slave->should_notify = 0; | ||
| 320 | else | ||
| 321 | slave->should_notify = 1; | ||
| 322 | } | ||
| 323 | } | ||
| 324 | |||
| 306 | static inline void bond_slave_state_change(struct bonding *bond) | 325 | static inline void bond_slave_state_change(struct bonding *bond) |
| 307 | { | 326 | { |
| 308 | struct list_head *iter; | 327 | struct list_head *iter; |
| @@ -316,6 +335,19 @@ static inline void bond_slave_state_change(struct bonding *bond) | |||
| 316 | } | 335 | } |
| 317 | } | 336 | } |
| 318 | 337 | ||
| 338 | static inline void bond_slave_state_notify(struct bonding *bond) | ||
| 339 | { | ||
| 340 | struct list_head *iter; | ||
| 341 | struct slave *tmp; | ||
| 342 | |||
| 343 | bond_for_each_slave(bond, tmp, iter) { | ||
| 344 | if (tmp->should_notify) { | ||
| 345 | rtmsg_ifinfo(RTM_NEWLINK, tmp->dev, 0, GFP_KERNEL); | ||
| 346 | tmp->should_notify = 0; | ||
| 347 | } | ||
| 348 | } | ||
| 349 | } | ||
| 350 | |||
| 319 | static inline int bond_slave_state(struct slave *slave) | 351 | static inline int bond_slave_state(struct slave *slave) |
| 320 | { | 352 | { |
| 321 | return slave->backup; | 353 | return slave->backup; |
| @@ -343,6 +375,9 @@ static inline bool bond_is_active_slave(struct slave *slave) | |||
| 343 | #define BOND_ARP_VALIDATE_ALL (BOND_ARP_VALIDATE_ACTIVE | \ | 375 | #define BOND_ARP_VALIDATE_ALL (BOND_ARP_VALIDATE_ACTIVE | \ |
| 344 | BOND_ARP_VALIDATE_BACKUP) | 376 | BOND_ARP_VALIDATE_BACKUP) |
| 345 | 377 | ||
| 378 | #define BOND_SLAVE_NOTIFY_NOW true | ||
| 379 | #define BOND_SLAVE_NOTIFY_LATER false | ||
| 380 | |||
| 346 | static inline int slave_do_arp_validate(struct bonding *bond, | 381 | static inline int slave_do_arp_validate(struct bonding *bond, |
| 347 | struct slave *slave) | 382 | struct slave *slave) |
| 348 | { | 383 | { |
| @@ -394,17 +429,19 @@ static inline void bond_netpoll_send_skb(const struct slave *slave, | |||
| 394 | } | 429 | } |
| 395 | #endif | 430 | #endif |
| 396 | 431 | ||
| 397 | static inline void bond_set_slave_inactive_flags(struct slave *slave) | 432 | static inline void bond_set_slave_inactive_flags(struct slave *slave, |
| 433 | bool notify) | ||
| 398 | { | 434 | { |
| 399 | if (!bond_is_lb(slave->bond)) | 435 | if (!bond_is_lb(slave->bond)) |
| 400 | bond_set_backup_slave(slave); | 436 | bond_set_slave_state(slave, BOND_STATE_BACKUP, notify); |
| 401 | if (!slave->bond->params.all_slaves_active) | 437 | if (!slave->bond->params.all_slaves_active) |
| 402 | slave->inactive = 1; | 438 | slave->inactive = 1; |
| 403 | } | 439 | } |
| 404 | 440 | ||
| 405 | static inline void bond_set_slave_active_flags(struct slave *slave) | 441 | static inline void bond_set_slave_active_flags(struct slave *slave, |
| 442 | bool notify) | ||
| 406 | { | 443 | { |
| 407 | bond_set_active_slave(slave); | 444 | bond_set_slave_state(slave, BOND_STATE_ACTIVE, notify); |
| 408 | slave->inactive = 0; | 445 | slave->inactive = 0; |
| 409 | } | 446 | } |
| 410 | 447 | ||
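
Note that bond_set_slave_state() toggles should_notify rather than setting it when notify is false: a second deferred transition that restores the original state clears the pending flag again, so bond_slave_state_notify() only sends RTM_NEWLINK for slaves whose state actually differs from the last flush. Typical usage split across contexts (sketch):

    /* Under rcu_read_lock(): cannot take RTNL or sleep, so defer the message. */
    bond_set_slave_inactive_flags(slave, BOND_SLAVE_NOTIFY_LATER);

    /* Later, with RTNL held: emit RTM_NEWLINK for every slave still flagged. */
    bond_slave_state_notify(bond);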
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 320bef2dba42..61376abdab39 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c | |||
| @@ -144,6 +144,8 @@ | |||
| 144 | 144 | ||
| 145 | #define FLEXCAN_MB_CODE_MASK (0xf0ffffff) | 145 | #define FLEXCAN_MB_CODE_MASK (0xf0ffffff) |
| 146 | 146 | ||
| 147 | #define FLEXCAN_TIMEOUT_US (50) | ||
| 148 | |||
| 147 | /* | 149 | /* |
| 148 | * FLEXCAN hardware feature flags | 150 | * FLEXCAN hardware feature flags |
| 149 | * | 151 | * |
| @@ -262,6 +264,22 @@ static inline void flexcan_write(u32 val, void __iomem *addr) | |||
| 262 | } | 264 | } |
| 263 | #endif | 265 | #endif |
| 264 | 266 | ||
| 267 | static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv) | ||
| 268 | { | ||
| 269 | if (!priv->reg_xceiver) | ||
| 270 | return 0; | ||
| 271 | |||
| 272 | return regulator_enable(priv->reg_xceiver); | ||
| 273 | } | ||
| 274 | |||
| 275 | static inline int flexcan_transceiver_disable(const struct flexcan_priv *priv) | ||
| 276 | { | ||
| 277 | if (!priv->reg_xceiver) | ||
| 278 | return 0; | ||
| 279 | |||
| 280 | return regulator_disable(priv->reg_xceiver); | ||
| 281 | } | ||
| 282 | |||
| 265 | static inline int flexcan_has_and_handle_berr(const struct flexcan_priv *priv, | 283 | static inline int flexcan_has_and_handle_berr(const struct flexcan_priv *priv, |
| 266 | u32 reg_esr) | 284 | u32 reg_esr) |
| 267 | { | 285 | { |
| @@ -269,26 +287,95 @@ static inline int flexcan_has_and_handle_berr(const struct flexcan_priv *priv, | |||
| 269 | (reg_esr & FLEXCAN_ESR_ERR_BUS); | 287 | (reg_esr & FLEXCAN_ESR_ERR_BUS); |
| 270 | } | 288 | } |
| 271 | 289 | ||
| 272 | static inline void flexcan_chip_enable(struct flexcan_priv *priv) | 290 | static int flexcan_chip_enable(struct flexcan_priv *priv) |
| 273 | { | 291 | { |
| 274 | struct flexcan_regs __iomem *regs = priv->base; | 292 | struct flexcan_regs __iomem *regs = priv->base; |
| 293 | unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; | ||
| 275 | u32 reg; | 294 | u32 reg; |
| 276 | 295 | ||
| 277 | reg = flexcan_read(&regs->mcr); | 296 | reg = flexcan_read(&regs->mcr); |
| 278 | reg &= ~FLEXCAN_MCR_MDIS; | 297 | reg &= ~FLEXCAN_MCR_MDIS; |
| 279 | flexcan_write(reg, &regs->mcr); | 298 | flexcan_write(reg, &regs->mcr); |
| 280 | 299 | ||
| 281 | udelay(10); | 300 | while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)) |
| 301 | usleep_range(10, 20); | ||
| 302 | |||
| 303 | if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK) | ||
| 304 | return -ETIMEDOUT; | ||
| 305 | |||
| 306 | return 0; | ||
| 282 | } | 307 | } |
| 283 | 308 | ||
| 284 | static inline void flexcan_chip_disable(struct flexcan_priv *priv) | 309 | static int flexcan_chip_disable(struct flexcan_priv *priv) |
| 285 | { | 310 | { |
| 286 | struct flexcan_regs __iomem *regs = priv->base; | 311 | struct flexcan_regs __iomem *regs = priv->base; |
| 312 | unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; | ||
| 287 | u32 reg; | 313 | u32 reg; |
| 288 | 314 | ||
| 289 | reg = flexcan_read(®s->mcr); | 315 | reg = flexcan_read(®s->mcr); |
| 290 | reg |= FLEXCAN_MCR_MDIS; | 316 | reg |= FLEXCAN_MCR_MDIS; |
| 291 | flexcan_write(reg, ®s->mcr); | 317 | flexcan_write(reg, ®s->mcr); |
| 318 | |||
| 319 | while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)) | ||
| 320 | usleep_range(10, 20); | ||
| 321 | |||
| 322 | if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)) | ||
| 323 | return -ETIMEDOUT; | ||
| 324 | |||
| 325 | return 0; | ||
| 326 | } | ||
| 327 | |||
| 328 | static int flexcan_chip_freeze(struct flexcan_priv *priv) | ||
| 329 | { | ||
| 330 | struct flexcan_regs __iomem *regs = priv->base; | ||
| 331 | unsigned int timeout = 1000 * 1000 * 10 / priv->can.bittiming.bitrate; | ||
| 332 | u32 reg; | ||
| 333 | |||
| 334 | reg = flexcan_read(&regs->mcr); | ||
| 335 | reg |= FLEXCAN_MCR_HALT; | ||
| 336 | flexcan_write(reg, &regs->mcr); | ||
| 337 | |||
| 338 | while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)) | ||
| 339 | usleep_range(100, 200); | ||
| 340 | |||
| 341 | if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)) | ||
| 342 | return -ETIMEDOUT; | ||
| 343 | |||
| 344 | return 0; | ||
| 345 | } | ||
| 346 | |||
| 347 | static int flexcan_chip_unfreeze(struct flexcan_priv *priv) | ||
| 348 | { | ||
| 349 | struct flexcan_regs __iomem *regs = priv->base; | ||
| 350 | unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; | ||
| 351 | u32 reg; | ||
| 352 | |||
| 353 | reg = flexcan_read(&regs->mcr); | ||
| 354 | reg &= ~FLEXCAN_MCR_HALT; | ||
| 355 | flexcan_write(reg, &regs->mcr); | ||
| 356 | |||
| 357 | while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)) | ||
| 358 | usleep_range(10, 20); | ||
| 359 | |||
| 360 | if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK) | ||
| 361 | return -ETIMEDOUT; | ||
| 362 | |||
| 363 | return 0; | ||
| 364 | } | ||
| 365 | |||
| 366 | static int flexcan_chip_softreset(struct flexcan_priv *priv) | ||
| 367 | { | ||
| 368 | struct flexcan_regs __iomem *regs = priv->base; | ||
| 369 | unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; | ||
| 370 | |||
| 371 | flexcan_write(FLEXCAN_MCR_SOFTRST, &regs->mcr); | ||
| 372 | while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST)) | ||
| 373 | usleep_range(10, 20); | ||
| 374 | |||
| 375 | if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST) | ||
| 376 | return -ETIMEDOUT; | ||
| 377 | |||
| 378 | return 0; | ||
| 292 | } | 379 | } |
| 293 | 380 | ||
| 294 | static int flexcan_get_berr_counter(const struct net_device *dev, | 381 | static int flexcan_get_berr_counter(const struct net_device *dev, |
| @@ -709,19 +796,14 @@ static int flexcan_chip_start(struct net_device *dev) | |||
| 709 | u32 reg_mcr, reg_ctrl; | 796 | u32 reg_mcr, reg_ctrl; |
| 710 | 797 | ||
| 711 | /* enable module */ | 798 | /* enable module */ |
| 712 | flexcan_chip_enable(priv); | 799 | err = flexcan_chip_enable(priv); |
| 800 | if (err) | ||
| 801 | return err; | ||
| 713 | 802 | ||
| 714 | /* soft reset */ | 803 | /* soft reset */ |
| 715 | flexcan_write(FLEXCAN_MCR_SOFTRST, &regs->mcr); | 804 | err = flexcan_chip_softreset(priv); |
| 716 | udelay(10); | 805 | if (err) |
| 717 | 806 | goto out_chip_disable; | |
| 718 | reg_mcr = flexcan_read(&regs->mcr); | ||
| 719 | if (reg_mcr & FLEXCAN_MCR_SOFTRST) { | ||
| 720 | netdev_err(dev, "Failed to softreset can module (mcr=0x%08x)\n", | ||
| 721 | reg_mcr); | ||
| 722 | err = -ENODEV; | ||
| 723 | goto out; | ||
| 724 | } | ||
| 725 | 807 | ||
| 726 | flexcan_set_bittiming(dev); | 808 | flexcan_set_bittiming(dev); |
| 727 | 809 | ||
| @@ -788,16 +870,14 @@ static int flexcan_chip_start(struct net_device *dev) | |||
| 788 | if (priv->devtype_data->features & FLEXCAN_HAS_V10_FEATURES) | 870 | if (priv->devtype_data->features & FLEXCAN_HAS_V10_FEATURES) |
| 789 | flexcan_write(0x0, &regs->rxfgmask); | 871 | flexcan_write(0x0, &regs->rxfgmask); |
| 790 | 872 | ||
| 791 | if (priv->reg_xceiver) { | 873 | err = flexcan_transceiver_enable(priv); |
| 792 | err = regulator_enable(priv->reg_xceiver); | 874 | if (err) |
| 793 | if (err) | 875 | goto out_chip_disable; |
| 794 | goto out; | ||
| 795 | } | ||
| 796 | 876 | ||
| 797 | /* synchronize with the can bus */ | 877 | /* synchronize with the can bus */ |
| 798 | reg_mcr = flexcan_read(&regs->mcr); | 878 | err = flexcan_chip_unfreeze(priv); |
| 799 | reg_mcr &= ~FLEXCAN_MCR_HALT; | 879 | if (err) |
| 800 | flexcan_write(reg_mcr, &regs->mcr); | 880 | goto out_transceiver_disable; |
| 801 | 881 | ||
| 802 | priv->can.state = CAN_STATE_ERROR_ACTIVE; | 882 | priv->can.state = CAN_STATE_ERROR_ACTIVE; |
| 803 | 883 | ||
| @@ -810,7 +890,9 @@ static int flexcan_chip_start(struct net_device *dev) | |||
| 810 | 890 | ||
| 811 | return 0; | 891 | return 0; |
| 812 | 892 | ||
| 813 | out: | 893 | out_transceiver_disable: |
| 894 | flexcan_transceiver_disable(priv); | ||
| 895 | out_chip_disable: | ||
| 814 | flexcan_chip_disable(priv); | 896 | flexcan_chip_disable(priv); |
| 815 | return err; | 897 | return err; |
| 816 | } | 898 | } |
| @@ -825,18 +907,17 @@ static void flexcan_chip_stop(struct net_device *dev) | |||
| 825 | { | 907 | { |
| 826 | struct flexcan_priv *priv = netdev_priv(dev); | 908 | struct flexcan_priv *priv = netdev_priv(dev); |
| 827 | struct flexcan_regs __iomem *regs = priv->base; | 909 | struct flexcan_regs __iomem *regs = priv->base; |
| 828 | u32 reg; | 910 | |
| 911 | /* freeze + disable module */ | ||
| 912 | flexcan_chip_freeze(priv); | ||
| 913 | flexcan_chip_disable(priv); | ||
| 829 | 914 | ||
| 830 | /* Disable all interrupts */ | 915 | /* Disable all interrupts */ |
| 831 | flexcan_write(0, ®s->imask1); | 916 | flexcan_write(0, ®s->imask1); |
| 917 | flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL, | ||
| 918 | ®s->ctrl); | ||
| 832 | 919 | ||
| 833 | /* Disable + halt module */ | 920 | flexcan_transceiver_disable(priv); |
| 834 | reg = flexcan_read(&regs->mcr); | ||
| 835 | reg |= FLEXCAN_MCR_MDIS | FLEXCAN_MCR_HALT; | ||
| 836 | flexcan_write(reg, &regs->mcr); | ||
| 837 | |||
| 838 | if (priv->reg_xceiver) | ||
| 839 | regulator_disable(priv->reg_xceiver); | ||
| 840 | priv->can.state = CAN_STATE_STOPPED; | 921 | priv->can.state = CAN_STATE_STOPPED; |
| 841 | 922 | ||
| 842 | return; | 923 | return; |
| @@ -866,7 +947,7 @@ static int flexcan_open(struct net_device *dev) | |||
| 866 | /* start chip and queuing */ | 947 | /* start chip and queuing */ |
| 867 | err = flexcan_chip_start(dev); | 948 | err = flexcan_chip_start(dev); |
| 868 | if (err) | 949 | if (err) |
| 869 | goto out_close; | 950 | goto out_free_irq; |
| 870 | 951 | ||
| 871 | can_led_event(dev, CAN_LED_EVENT_OPEN); | 952 | can_led_event(dev, CAN_LED_EVENT_OPEN); |
| 872 | 953 | ||
| @@ -875,6 +956,8 @@ static int flexcan_open(struct net_device *dev) | |||
| 875 | 956 | ||
| 876 | return 0; | 957 | return 0; |
| 877 | 958 | ||
| 959 | out_free_irq: | ||
| 960 | free_irq(dev->irq, dev); | ||
| 878 | out_close: | 961 | out_close: |
| 879 | close_candev(dev); | 962 | close_candev(dev); |
| 880 | out_disable_per: | 963 | out_disable_per: |
| @@ -945,12 +1028,16 @@ static int register_flexcandev(struct net_device *dev) | |||
| 945 | goto out_disable_ipg; | 1028 | goto out_disable_ipg; |
| 946 | 1029 | ||
| 947 | /* select "bus clock", chip must be disabled */ | 1030 | /* select "bus clock", chip must be disabled */ |
| 948 | flexcan_chip_disable(priv); | 1031 | err = flexcan_chip_disable(priv); |
| 1032 | if (err) | ||
| 1033 | goto out_disable_per; | ||
| 949 | reg = flexcan_read(&regs->ctrl); | 1034 | reg = flexcan_read(&regs->ctrl); |
| 950 | reg |= FLEXCAN_CTRL_CLK_SRC; | 1035 | reg |= FLEXCAN_CTRL_CLK_SRC; |
| 951 | flexcan_write(reg, &regs->ctrl); | 1036 | flexcan_write(reg, &regs->ctrl); |
| 952 | 1037 | ||
| 953 | flexcan_chip_enable(priv); | 1038 | err = flexcan_chip_enable(priv); |
| 1039 | if (err) | ||
| 1040 | goto out_chip_disable; | ||
| 954 | 1041 | ||
| 955 | /* set freeze, halt and activate FIFO, restrict register access */ | 1042 | /* set freeze, halt and activate FIFO, restrict register access */ |
| 956 | reg = flexcan_read(®s->mcr); | 1043 | reg = flexcan_read(®s->mcr); |
| @@ -967,14 +1054,15 @@ static int register_flexcandev(struct net_device *dev) | |||
| 967 | if (!(reg & FLEXCAN_MCR_FEN)) { | 1054 | if (!(reg & FLEXCAN_MCR_FEN)) { |
| 968 | netdev_err(dev, "Could not enable RX FIFO, unsupported core\n"); | 1055 | netdev_err(dev, "Could not enable RX FIFO, unsupported core\n"); |
| 969 | err = -ENODEV; | 1056 | err = -ENODEV; |
| 970 | goto out_disable_per; | 1057 | goto out_chip_disable; |
| 971 | } | 1058 | } |
| 972 | 1059 | ||
| 973 | err = register_candev(dev); | 1060 | err = register_candev(dev); |
| 974 | 1061 | ||
| 975 | out_disable_per: | ||
| 976 | /* disable core and turn off clocks */ | 1062 | /* disable core and turn off clocks */ |
| 1063 | out_chip_disable: | ||
| 977 | flexcan_chip_disable(priv); | 1064 | flexcan_chip_disable(priv); |
| 1065 | out_disable_per: | ||
| 978 | clk_disable_unprepare(priv->clk_per); | 1066 | clk_disable_unprepare(priv->clk_per); |
| 979 | out_disable_ipg: | 1067 | out_disable_ipg: |
| 980 | clk_disable_unprepare(priv->clk_ipg); | 1068 | clk_disable_unprepare(priv->clk_ipg); |
| @@ -1104,9 +1192,10 @@ static int flexcan_probe(struct platform_device *pdev) | |||
| 1104 | static int flexcan_remove(struct platform_device *pdev) | 1192 | static int flexcan_remove(struct platform_device *pdev) |
| 1105 | { | 1193 | { |
| 1106 | struct net_device *dev = platform_get_drvdata(pdev); | 1194 | struct net_device *dev = platform_get_drvdata(pdev); |
| 1195 | struct flexcan_priv *priv = netdev_priv(dev); | ||
| 1107 | 1196 | ||
| 1108 | unregister_flexcandev(dev); | 1197 | unregister_flexcandev(dev); |
| 1109 | 1198 | netif_napi_del(&priv->napi); | |
| 1110 | free_candev(dev); | 1199 | free_candev(dev); |
| 1111 | 1200 | ||
| 1112 | return 0; | 1201 | return 0; |
| @@ -1117,8 +1206,11 @@ static int flexcan_suspend(struct device *device) | |||
| 1117 | { | 1206 | { |
| 1118 | struct net_device *dev = dev_get_drvdata(device); | 1207 | struct net_device *dev = dev_get_drvdata(device); |
| 1119 | struct flexcan_priv *priv = netdev_priv(dev); | 1208 | struct flexcan_priv *priv = netdev_priv(dev); |
| 1209 | int err; | ||
| 1120 | 1210 | ||
| 1121 | flexcan_chip_disable(priv); | 1211 | err = flexcan_chip_disable(priv); |
| 1212 | if (err) | ||
| 1213 | return err; | ||
| 1122 | 1214 | ||
| 1123 | if (netif_running(dev)) { | 1215 | if (netif_running(dev)) { |
| 1124 | netif_stop_queue(dev); | 1216 | netif_stop_queue(dev); |
| @@ -1139,9 +1231,7 @@ static int flexcan_resume(struct device *device) | |||
| 1139 | netif_device_attach(dev); | 1231 | netif_device_attach(dev); |
| 1140 | netif_start_queue(dev); | 1232 | netif_start_queue(dev); |
| 1141 | } | 1233 | } |
| 1142 | flexcan_chip_enable(priv); | 1234 | return flexcan_chip_enable(priv); |
| 1143 | |||
| 1144 | return 0; | ||
| 1145 | } | 1235 | } |
| 1146 | #endif /* CONFIG_PM_SLEEP */ | 1236 | #endif /* CONFIG_PM_SLEEP */ |
| 1147 | 1237 | ||
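
The flexcan_chip_start() rework treats every mode transition as fallible and unwinds in reverse order, which is why the new enable/softreset/freeze/unfreeze helpers return -ETIMEDOUT instead of delaying blindly. A condensed sketch of that goto-based error unwinding, using the helpers introduced above (the bittiming and register setup between the steps is omitted):

    static int chip_start_sketch(struct net_device *dev)
    {
            struct flexcan_priv *priv = netdev_priv(dev);
            int err;

            err = flexcan_chip_enable(priv);        /* leave low-power mode */
            if (err)
                    return err;

            err = flexcan_chip_softreset(priv);
            if (err)
                    goto out_chip_disable;

            err = flexcan_transceiver_enable(priv);
            if (err)
                    goto out_chip_disable;

            err = flexcan_chip_unfreeze(priv);      /* start bus synchronization */
            if (err)
                    goto out_transceiver_disable;

            return 0;

    out_transceiver_disable:
            flexcan_transceiver_disable(priv);
    out_chip_disable:
            flexcan_chip_disable(priv);
            return err;
    }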
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 2e45f6ec1bf0..380d24922049 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c | |||
| @@ -1248,19 +1248,13 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 1248 | * shared register for the high 32 bits, so only a single, aligned, | 1248 | * shared register for the high 32 bits, so only a single, aligned, |
| 1249 | * 4 GB physical address range can be used for descriptors. | 1249 | * 4 GB physical address range can be used for descriptors. |
| 1250 | */ | 1250 | */ |
| 1251 | if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && | 1251 | if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { |
| 1252 | !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { | ||
| 1253 | dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n"); | 1252 | dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n"); |
| 1254 | } else { | 1253 | } else { |
| 1255 | err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); | 1254 | err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); |
| 1256 | if (err) { | 1255 | if (err) { |
| 1257 | err = dma_set_coherent_mask(&pdev->dev, | 1256 | dev_err(&pdev->dev, "No usable DMA config, aborting\n"); |
| 1258 | DMA_BIT_MASK(32)); | 1257 | goto out_pci_disable; |
| 1259 | if (err) { | ||
| 1260 | dev_err(&pdev->dev, | ||
| 1261 | "No usable DMA config, aborting\n"); | ||
| 1262 | goto out_pci_disable; | ||
| 1263 | } | ||
| 1264 | } | 1258 | } |
| 1265 | } | 1259 | } |
| 1266 | 1260 | ||
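
dma_set_mask_and_coherent() collapses the old dma_set_mask()/dma_set_coherent_mask() pair into one call; the usual probe-time idiom, as in the alx hunk above, is to try 64-bit addressing and fall back to 32-bit:

    /* &pdev->dev is the PCI device being probed; the error label is the driver's. */
    if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
            err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
            if (err) {
                    dev_err(&pdev->dev, "No usable DMA config, aborting\n");
                    goto out_pci_disable;
            }
    }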
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index d5c2d3e912e5..422aab27ea1b 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c | |||
| @@ -2436,7 +2436,7 @@ err_reset: | |||
| 2436 | err_register: | 2436 | err_register: |
| 2437 | err_sw_init: | 2437 | err_sw_init: |
| 2438 | err_eeprom: | 2438 | err_eeprom: |
| 2439 | iounmap(adapter->hw.hw_addr); | 2439 | pci_iounmap(pdev, adapter->hw.hw_addr); |
| 2440 | err_init_netdev: | 2440 | err_init_netdev: |
| 2441 | err_ioremap: | 2441 | err_ioremap: |
| 2442 | free_netdev(netdev); | 2442 | free_netdev(netdev); |
| @@ -2474,7 +2474,7 @@ static void atl1e_remove(struct pci_dev *pdev) | |||
| 2474 | unregister_netdev(netdev); | 2474 | unregister_netdev(netdev); |
| 2475 | atl1e_free_ring_resources(adapter); | 2475 | atl1e_free_ring_resources(adapter); |
| 2476 | atl1e_force_ps(&adapter->hw); | 2476 | atl1e_force_ps(&adapter->hw); |
| 2477 | iounmap(adapter->hw.hw_addr); | 2477 | pci_iounmap(pdev, adapter->hw.hw_addr); |
| 2478 | pci_release_regions(pdev); | 2478 | pci_release_regions(pdev); |
| 2479 | free_netdev(netdev); | 2479 | free_netdev(netdev); |
| 2480 | pci_disable_device(pdev); | 2480 | pci_disable_device(pdev); |
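
The atl1e change pairs the unmap with how the BAR was mapped: a region obtained through pci_iomap() should be released with pci_iounmap(), which also copes with I/O-port style mappings that a bare iounmap() would not. Matching probe/remove sketch (BAR number illustrative):

    /* probe: map BAR 0, full length */
    hw_addr = pci_iomap(pdev, 0, 0);
    if (!hw_addr)
            return -EIO;

    /* remove / error path: release through the matching helper */
    pci_iounmap(pdev, hw_addr);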
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index 1f7b5aa114fa..8a7bf7dad898 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c | |||
| @@ -1484,6 +1484,10 @@ static int b44_open(struct net_device *dev) | |||
| 1484 | add_timer(&bp->timer); | 1484 | add_timer(&bp->timer); |
| 1485 | 1485 | ||
| 1486 | b44_enable_ints(bp); | 1486 | b44_enable_ints(bp); |
| 1487 | |||
| 1488 | if (bp->flags & B44_FLAG_EXTERNAL_PHY) | ||
| 1489 | phy_start(bp->phydev); | ||
| 1490 | |||
| 1487 | netif_start_queue(dev); | 1491 | netif_start_queue(dev); |
| 1488 | out: | 1492 | out: |
| 1489 | return err; | 1493 | return err; |
| @@ -1646,6 +1650,9 @@ static int b44_close(struct net_device *dev) | |||
| 1646 | 1650 | ||
| 1647 | netif_stop_queue(dev); | 1651 | netif_stop_queue(dev); |
| 1648 | 1652 | ||
| 1653 | if (bp->flags & B44_FLAG_EXTERNAL_PHY) | ||
| 1654 | phy_stop(bp->phydev); | ||
| 1655 | |||
| 1649 | napi_disable(&bp->napi); | 1656 | napi_disable(&bp->napi); |
| 1650 | 1657 | ||
| 1651 | del_timer_sync(&bp->timer); | 1658 | del_timer_sync(&bp->timer); |
| @@ -2222,7 +2229,12 @@ static void b44_adjust_link(struct net_device *dev) | |||
| 2222 | } | 2229 | } |
| 2223 | 2230 | ||
| 2224 | if (status_changed) { | 2231 | if (status_changed) { |
| 2225 | b44_check_phy(bp); | 2232 | u32 val = br32(bp, B44_TX_CTRL); |
| 2233 | if (bp->flags & B44_FLAG_FULL_DUPLEX) | ||
| 2234 | val |= TX_CTRL_DUPLEX; | ||
| 2235 | else | ||
| 2236 | val &= ~TX_CTRL_DUPLEX; | ||
| 2237 | bw32(bp, B44_TX_CTRL, val); | ||
| 2226 | phy_print_status(phydev); | 2238 | phy_print_status(phydev); |
| 2227 | } | 2239 | } |
| 2228 | } | 2240 | } |
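
With an external PHY, b44 now lets phylib drive the link state: phy_start()/phy_stop() bracket open/close, and the adjust_link callback programs the negotiated duplex straight into B44_TX_CTRL instead of re-running the internal-PHY path. A simplified adjust_link sketch; the real hunk keys off bp->flags & B44_FLAG_FULL_DUPLEX, here the duplex is read from the phy_device directly:

    static void adjust_link_sketch(struct net_device *dev)
    {
            struct b44 *bp = netdev_priv(dev);
            struct phy_device *phydev = bp->phydev;
            u32 val = br32(bp, B44_TX_CTRL);

            if (phydev->duplex == DUPLEX_FULL)
                    val |= TX_CTRL_DUPLEX;
            else
                    val &= ~TX_CTRL_DUPLEX;
            bw32(bp, B44_TX_CTRL, val);

            phy_print_status(phydev);
    }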
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index cda25ac45b47..6c9e1c9bdeb8 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c | |||
| @@ -2507,6 +2507,7 @@ bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent) | |||
| 2507 | 2507 | ||
| 2508 | bp->fw_wr_seq++; | 2508 | bp->fw_wr_seq++; |
| 2509 | msg_data |= bp->fw_wr_seq; | 2509 | msg_data |= bp->fw_wr_seq; |
| 2510 | bp->fw_last_msg = msg_data; | ||
| 2510 | 2511 | ||
| 2511 | bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data); | 2512 | bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data); |
| 2512 | 2513 | ||
| @@ -4000,8 +4001,23 @@ bnx2_setup_wol(struct bnx2 *bp) | |||
| 4000 | wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL; | 4001 | wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL; |
| 4001 | } | 4002 | } |
| 4002 | 4003 | ||
| 4003 | if (!(bp->flags & BNX2_FLAG_NO_WOL)) | 4004 | if (!(bp->flags & BNX2_FLAG_NO_WOL)) { |
| 4004 | bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 1, 0); | 4005 | u32 val; |
| 4006 | |||
| 4007 | wol_msg |= BNX2_DRV_MSG_DATA_WAIT3; | ||
| 4008 | if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) { | ||
| 4009 | bnx2_fw_sync(bp, wol_msg, 1, 0); | ||
| 4010 | return; | ||
| 4011 | } | ||
| 4012 | /* Tell firmware not to power down the PHY yet, otherwise | ||
| 4013 | * the chip will take a long time to respond to MMIO reads. | ||
| 4014 | */ | ||
| 4015 | val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE); | ||
| 4016 | bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, | ||
| 4017 | val | BNX2_PORT_FEATURE_ASF_ENABLED); | ||
| 4018 | bnx2_fw_sync(bp, wol_msg, 1, 0); | ||
| 4019 | bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val); | ||
| 4020 | } | ||
| 4005 | 4021 | ||
| 4006 | } | 4022 | } |
| 4007 | 4023 | ||
| @@ -4033,9 +4049,22 @@ bnx2_set_power_state(struct bnx2 *bp, pci_power_t state) | |||
| 4033 | 4049 | ||
| 4034 | if (bp->wol) | 4050 | if (bp->wol) |
| 4035 | pci_set_power_state(bp->pdev, PCI_D3hot); | 4051 | pci_set_power_state(bp->pdev, PCI_D3hot); |
| 4036 | } else { | 4052 | break; |
| 4037 | pci_set_power_state(bp->pdev, PCI_D3hot); | 4053 | |
| 4054 | } | ||
| 4055 | if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) { | ||
| 4056 | u32 val; | ||
| 4057 | |||
| 4058 | /* Tell firmware not to power down the PHY yet, | ||
| 4059 | * otherwise the other port may not respond to | ||
| 4060 | * MMIO reads. | ||
| 4061 | */ | ||
| 4062 | val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION); | ||
| 4063 | val &= ~BNX2_CONDITION_PM_STATE_MASK; | ||
| 4064 | val |= BNX2_CONDITION_PM_STATE_UNPREP; | ||
| 4065 | bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val); | ||
| 4038 | } | 4066 | } |
| 4067 | pci_set_power_state(bp->pdev, PCI_D3hot); | ||
| 4039 | 4068 | ||
| 4040 | /* No more memory access after this point until | 4069 | /* No more memory access after this point until |
| 4041 | * device is brought back to D0. | 4070 | * device is brought back to D0. |
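
The bnx2 change hinges on recording the last driver-to-firmware mailbox message in bp->fw_last_msg: on 5709, if no WoL message was sent before entering D3, the driver first marks the port state in shared memory so the bootcode keeps the PHY powered while the host can still issue MMIO reads. Compressed sketch of that handshake, reusing the accessors from the hunks above:

    /* Only needed on 5709 when no WoL mailbox message was recorded. */
    if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
            u32 val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);

            val &= ~BNX2_CONDITION_PM_STATE_MASK;
            val |= BNX2_CONDITION_PM_STATE_UNPREP;  /* keep the PHY powered for now */
            bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
    }
    pci_set_power_state(bp->pdev, PCI_D3hot);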
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h index f1cf2c44e7ed..e341bc366fa5 100644 --- a/drivers/net/ethernet/broadcom/bnx2.h +++ b/drivers/net/ethernet/broadcom/bnx2.h | |||
| @@ -6900,6 +6900,7 @@ struct bnx2 { | |||
| 6900 | 6900 | ||
| 6901 | u16 fw_wr_seq; | 6901 | u16 fw_wr_seq; |
| 6902 | u16 fw_drv_pulse_wr_seq; | 6902 | u16 fw_drv_pulse_wr_seq; |
| 6903 | u32 fw_last_msg; | ||
| 6903 | 6904 | ||
| 6904 | int rx_max_ring; | 6905 | int rx_max_ring; |
| 6905 | int rx_ring_size; | 6906 | int rx_ring_size; |
| @@ -7406,6 +7407,10 @@ struct bnx2_rv2p_fw_file { | |||
| 7406 | #define BNX2_CONDITION_MFW_RUN_NCSI 0x00006000 | 7407 | #define BNX2_CONDITION_MFW_RUN_NCSI 0x00006000 |
| 7407 | #define BNX2_CONDITION_MFW_RUN_NONE 0x0000e000 | 7408 | #define BNX2_CONDITION_MFW_RUN_NONE 0x0000e000 |
| 7408 | #define BNX2_CONDITION_MFW_RUN_MASK 0x0000e000 | 7409 | #define BNX2_CONDITION_MFW_RUN_MASK 0x0000e000 |
| 7410 | #define BNX2_CONDITION_PM_STATE_MASK 0x00030000 | ||
| 7411 | #define BNX2_CONDITION_PM_STATE_FULL 0x00030000 | ||
| 7412 | #define BNX2_CONDITION_PM_STATE_PREP 0x00020000 | ||
| 7413 | #define BNX2_CONDITION_PM_STATE_UNPREP 0x00010000 | ||
| 7409 | 7414 | ||
| 7410 | #define BNX2_BC_STATE_DEBUG_CMD 0x1dc | 7415 | #define BNX2_BC_STATE_DEBUG_CMD 0x1dc |
| 7411 | #define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE 0x42440000 | 7416 | #define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE 0x42440000 |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 66c0df78c3ff..dbcff509dc3f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | |||
| @@ -3875,7 +3875,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 3875 | xmit_type); | 3875 | xmit_type); |
| 3876 | } | 3876 | } |
| 3877 | 3877 | ||
| 3878 | /* Add the macs to the parsing BD this is a vf */ | 3878 | /* Add the macs to the parsing BD if this is a vf or if |
| 3879 | * Tx Switching is enabled. | ||
| 3880 | */ | ||
| 3879 | if (IS_VF(bp)) { | 3881 | if (IS_VF(bp)) { |
| 3880 | /* override GRE parameters in BD */ | 3882 | /* override GRE parameters in BD */ |
| 3881 | bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi, | 3883 | bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi, |
| @@ -3887,6 +3889,11 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 3887 | &pbd_e2->data.mac_addr.dst_mid, | 3889 | &pbd_e2->data.mac_addr.dst_mid, |
| 3888 | &pbd_e2->data.mac_addr.dst_lo, | 3890 | &pbd_e2->data.mac_addr.dst_lo, |
| 3889 | eth->h_dest); | 3891 | eth->h_dest); |
| 3892 | } else if (bp->flags & TX_SWITCHING) { | ||
| 3893 | bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi, | ||
| 3894 | &pbd_e2->data.mac_addr.dst_mid, | ||
| 3895 | &pbd_e2->data.mac_addr.dst_lo, | ||
| 3896 | eth->h_dest); | ||
| 3890 | } | 3897 | } |
| 3891 | 3898 | ||
| 3892 | SET_FLAG(pbd_e2_parsing_data, | 3899 | SET_FLAG(pbd_e2_parsing_data, |
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index fcf9105a5476..09f3fefcbf9c 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | /* cnic.c: Broadcom CNIC core network driver. | 1 | /* cnic.c: Broadcom CNIC core network driver. |
| 2 | * | 2 | * |
| 3 | * Copyright (c) 2006-2013 Broadcom Corporation | 3 | * Copyright (c) 2006-2014 Broadcom Corporation |
| 4 | * | 4 | * |
| 5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
| 6 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
| @@ -342,7 +342,7 @@ static int cnic_send_nlmsg(struct cnic_local *cp, u32 type, | |||
| 342 | while (retry < 3) { | 342 | while (retry < 3) { |
| 343 | rc = 0; | 343 | rc = 0; |
| 344 | rcu_read_lock(); | 344 | rcu_read_lock(); |
| 345 | ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]); | 345 | ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]); |
| 346 | if (ulp_ops) | 346 | if (ulp_ops) |
| 347 | rc = ulp_ops->iscsi_nl_send_msg( | 347 | rc = ulp_ops->iscsi_nl_send_msg( |
| 348 | cp->ulp_handle[CNIC_ULP_ISCSI], | 348 | cp->ulp_handle[CNIC_ULP_ISCSI], |
| @@ -726,7 +726,7 @@ static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma) | |||
| 726 | 726 | ||
| 727 | for (i = 0; i < dma->num_pages; i++) { | 727 | for (i = 0; i < dma->num_pages; i++) { |
| 728 | if (dma->pg_arr[i]) { | 728 | if (dma->pg_arr[i]) { |
| 729 | dma_free_coherent(&dev->pcidev->dev, BNX2_PAGE_SIZE, | 729 | dma_free_coherent(&dev->pcidev->dev, CNIC_PAGE_SIZE, |
| 730 | dma->pg_arr[i], dma->pg_map_arr[i]); | 730 | dma->pg_arr[i], dma->pg_map_arr[i]); |
| 731 | dma->pg_arr[i] = NULL; | 731 | dma->pg_arr[i] = NULL; |
| 732 | } | 732 | } |
| @@ -785,7 +785,7 @@ static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma, | |||
| 785 | 785 | ||
| 786 | for (i = 0; i < pages; i++) { | 786 | for (i = 0; i < pages; i++) { |
| 787 | dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev, | 787 | dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev, |
| 788 | BNX2_PAGE_SIZE, | 788 | CNIC_PAGE_SIZE, |
| 789 | &dma->pg_map_arr[i], | 789 | &dma->pg_map_arr[i], |
| 790 | GFP_ATOMIC); | 790 | GFP_ATOMIC); |
| 791 | if (dma->pg_arr[i] == NULL) | 791 | if (dma->pg_arr[i] == NULL) |
| @@ -794,8 +794,8 @@ static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma, | |||
| 794 | if (!use_pg_tbl) | 794 | if (!use_pg_tbl) |
| 795 | return 0; | 795 | return 0; |
| 796 | 796 | ||
| 797 | dma->pgtbl_size = ((pages * 8) + BNX2_PAGE_SIZE - 1) & | 797 | dma->pgtbl_size = ((pages * 8) + CNIC_PAGE_SIZE - 1) & |
| 798 | ~(BNX2_PAGE_SIZE - 1); | 798 | ~(CNIC_PAGE_SIZE - 1); |
| 799 | dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size, | 799 | dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size, |
| 800 | &dma->pgtbl_map, GFP_ATOMIC); | 800 | &dma->pgtbl_map, GFP_ATOMIC); |
| 801 | if (dma->pgtbl == NULL) | 801 | if (dma->pgtbl == NULL) |
| @@ -900,8 +900,8 @@ static int cnic_alloc_context(struct cnic_dev *dev) | |||
| 900 | if (BNX2_CHIP(cp) == BNX2_CHIP_5709) { | 900 | if (BNX2_CHIP(cp) == BNX2_CHIP_5709) { |
| 901 | int i, k, arr_size; | 901 | int i, k, arr_size; |
| 902 | 902 | ||
| 903 | cp->ctx_blk_size = BNX2_PAGE_SIZE; | 903 | cp->ctx_blk_size = CNIC_PAGE_SIZE; |
| 904 | cp->cids_per_blk = BNX2_PAGE_SIZE / 128; | 904 | cp->cids_per_blk = CNIC_PAGE_SIZE / 128; |
| 905 | arr_size = BNX2_MAX_CID / cp->cids_per_blk * | 905 | arr_size = BNX2_MAX_CID / cp->cids_per_blk * |
| 906 | sizeof(struct cnic_ctx); | 906 | sizeof(struct cnic_ctx); |
| 907 | cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL); | 907 | cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL); |
| @@ -933,7 +933,7 @@ static int cnic_alloc_context(struct cnic_dev *dev) | |||
| 933 | for (i = 0; i < cp->ctx_blks; i++) { | 933 | for (i = 0; i < cp->ctx_blks; i++) { |
| 934 | cp->ctx_arr[i].ctx = | 934 | cp->ctx_arr[i].ctx = |
| 935 | dma_alloc_coherent(&dev->pcidev->dev, | 935 | dma_alloc_coherent(&dev->pcidev->dev, |
| 936 | BNX2_PAGE_SIZE, | 936 | CNIC_PAGE_SIZE, |
| 937 | &cp->ctx_arr[i].mapping, | 937 | &cp->ctx_arr[i].mapping, |
| 938 | GFP_KERNEL); | 938 | GFP_KERNEL); |
| 939 | if (cp->ctx_arr[i].ctx == NULL) | 939 | if (cp->ctx_arr[i].ctx == NULL) |
| @@ -1013,7 +1013,7 @@ static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages) | |||
| 1013 | if (udev->l2_ring) | 1013 | if (udev->l2_ring) |
| 1014 | return 0; | 1014 | return 0; |
| 1015 | 1015 | ||
| 1016 | udev->l2_ring_size = pages * BNX2_PAGE_SIZE; | 1016 | udev->l2_ring_size = pages * CNIC_PAGE_SIZE; |
| 1017 | udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size, | 1017 | udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size, |
| 1018 | &udev->l2_ring_map, | 1018 | &udev->l2_ring_map, |
| 1019 | GFP_KERNEL | __GFP_COMP); | 1019 | GFP_KERNEL | __GFP_COMP); |
| @@ -1021,7 +1021,7 @@ static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages) | |||
| 1021 | return -ENOMEM; | 1021 | return -ENOMEM; |
| 1022 | 1022 | ||
| 1023 | udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size; | 1023 | udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size; |
| 1024 | udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size); | 1024 | udev->l2_buf_size = CNIC_PAGE_ALIGN(udev->l2_buf_size); |
| 1025 | udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size, | 1025 | udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size, |
| 1026 | &udev->l2_buf_map, | 1026 | &udev->l2_buf_map, |
| 1027 | GFP_KERNEL | __GFP_COMP); | 1027 | GFP_KERNEL | __GFP_COMP); |
| @@ -1102,7 +1102,7 @@ static int cnic_init_uio(struct cnic_dev *dev) | |||
| 1102 | uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID + | 1102 | uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID + |
| 1103 | TX_MAX_TSS_RINGS + 1); | 1103 | TX_MAX_TSS_RINGS + 1); |
| 1104 | uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen & | 1104 | uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen & |
| 1105 | PAGE_MASK; | 1105 | CNIC_PAGE_MASK; |
| 1106 | if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) | 1106 | if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) |
| 1107 | uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9; | 1107 | uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9; |
| 1108 | else | 1108 | else |
| @@ -1113,7 +1113,7 @@ static int cnic_init_uio(struct cnic_dev *dev) | |||
| 1113 | uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0); | 1113 | uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0); |
| 1114 | 1114 | ||
| 1115 | uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk & | 1115 | uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk & |
| 1116 | PAGE_MASK; | 1116 | CNIC_PAGE_MASK; |
| 1117 | uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk); | 1117 | uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk); |
| 1118 | 1118 | ||
| 1119 | uinfo->name = "bnx2x_cnic"; | 1119 | uinfo->name = "bnx2x_cnic"; |
| @@ -1267,14 +1267,14 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) | |||
| 1267 | for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++) | 1267 | for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++) |
| 1268 | cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE; | 1268 | cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE; |
| 1269 | 1269 | ||
| 1270 | pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) / | 1270 | pages = CNIC_PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) / |
| 1271 | PAGE_SIZE; | 1271 | CNIC_PAGE_SIZE; |
| 1272 | 1272 | ||
| 1273 | ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0); | 1273 | ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0); |
| 1274 | if (ret) | 1274 | if (ret) |
| 1275 | return -ENOMEM; | 1275 | return -ENOMEM; |
| 1276 | 1276 | ||
| 1277 | n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE; | 1277 | n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE; |
| 1278 | for (i = 0, j = 0; i < cp->max_cid_space; i++) { | 1278 | for (i = 0, j = 0; i < cp->max_cid_space; i++) { |
| 1279 | long off = CNIC_KWQ16_DATA_SIZE * (i % n); | 1279 | long off = CNIC_KWQ16_DATA_SIZE * (i % n); |
| 1280 | 1280 | ||
| @@ -1296,7 +1296,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) | |||
| 1296 | goto error; | 1296 | goto error; |
| 1297 | } | 1297 | } |
| 1298 | 1298 | ||
| 1299 | pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE; | 1299 | pages = CNIC_PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / CNIC_PAGE_SIZE; |
| 1300 | ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0); | 1300 | ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0); |
| 1301 | if (ret) | 1301 | if (ret) |
| 1302 | goto error; | 1302 | goto error; |
| @@ -1466,8 +1466,8 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) | |||
| 1466 | cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS * | 1466 | cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS * |
| 1467 | BNX2X_ISCSI_R2TQE_SIZE; | 1467 | BNX2X_ISCSI_R2TQE_SIZE; |
| 1468 | cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE; | 1468 | cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE; |
| 1469 | pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE; | 1469 | pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE; |
| 1470 | hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE); | 1470 | hq_bds = pages * (CNIC_PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE); |
| 1471 | cp->num_cqs = req1->num_cqs; | 1471 | cp->num_cqs = req1->num_cqs; |
| 1472 | 1472 | ||
| 1473 | if (!dev->max_iscsi_conn) | 1473 | if (!dev->max_iscsi_conn) |
| @@ -1477,9 +1477,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) | |||
| 1477 | CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid), | 1477 | CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid), |
| 1478 | req1->rq_num_wqes); | 1478 | req1->rq_num_wqes); |
| 1479 | CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), | 1479 | CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), |
| 1480 | PAGE_SIZE); | 1480 | CNIC_PAGE_SIZE); |
| 1481 | CNIC_WR8(dev, BAR_TSTRORM_INTMEM + | 1481 | CNIC_WR8(dev, BAR_TSTRORM_INTMEM + |
| 1482 | TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); | 1482 | TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS); |
| 1483 | CNIC_WR16(dev, BAR_TSTRORM_INTMEM + | 1483 | CNIC_WR16(dev, BAR_TSTRORM_INTMEM + |
| 1484 | TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), | 1484 | TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), |
| 1485 | req1->num_tasks_per_conn); | 1485 | req1->num_tasks_per_conn); |
| @@ -1489,9 +1489,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) | |||
| 1489 | USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid), | 1489 | USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid), |
| 1490 | req1->rq_buffer_size); | 1490 | req1->rq_buffer_size); |
| 1491 | CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), | 1491 | CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), |
| 1492 | PAGE_SIZE); | 1492 | CNIC_PAGE_SIZE); |
| 1493 | CNIC_WR8(dev, BAR_USTRORM_INTMEM + | 1493 | CNIC_WR8(dev, BAR_USTRORM_INTMEM + |
| 1494 | USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); | 1494 | USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS); |
| 1495 | CNIC_WR16(dev, BAR_USTRORM_INTMEM + | 1495 | CNIC_WR16(dev, BAR_USTRORM_INTMEM + |
| 1496 | USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), | 1496 | USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), |
| 1497 | req1->num_tasks_per_conn); | 1497 | req1->num_tasks_per_conn); |
| @@ -1504,9 +1504,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) | |||
| 1504 | 1504 | ||
| 1505 | /* init Xstorm RAM */ | 1505 | /* init Xstorm RAM */ |
| 1506 | CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), | 1506 | CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), |
| 1507 | PAGE_SIZE); | 1507 | CNIC_PAGE_SIZE); |
| 1508 | CNIC_WR8(dev, BAR_XSTRORM_INTMEM + | 1508 | CNIC_WR8(dev, BAR_XSTRORM_INTMEM + |
| 1509 | XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); | 1509 | XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS); |
| 1510 | CNIC_WR16(dev, BAR_XSTRORM_INTMEM + | 1510 | CNIC_WR16(dev, BAR_XSTRORM_INTMEM + |
| 1511 | XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), | 1511 | XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), |
| 1512 | req1->num_tasks_per_conn); | 1512 | req1->num_tasks_per_conn); |
| @@ -1519,9 +1519,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) | |||
| 1519 | 1519 | ||
| 1520 | /* init Cstorm RAM */ | 1520 | /* init Cstorm RAM */ |
| 1521 | CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), | 1521 | CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), |
| 1522 | PAGE_SIZE); | 1522 | CNIC_PAGE_SIZE); |
| 1523 | CNIC_WR8(dev, BAR_CSTRORM_INTMEM + | 1523 | CNIC_WR8(dev, BAR_CSTRORM_INTMEM + |
| 1524 | CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); | 1524 | CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS); |
| 1525 | CNIC_WR16(dev, BAR_CSTRORM_INTMEM + | 1525 | CNIC_WR16(dev, BAR_CSTRORM_INTMEM + |
| 1526 | CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), | 1526 | CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), |
| 1527 | req1->num_tasks_per_conn); | 1527 | req1->num_tasks_per_conn); |
| @@ -1623,18 +1623,18 @@ static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid) | |||
| 1623 | } | 1623 | } |
| 1624 | 1624 | ||
| 1625 | ctx->cid = cid; | 1625 | ctx->cid = cid; |
| 1626 | pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE; | 1626 | pages = CNIC_PAGE_ALIGN(cp->task_array_size) / CNIC_PAGE_SIZE; |
| 1627 | 1627 | ||
| 1628 | ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1); | 1628 | ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1); |
| 1629 | if (ret) | 1629 | if (ret) |
| 1630 | goto error; | 1630 | goto error; |
| 1631 | 1631 | ||
| 1632 | pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE; | 1632 | pages = CNIC_PAGE_ALIGN(cp->r2tq_size) / CNIC_PAGE_SIZE; |
| 1633 | ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1); | 1633 | ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1); |
| 1634 | if (ret) | 1634 | if (ret) |
| 1635 | goto error; | 1635 | goto error; |
| 1636 | 1636 | ||
| 1637 | pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE; | 1637 | pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE; |
| 1638 | ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1); | 1638 | ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1); |
| 1639 | if (ret) | 1639 | if (ret) |
| 1640 | goto error; | 1640 | goto error; |
| @@ -1760,7 +1760,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[], | |||
| 1760 | ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE; | 1760 | ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE; |
| 1761 | /* TSTORM requires the base address of RQ DB & not PTE */ | 1761 | /* TSTORM requires the base address of RQ DB & not PTE */ |
| 1762 | ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo = | 1762 | ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo = |
| 1763 | req2->rq_page_table_addr_lo & PAGE_MASK; | 1763 | req2->rq_page_table_addr_lo & CNIC_PAGE_MASK; |
| 1764 | ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi = | 1764 | ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi = |
| 1765 | req2->rq_page_table_addr_hi; | 1765 | req2->rq_page_table_addr_hi; |
| 1766 | ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id; | 1766 | ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id; |
| @@ -1842,7 +1842,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[], | |||
| 1842 | /* CSTORM and USTORM initialization is different, CSTORM requires | 1842 | /* CSTORM and USTORM initialization is different, CSTORM requires |
| 1843 | * CQ DB base & not PTE addr */ | 1843 | * CQ DB base & not PTE addr */ |
| 1844 | ictx->cstorm_st_context.cq_db_base.lo = | 1844 | ictx->cstorm_st_context.cq_db_base.lo = |
| 1845 | req1->cq_page_table_addr_lo & PAGE_MASK; | 1845 | req1->cq_page_table_addr_lo & CNIC_PAGE_MASK; |
| 1846 | ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi; | 1846 | ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi; |
| 1847 | ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id; | 1847 | ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id; |
| 1848 | ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1; | 1848 | ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1; |
| @@ -2911,7 +2911,7 @@ static int cnic_l2_completion(struct cnic_local *cp) | |||
| 2911 | u16 hw_cons, sw_cons; | 2911 | u16 hw_cons, sw_cons; |
| 2912 | struct cnic_uio_dev *udev = cp->udev; | 2912 | struct cnic_uio_dev *udev = cp->udev; |
| 2913 | union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *) | 2913 | union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *) |
| 2914 | (udev->l2_ring + (2 * BNX2_PAGE_SIZE)); | 2914 | (udev->l2_ring + (2 * CNIC_PAGE_SIZE)); |
| 2915 | u32 cmd; | 2915 | u32 cmd; |
| 2916 | int comp = 0; | 2916 | int comp = 0; |
| 2917 | 2917 | ||
| @@ -3244,7 +3244,8 @@ static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type) | |||
| 3244 | int rc; | 3244 | int rc; |
| 3245 | 3245 | ||
| 3246 | mutex_lock(&cnic_lock); | 3246 | mutex_lock(&cnic_lock); |
| 3247 | ulp_ops = cnic_ulp_tbl_prot(ulp_type); | 3247 | ulp_ops = rcu_dereference_protected(cp->ulp_ops[ulp_type], |
| 3248 | lockdep_is_held(&cnic_lock)); | ||
| 3248 | if (ulp_ops && ulp_ops->cnic_get_stats) | 3249 | if (ulp_ops && ulp_ops->cnic_get_stats) |
| 3249 | rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]); | 3250 | rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]); |
| 3250 | else | 3251 | else |
| @@ -4384,7 +4385,7 @@ static int cnic_setup_5709_context(struct cnic_dev *dev, int valid) | |||
| 4384 | u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk; | 4385 | u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk; |
| 4385 | u32 val; | 4386 | u32 val; |
| 4386 | 4387 | ||
| 4387 | memset(cp->ctx_arr[i].ctx, 0, BNX2_PAGE_SIZE); | 4388 | memset(cp->ctx_arr[i].ctx, 0, CNIC_PAGE_SIZE); |
| 4388 | 4389 | ||
| 4389 | CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0, | 4390 | CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0, |
| 4390 | (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit); | 4391 | (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit); |
| @@ -4628,7 +4629,7 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev) | |||
| 4628 | val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id); | 4629 | val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id); |
| 4629 | cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val); | 4630 | cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val); |
| 4630 | 4631 | ||
| 4631 | rxbd = udev->l2_ring + BNX2_PAGE_SIZE; | 4632 | rxbd = udev->l2_ring + CNIC_PAGE_SIZE; |
| 4632 | for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) { | 4633 | for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) { |
| 4633 | dma_addr_t buf_map; | 4634 | dma_addr_t buf_map; |
| 4634 | int n = (i % cp->l2_rx_ring_size) + 1; | 4635 | int n = (i % cp->l2_rx_ring_size) + 1; |
| @@ -4639,11 +4640,11 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev) | |||
| 4639 | rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32; | 4640 | rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32; |
| 4640 | rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff; | 4641 | rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff; |
| 4641 | } | 4642 | } |
| 4642 | val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32; | 4643 | val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32; |
| 4643 | cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val); | 4644 | cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val); |
| 4644 | rxbd->rx_bd_haddr_hi = val; | 4645 | rxbd->rx_bd_haddr_hi = val; |
| 4645 | 4646 | ||
| 4646 | val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff; | 4647 | val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff; |
| 4647 | cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val); | 4648 | cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val); |
| 4648 | rxbd->rx_bd_haddr_lo = val; | 4649 | rxbd->rx_bd_haddr_lo = val; |
| 4649 | 4650 | ||
| @@ -4709,10 +4710,10 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev) | |||
| 4709 | 4710 | ||
| 4710 | val = CNIC_RD(dev, BNX2_MQ_CONFIG); | 4711 | val = CNIC_RD(dev, BNX2_MQ_CONFIG); |
| 4711 | val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE; | 4712 | val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE; |
| 4712 | if (BNX2_PAGE_BITS > 12) | 4713 | if (CNIC_PAGE_BITS > 12) |
| 4713 | val |= (12 - 8) << 4; | 4714 | val |= (12 - 8) << 4; |
| 4714 | else | 4715 | else |
| 4715 | val |= (BNX2_PAGE_BITS - 8) << 4; | 4716 | val |= (CNIC_PAGE_BITS - 8) << 4; |
| 4716 | 4717 | ||
| 4717 | CNIC_WR(dev, BNX2_MQ_CONFIG, val); | 4718 | CNIC_WR(dev, BNX2_MQ_CONFIG, val); |
| 4718 | 4719 | ||
| @@ -4742,13 +4743,13 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev) | |||
| 4742 | 4743 | ||
| 4743 | /* Initialize the kernel work queue context. */ | 4744 | /* Initialize the kernel work queue context. */ |
| 4744 | val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | | 4745 | val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | |
| 4745 | (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; | 4746 | (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; |
| 4746 | cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val); | 4747 | cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val); |
| 4747 | 4748 | ||
| 4748 | val = (BNX2_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16; | 4749 | val = (CNIC_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16; |
| 4749 | cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); | 4750 | cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); |
| 4750 | 4751 | ||
| 4751 | val = ((BNX2_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT; | 4752 | val = ((CNIC_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT; |
| 4752 | cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val); | 4753 | cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val); |
| 4753 | 4754 | ||
| 4754 | val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32); | 4755 | val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32); |
| @@ -4768,13 +4769,13 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev) | |||
| 4768 | 4769 | ||
| 4769 | /* Initialize the kernel complete queue context. */ | 4770 | /* Initialize the kernel complete queue context. */ |
| 4770 | val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | | 4771 | val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | |
| 4771 | (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; | 4772 | (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; |
| 4772 | cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val); | 4773 | cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val); |
| 4773 | 4774 | ||
| 4774 | val = (BNX2_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16; | 4775 | val = (CNIC_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16; |
| 4775 | cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); | 4776 | cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); |
| 4776 | 4777 | ||
| 4777 | val = ((BNX2_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT; | 4778 | val = ((CNIC_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT; |
| 4778 | cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val); | 4779 | cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val); |
| 4779 | 4780 | ||
| 4780 | val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32); | 4781 | val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32); |
| @@ -4918,7 +4919,7 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev, | |||
| 4918 | u32 cli = cp->ethdev->iscsi_l2_client_id; | 4919 | u32 cli = cp->ethdev->iscsi_l2_client_id; |
| 4919 | u32 val; | 4920 | u32 val; |
| 4920 | 4921 | ||
| 4921 | memset(txbd, 0, BNX2_PAGE_SIZE); | 4922 | memset(txbd, 0, CNIC_PAGE_SIZE); |
| 4922 | 4923 | ||
| 4923 | buf_map = udev->l2_buf_map; | 4924 | buf_map = udev->l2_buf_map; |
| 4924 | for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) { | 4925 | for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) { |
| @@ -4978,9 +4979,9 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev, | |||
| 4978 | struct bnx2x *bp = netdev_priv(dev->netdev); | 4979 | struct bnx2x *bp = netdev_priv(dev->netdev); |
| 4979 | struct cnic_uio_dev *udev = cp->udev; | 4980 | struct cnic_uio_dev *udev = cp->udev; |
| 4980 | struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring + | 4981 | struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring + |
| 4981 | BNX2_PAGE_SIZE); | 4982 | CNIC_PAGE_SIZE); |
| 4982 | struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *) | 4983 | struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *) |
| 4983 | (udev->l2_ring + (2 * BNX2_PAGE_SIZE)); | 4984 | (udev->l2_ring + (2 * CNIC_PAGE_SIZE)); |
| 4984 | struct host_sp_status_block *sb = cp->bnx2x_def_status_blk; | 4985 | struct host_sp_status_block *sb = cp->bnx2x_def_status_blk; |
| 4985 | int i; | 4986 | int i; |
| 4986 | u32 cli = cp->ethdev->iscsi_l2_client_id; | 4987 | u32 cli = cp->ethdev->iscsi_l2_client_id; |
| @@ -5004,20 +5005,20 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev, | |||
| 5004 | rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff); | 5005 | rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff); |
| 5005 | } | 5006 | } |
| 5006 | 5007 | ||
| 5007 | val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32; | 5008 | val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32; |
| 5008 | rxbd->addr_hi = cpu_to_le32(val); | 5009 | rxbd->addr_hi = cpu_to_le32(val); |
| 5009 | data->rx.bd_page_base.hi = cpu_to_le32(val); | 5010 | data->rx.bd_page_base.hi = cpu_to_le32(val); |
| 5010 | 5011 | ||
| 5011 | val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff; | 5012 | val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff; |
| 5012 | rxbd->addr_lo = cpu_to_le32(val); | 5013 | rxbd->addr_lo = cpu_to_le32(val); |
| 5013 | data->rx.bd_page_base.lo = cpu_to_le32(val); | 5014 | data->rx.bd_page_base.lo = cpu_to_le32(val); |
| 5014 | 5015 | ||
| 5015 | rxcqe += BNX2X_MAX_RCQ_DESC_CNT; | 5016 | rxcqe += BNX2X_MAX_RCQ_DESC_CNT; |
| 5016 | val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) >> 32; | 5017 | val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) >> 32; |
| 5017 | rxcqe->addr_hi = cpu_to_le32(val); | 5018 | rxcqe->addr_hi = cpu_to_le32(val); |
| 5018 | data->rx.cqe_page_base.hi = cpu_to_le32(val); | 5019 | data->rx.cqe_page_base.hi = cpu_to_le32(val); |
| 5019 | 5020 | ||
| 5020 | val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) & 0xffffffff; | 5021 | val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) & 0xffffffff; |
| 5021 | rxcqe->addr_lo = cpu_to_le32(val); | 5022 | rxcqe->addr_lo = cpu_to_le32(val); |
| 5022 | data->rx.cqe_page_base.lo = cpu_to_le32(val); | 5023 | data->rx.cqe_page_base.lo = cpu_to_le32(val); |
| 5023 | 5024 | ||
| @@ -5265,8 +5266,8 @@ static void cnic_shutdown_rings(struct cnic_dev *dev) | |||
| 5265 | msleep(10); | 5266 | msleep(10); |
| 5266 | } | 5267 | } |
| 5267 | clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); | 5268 | clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); |
| 5268 | rx_ring = udev->l2_ring + BNX2_PAGE_SIZE; | 5269 | rx_ring = udev->l2_ring + CNIC_PAGE_SIZE; |
| 5269 | memset(rx_ring, 0, BNX2_PAGE_SIZE); | 5270 | memset(rx_ring, 0, CNIC_PAGE_SIZE); |
| 5270 | } | 5271 | } |
| 5271 | 5272 | ||
| 5272 | static int cnic_register_netdev(struct cnic_dev *dev) | 5273 | static int cnic_register_netdev(struct cnic_dev *dev) |
diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h index 0d6b13f854d9..d535ae4228b4 100644 --- a/drivers/net/ethernet/broadcom/cnic.h +++ b/drivers/net/ethernet/broadcom/cnic.h | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | /* cnic.h: Broadcom CNIC core network driver. | 1 | /* cnic.h: Broadcom CNIC core network driver. |
| 2 | * | 2 | * |
| 3 | * Copyright (c) 2006-2013 Broadcom Corporation | 3 | * Copyright (c) 2006-2014 Broadcom Corporation |
| 4 | * | 4 | * |
| 5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
| 6 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
diff --git a/drivers/net/ethernet/broadcom/cnic_defs.h b/drivers/net/ethernet/broadcom/cnic_defs.h index 95a8e4b11c9f..dcbca6997e8f 100644 --- a/drivers/net/ethernet/broadcom/cnic_defs.h +++ b/drivers/net/ethernet/broadcom/cnic_defs.h | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | 1 | ||
| 2 | /* cnic.c: Broadcom CNIC core network driver. | 2 | /* cnic.c: Broadcom CNIC core network driver. |
| 3 | * | 3 | * |
| 4 | * Copyright (c) 2006-2013 Broadcom Corporation | 4 | * Copyright (c) 2006-2014 Broadcom Corporation |
| 5 | * | 5 | * |
| 6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
| 7 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h index 8cf6b1926069..5f4d5573a73d 100644 --- a/drivers/net/ethernet/broadcom/cnic_if.h +++ b/drivers/net/ethernet/broadcom/cnic_if.h | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | /* cnic_if.h: Broadcom CNIC core network driver. | 1 | /* cnic_if.h: Broadcom CNIC core network driver. |
| 2 | * | 2 | * |
| 3 | * Copyright (c) 2006-2013 Broadcom Corporation | 3 | * Copyright (c) 2006-2014 Broadcom Corporation |
| 4 | * | 4 | * |
| 5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
| 6 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
| @@ -14,8 +14,8 @@ | |||
| 14 | 14 | ||
| 15 | #include "bnx2x/bnx2x_mfw_req.h" | 15 | #include "bnx2x/bnx2x_mfw_req.h" |
| 16 | 16 | ||
| 17 | #define CNIC_MODULE_VERSION "2.5.19" | 17 | #define CNIC_MODULE_VERSION "2.5.20" |
| 18 | #define CNIC_MODULE_RELDATE "December 19, 2013" | 18 | #define CNIC_MODULE_RELDATE "March 14, 2014" |
| 19 | 19 | ||
| 20 | #define CNIC_ULP_RDMA 0 | 20 | #define CNIC_ULP_RDMA 0 |
| 21 | #define CNIC_ULP_ISCSI 1 | 21 | #define CNIC_ULP_ISCSI 1 |
| @@ -24,6 +24,16 @@ | |||
| 24 | #define MAX_CNIC_ULP_TYPE_EXT 3 | 24 | #define MAX_CNIC_ULP_TYPE_EXT 3 |
| 25 | #define MAX_CNIC_ULP_TYPE 4 | 25 | #define MAX_CNIC_ULP_TYPE 4 |
| 26 | 26 | ||
| 27 | /* Use CPU native page size up to 16K for cnic ring sizes. */ | ||
| 28 | #if (PAGE_SHIFT > 14) | ||
| 29 | #define CNIC_PAGE_BITS 14 | ||
| 30 | #else | ||
| 31 | #define CNIC_PAGE_BITS PAGE_SHIFT | ||
| 32 | #endif | ||
| 33 | #define CNIC_PAGE_SIZE (1 << (CNIC_PAGE_BITS)) | ||
| 34 | #define CNIC_PAGE_ALIGN(addr) ALIGN(addr, CNIC_PAGE_SIZE) | ||
| 35 | #define CNIC_PAGE_MASK (~((CNIC_PAGE_SIZE) - 1)) | ||
| 36 | |||
| 27 | struct kwqe { | 37 | struct kwqe { |
| 28 | u32 kwqe_op_flag; | 38 | u32 kwqe_op_flag; |
| 29 | 39 | ||
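The cnic.c hunks above replace every BNX2_PAGE_SIZE-based ring calculation with the new CNIC_PAGE_* macros defined in this cnic_if.h hunk, which cap the ring page size at 16K even when the CPU uses larger pages. A minimal standalone sketch of that capping logic, assuming PAGE_SHIFT of 16 (64K pages) purely for illustration and re-defining ALIGN() locally to mirror the kernel macro:

    /* Standalone userspace sketch of the CNIC_PAGE_* capping above (not kernel code). */
    #include <stdio.h>

    #define PAGE_SHIFT 16                      /* assumed: e.g. a 64K-page system */

    #if (PAGE_SHIFT > 14)
    #define CNIC_PAGE_BITS 14                  /* cap ring page size at 16K */
    #else
    #define CNIC_PAGE_BITS PAGE_SHIFT
    #endif
    #define CNIC_PAGE_SIZE (1 << (CNIC_PAGE_BITS))
    #define ALIGN(x, a)    (((x) + ((a) - 1)) & ~((a) - 1))   /* mimics the kernel ALIGN() */
    #define CNIC_PAGE_ALIGN(addr) ALIGN(addr, CNIC_PAGE_SIZE)
    #define CNIC_PAGE_MASK (~((CNIC_PAGE_SIZE) - 1))

    int main(void)
    {
    	printf("CNIC_PAGE_SIZE = %d\n", CNIC_PAGE_SIZE);          /* 16384 */
    	printf("align(20000)   = %d\n", CNIC_PAGE_ALIGN(20000));  /* 32768 */
    	printf("20000 & mask   = %d\n", 20000 & CNIC_PAGE_MASK);  /* 16384 */
    	return 0;
    }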
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 3167ed6593b0..3b6d0ba86c71 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
| @@ -6843,8 +6843,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) | |||
| 6843 | 6843 | ||
| 6844 | work_mask |= opaque_key; | 6844 | work_mask |= opaque_key; |
| 6845 | 6845 | ||
| 6846 | if ((desc->err_vlan & RXD_ERR_MASK) != 0 && | 6846 | if (desc->err_vlan & RXD_ERR_MASK) { |
| 6847 | (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) { | ||
| 6848 | drop_it: | 6847 | drop_it: |
| 6849 | tg3_recycle_rx(tnapi, tpr, opaque_key, | 6848 | tg3_recycle_rx(tnapi, tpr, opaque_key, |
| 6850 | desc_idx, *post_ptr); | 6849 | desc_idx, *post_ptr); |
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index ef472385bce4..04321e5a356e 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h | |||
| @@ -2608,7 +2608,11 @@ struct tg3_rx_buffer_desc { | |||
| 2608 | #define RXD_ERR_TOO_SMALL 0x00400000 | 2608 | #define RXD_ERR_TOO_SMALL 0x00400000 |
| 2609 | #define RXD_ERR_NO_RESOURCES 0x00800000 | 2609 | #define RXD_ERR_NO_RESOURCES 0x00800000 |
| 2610 | #define RXD_ERR_HUGE_FRAME 0x01000000 | 2610 | #define RXD_ERR_HUGE_FRAME 0x01000000 |
| 2611 | #define RXD_ERR_MASK 0xffff0000 | 2611 | |
| 2612 | #define RXD_ERR_MASK (RXD_ERR_BAD_CRC | RXD_ERR_COLLISION | \ | ||
| 2613 | RXD_ERR_LINK_LOST | RXD_ERR_PHY_DECODE | \ | ||
| 2614 | RXD_ERR_MAC_ABRT | RXD_ERR_TOO_SMALL | \ | ||
| 2615 | RXD_ERR_NO_RESOURCES | RXD_ERR_HUGE_FRAME) | ||
| 2612 | 2616 | ||
| 2613 | u32 reserved; | 2617 | u32 reserved; |
| 2614 | u32 opaque; | 2618 | u32 opaque; |
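The tg3.h hunk narrows RXD_ERR_MASK from the blanket 0xffff0000 to an explicit OR of the fatal error bits, which is why the tg3_rx() hunk above can drop its special case for RXD_ERR_ODD_NIBBLE_RCVD_MII. A standalone sketch of the effect, using only the bit values visible in this hunk (the real mask also ORs in the CRC/collision/link/PHY-decode/MAC-abort bits) and an illustrative value for a status-only bit:

    #include <stdio.h>
    #include <stdint.h>

    #define RXD_ERR_TOO_SMALL    0x00400000
    #define RXD_ERR_NO_RESOURCES 0x00800000
    #define RXD_ERR_HUGE_FRAME   0x01000000

    /* only these named bits (plus the others listed in tg3.h) cause a drop now */
    #define DEMO_ERR_MASK (RXD_ERR_TOO_SMALL | RXD_ERR_NO_RESOURCES | RXD_ERR_HUGE_FRAME)
    #define OLD_ERR_MASK  0xffff0000

    int main(void)
    {
    	/* a bit in the upper half-word that is not part of the demo mask */
    	uint32_t err_vlan = 0x00100000;	/* illustrative value only */

    	printf("old check drops frame: %s\n", (err_vlan & OLD_ERR_MASK) ? "yes" : "no");
    	printf("new check drops frame: %s\n", (err_vlan & DEMO_ERR_MASK) ? "yes" : "no");
    	return 0;
    }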
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c index 1803c3959044..354ae9792bad 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c | |||
| @@ -1704,7 +1704,7 @@ bfa_flash_sem_get(void __iomem *bar) | |||
| 1704 | while (!bfa_raw_sem_get(bar)) { | 1704 | while (!bfa_raw_sem_get(bar)) { |
| 1705 | if (--n <= 0) | 1705 | if (--n <= 0) |
| 1706 | return BFA_STATUS_BADFLASH; | 1706 | return BFA_STATUS_BADFLASH; |
| 1707 | udelay(10000); | 1707 | mdelay(10); |
| 1708 | } | 1708 | } |
| 1709 | return BFA_STATUS_OK; | 1709 | return BFA_STATUS_OK; |
| 1710 | } | 1710 | } |
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index cf64f3d0b60d..4ad1187e82fb 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c | |||
| @@ -707,7 +707,8 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget) | |||
| 707 | else | 707 | else |
| 708 | skb_checksum_none_assert(skb); | 708 | skb_checksum_none_assert(skb); |
| 709 | 709 | ||
| 710 | if (flags & BNA_CQ_EF_VLAN) | 710 | if ((flags & BNA_CQ_EF_VLAN) && |
| 711 | (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) | ||
| 711 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag)); | 712 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag)); |
| 712 | 713 | ||
| 713 | if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) | 714 | if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) |
| @@ -2094,7 +2095,9 @@ bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config) | |||
| 2094 | rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE; | 2095 | rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE; |
| 2095 | } | 2096 | } |
| 2096 | 2097 | ||
| 2097 | rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED; | 2098 | rx_config->vlan_strip_status = |
| 2099 | (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ? | ||
| 2100 | BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED; | ||
| 2098 | } | 2101 | } |
| 2099 | 2102 | ||
| 2100 | static void | 2103 | static void |
| @@ -3245,11 +3248,6 @@ bnad_set_rx_mode(struct net_device *netdev) | |||
| 3245 | BNA_RXMODE_ALLMULTI; | 3248 | BNA_RXMODE_ALLMULTI; |
| 3246 | bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask, NULL); | 3249 | bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask, NULL); |
| 3247 | 3250 | ||
| 3248 | if (bnad->cfg_flags & BNAD_CF_PROMISC) | ||
| 3249 | bna_rx_vlan_strip_disable(bnad->rx_info[0].rx); | ||
| 3250 | else | ||
| 3251 | bna_rx_vlan_strip_enable(bnad->rx_info[0].rx); | ||
| 3252 | |||
| 3253 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | 3251 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
| 3254 | } | 3252 | } |
| 3255 | 3253 | ||
| @@ -3374,6 +3372,27 @@ bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) | |||
| 3374 | return 0; | 3372 | return 0; |
| 3375 | } | 3373 | } |
| 3376 | 3374 | ||
| 3375 | static int bnad_set_features(struct net_device *dev, netdev_features_t features) | ||
| 3376 | { | ||
| 3377 | struct bnad *bnad = netdev_priv(dev); | ||
| 3378 | netdev_features_t changed = features ^ dev->features; | ||
| 3379 | |||
| 3380 | if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev)) { | ||
| 3381 | unsigned long flags; | ||
| 3382 | |||
| 3383 | spin_lock_irqsave(&bnad->bna_lock, flags); | ||
| 3384 | |||
| 3385 | if (features & NETIF_F_HW_VLAN_CTAG_RX) | ||
| 3386 | bna_rx_vlan_strip_enable(bnad->rx_info[0].rx); | ||
| 3387 | else | ||
| 3388 | bna_rx_vlan_strip_disable(bnad->rx_info[0].rx); | ||
| 3389 | |||
| 3390 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | ||
| 3391 | } | ||
| 3392 | |||
| 3393 | return 0; | ||
| 3394 | } | ||
| 3395 | |||
| 3377 | #ifdef CONFIG_NET_POLL_CONTROLLER | 3396 | #ifdef CONFIG_NET_POLL_CONTROLLER |
| 3378 | static void | 3397 | static void |
| 3379 | bnad_netpoll(struct net_device *netdev) | 3398 | bnad_netpoll(struct net_device *netdev) |
| @@ -3421,6 +3440,7 @@ static const struct net_device_ops bnad_netdev_ops = { | |||
| 3421 | .ndo_change_mtu = bnad_change_mtu, | 3440 | .ndo_change_mtu = bnad_change_mtu, |
| 3422 | .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid, | 3441 | .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid, |
| 3423 | .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid, | 3442 | .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid, |
| 3443 | .ndo_set_features = bnad_set_features, | ||
| 3424 | #ifdef CONFIG_NET_POLL_CONTROLLER | 3444 | #ifdef CONFIG_NET_POLL_CONTROLLER |
| 3425 | .ndo_poll_controller = bnad_netpoll | 3445 | .ndo_poll_controller = bnad_netpoll |
| 3426 | #endif | 3446 | #endif |
| @@ -3433,14 +3453,14 @@ bnad_netdev_init(struct bnad *bnad, bool using_dac) | |||
| 3433 | 3453 | ||
| 3434 | netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | | 3454 | netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | |
| 3435 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | | 3455 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
| 3436 | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX; | 3456 | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX | |
| 3457 | NETIF_F_HW_VLAN_CTAG_RX; | ||
| 3437 | 3458 | ||
| 3438 | netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA | | 3459 | netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA | |
| 3439 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | | 3460 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
| 3440 | NETIF_F_TSO | NETIF_F_TSO6; | 3461 | NETIF_F_TSO | NETIF_F_TSO6; |
| 3441 | 3462 | ||
| 3442 | netdev->features |= netdev->hw_features | | 3463 | netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; |
| 3443 | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; | ||
| 3444 | 3464 | ||
| 3445 | if (using_dac) | 3465 | if (using_dac) |
| 3446 | netdev->features |= NETIF_F_HIGHDMA; | 3466 | netdev->features |= NETIF_F_HIGHDMA; |
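The bnad hunks tie RX VLAN stripping to the NETIF_F_HW_VLAN_CTAG_RX feature flag and expose it through a new .ndo_set_features callback instead of toggling it from the promiscuous path. A minimal sketch of that callback pattern, where hw_toggle_vlan_strip() is a hypothetical stand-in for the bna_rx_vlan_strip_{enable,disable}() calls:

    #include <linux/netdevice.h>

    static void hw_toggle_vlan_strip(struct net_device *dev, bool on)
    {
    	/* driver-specific programming of the RX VLAN-strip hardware bit */
    }

    static int demo_set_features(struct net_device *dev, netdev_features_t features)
    {
    	netdev_features_t changed = features ^ dev->features;

    	/* react only when the bit of interest changed on a running interface */
    	if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev))
    		hw_toggle_vlan_strip(dev, features & NETIF_F_HW_VLAN_CTAG_RX);

    	return 0;
    }
    /* wired up in net_device_ops as: .ndo_set_features = demo_set_features, */

Because the flag now also sits in hw_features, ethtool -K can flip it at runtime and the handler reprograms the hardware only when the bit actually changed.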
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 3190d38e16fb..d0c38e01e99f 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c | |||
| @@ -632,11 +632,16 @@ static void gem_rx_refill(struct macb *bp) | |||
| 632 | "Unable to allocate sk_buff\n"); | 632 | "Unable to allocate sk_buff\n"); |
| 633 | break; | 633 | break; |
| 634 | } | 634 | } |
| 635 | bp->rx_skbuff[entry] = skb; | ||
| 636 | 635 | ||
| 637 | /* now fill corresponding descriptor entry */ | 636 | /* now fill corresponding descriptor entry */ |
| 638 | paddr = dma_map_single(&bp->pdev->dev, skb->data, | 637 | paddr = dma_map_single(&bp->pdev->dev, skb->data, |
| 639 | bp->rx_buffer_size, DMA_FROM_DEVICE); | 638 | bp->rx_buffer_size, DMA_FROM_DEVICE); |
| 639 | if (dma_mapping_error(&bp->pdev->dev, paddr)) { | ||
| 640 | dev_kfree_skb(skb); | ||
| 641 | break; | ||
| 642 | } | ||
| 643 | |||
| 644 | bp->rx_skbuff[entry] = skb; | ||
| 640 | 645 | ||
| 641 | if (entry == RX_RING_SIZE - 1) | 646 | if (entry == RX_RING_SIZE - 1) |
| 642 | paddr |= MACB_BIT(RX_WRAP); | 647 | paddr |= MACB_BIT(RX_WRAP); |
| @@ -725,7 +730,7 @@ static int gem_rx(struct macb *bp, int budget) | |||
| 725 | skb_put(skb, len); | 730 | skb_put(skb, len); |
| 726 | addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr)); | 731 | addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr)); |
| 727 | dma_unmap_single(&bp->pdev->dev, addr, | 732 | dma_unmap_single(&bp->pdev->dev, addr, |
| 728 | len, DMA_FROM_DEVICE); | 733 | bp->rx_buffer_size, DMA_FROM_DEVICE); |
| 729 | 734 | ||
| 730 | skb->protocol = eth_type_trans(skb, bp->dev); | 735 | skb->protocol = eth_type_trans(skb, bp->dev); |
| 731 | skb_checksum_none_assert(skb); | 736 | skb_checksum_none_assert(skb); |
| @@ -1036,11 +1041,15 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 1036 | } | 1041 | } |
| 1037 | 1042 | ||
| 1038 | entry = macb_tx_ring_wrap(bp->tx_head); | 1043 | entry = macb_tx_ring_wrap(bp->tx_head); |
| 1039 | bp->tx_head++; | ||
| 1040 | netdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry); | 1044 | netdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry); |
| 1041 | mapping = dma_map_single(&bp->pdev->dev, skb->data, | 1045 | mapping = dma_map_single(&bp->pdev->dev, skb->data, |
| 1042 | len, DMA_TO_DEVICE); | 1046 | len, DMA_TO_DEVICE); |
| 1047 | if (dma_mapping_error(&bp->pdev->dev, mapping)) { | ||
| 1048 | kfree_skb(skb); | ||
| 1049 | goto unlock; | ||
| 1050 | } | ||
| 1043 | 1051 | ||
| 1052 | bp->tx_head++; | ||
| 1044 | tx_skb = &bp->tx_skb[entry]; | 1053 | tx_skb = &bp->tx_skb[entry]; |
| 1045 | tx_skb->skb = skb; | 1054 | tx_skb->skb = skb; |
| 1046 | tx_skb->mapping = mapping; | 1055 | tx_skb->mapping = mapping; |
| @@ -1066,6 +1075,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 1066 | if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1) | 1075 | if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1) |
| 1067 | netif_stop_queue(dev); | 1076 | netif_stop_queue(dev); |
| 1068 | 1077 | ||
| 1078 | unlock: | ||
| 1069 | spin_unlock_irqrestore(&bp->lock, flags); | 1079 | spin_unlock_irqrestore(&bp->lock, flags); |
| 1070 | 1080 | ||
| 1071 | return NETDEV_TX_OK; | 1081 | return NETDEV_TX_OK; |
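Both macb hunks add a dma_mapping_error() check right after dma_map_single() and only commit ring state (the rx_skbuff[] slot, the tx_head increment) once the mapping is known to be good. A hedged sketch of that check-before-commit ordering, with illustrative names rather than the driver's own:

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/skbuff.h>

    static int demo_map_rx_skb(struct device *dev, struct sk_buff *skb,
    			   size_t buf_size, dma_addr_t *paddr)
    {
    	*paddr = dma_map_single(dev, skb->data, buf_size, DMA_FROM_DEVICE);
    	if (dma_mapping_error(dev, *paddr)) {
    		dev_kfree_skb(skb);	/* nothing was published to the ring yet */
    		return -ENOMEM;
    	}
    	return 0;	/* caller may now store the skb and advance the ring index */
    }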
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 43ab35fea48d..34e2488767d9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | |||
| @@ -6179,6 +6179,7 @@ static struct pci_driver cxgb4_driver = { | |||
| 6179 | .id_table = cxgb4_pci_tbl, | 6179 | .id_table = cxgb4_pci_tbl, |
| 6180 | .probe = init_one, | 6180 | .probe = init_one, |
| 6181 | .remove = remove_one, | 6181 | .remove = remove_one, |
| 6182 | .shutdown = remove_one, | ||
| 6182 | .err_handler = &cxgb4_eeh, | 6183 | .err_handler = &cxgb4_eeh, |
| 6183 | }; | 6184 | }; |
| 6184 | 6185 | ||
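The cxgb4 hunk registers the existing remove handler as the PCI .shutdown callback as well (the mlx4_core hunk further down does the same), so the device is quiesced on reboot/kexec and not only on driver unbind. A minimal sketch of the pattern with placeholder callbacks:

    #include <linux/pci.h>

    static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
    	return 0;	/* placeholder */
    }

    static void demo_remove(struct pci_dev *pdev)
    {
    	/* free rings, disable interrupts, pci_disable_device(), ... */
    }

    static struct pci_driver demo_driver = {
    	.name     = "demo",
    	.probe    = demo_probe,
    	.remove   = demo_remove,
    	.shutdown = demo_remove,	/* .shutdown has the same prototype as .remove */
    };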
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 8d09615da585..05529e273050 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h | |||
| @@ -350,11 +350,13 @@ struct be_drv_stats { | |||
| 350 | u32 roce_drops_crc; | 350 | u32 roce_drops_crc; |
| 351 | }; | 351 | }; |
| 352 | 352 | ||
| 353 | /* A vlan-id of 0xFFFF must be used to clear transparent vlan-tagging */ | ||
| 354 | #define BE_RESET_VLAN_TAG_ID 0xFFFF | ||
| 355 | |||
| 353 | struct be_vf_cfg { | 356 | struct be_vf_cfg { |
| 354 | unsigned char mac_addr[ETH_ALEN]; | 357 | unsigned char mac_addr[ETH_ALEN]; |
| 355 | int if_handle; | 358 | int if_handle; |
| 356 | int pmac_id; | 359 | int pmac_id; |
| 357 | u16 def_vid; | ||
| 358 | u16 vlan_tag; | 360 | u16 vlan_tag; |
| 359 | u32 tx_rate; | 361 | u32 tx_rate; |
| 360 | }; | 362 | }; |
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 04ac9c6a0d39..36c80612e21a 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
| @@ -913,24 +913,14 @@ static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, | |||
| 913 | return BE3_chip(adapter) && be_ipv6_exthdr_check(skb); | 913 | return BE3_chip(adapter) && be_ipv6_exthdr_check(skb); |
| 914 | } | 914 | } |
| 915 | 915 | ||
| 916 | static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, | 916 | static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter, |
| 917 | struct sk_buff *skb, | 917 | struct sk_buff *skb, |
| 918 | bool *skip_hw_vlan) | 918 | bool *skip_hw_vlan) |
| 919 | { | 919 | { |
| 920 | struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; | 920 | struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; |
| 921 | unsigned int eth_hdr_len; | 921 | unsigned int eth_hdr_len; |
| 922 | struct iphdr *ip; | 922 | struct iphdr *ip; |
| 923 | 923 | ||
| 924 | /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or less | ||
| 925 | * may cause a transmit stall on that port. So the work-around is to | ||
| 926 | * pad short packets (<= 32 bytes) to a 36-byte length. | ||
| 927 | */ | ||
| 928 | if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) { | ||
| 929 | if (skb_padto(skb, 36)) | ||
| 930 | goto tx_drop; | ||
| 931 | skb->len = 36; | ||
| 932 | } | ||
| 933 | |||
| 934 | /* For padded packets, BE HW modifies tot_len field in IP header | 924 | /* For padded packets, BE HW modifies tot_len field in IP header |
| 935 | * incorrecly when VLAN tag is inserted by HW. | 925 | * incorrecly when VLAN tag is inserted by HW. |
| 936 | * For padded packets, Lancer computes incorrect checksum. | 926 | * For padded packets, Lancer computes incorrect checksum. |
| @@ -959,7 +949,7 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, | |||
| 959 | vlan_tx_tag_present(skb)) { | 949 | vlan_tx_tag_present(skb)) { |
| 960 | skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan); | 950 | skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan); |
| 961 | if (unlikely(!skb)) | 951 | if (unlikely(!skb)) |
| 962 | goto tx_drop; | 952 | goto err; |
| 963 | } | 953 | } |
| 964 | 954 | ||
| 965 | /* HW may lockup when VLAN HW tagging is requested on | 955 | /* HW may lockup when VLAN HW tagging is requested on |
| @@ -981,15 +971,39 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, | |||
| 981 | be_vlan_tag_tx_chk(adapter, skb)) { | 971 | be_vlan_tag_tx_chk(adapter, skb)) { |
| 982 | skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan); | 972 | skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan); |
| 983 | if (unlikely(!skb)) | 973 | if (unlikely(!skb)) |
| 984 | goto tx_drop; | 974 | goto err; |
| 985 | } | 975 | } |
| 986 | 976 | ||
| 987 | return skb; | 977 | return skb; |
| 988 | tx_drop: | 978 | tx_drop: |
| 989 | dev_kfree_skb_any(skb); | 979 | dev_kfree_skb_any(skb); |
| 980 | err: | ||
| 990 | return NULL; | 981 | return NULL; |
| 991 | } | 982 | } |
| 992 | 983 | ||
| 984 | static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, | ||
| 985 | struct sk_buff *skb, | ||
| 986 | bool *skip_hw_vlan) | ||
| 987 | { | ||
| 988 | /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or | ||
| 989 | * less may cause a transmit stall on that port. So the work-around is | ||
| 990 | * to pad short packets (<= 32 bytes) to a 36-byte length. | ||
| 991 | */ | ||
| 992 | if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) { | ||
| 993 | if (skb_padto(skb, 36)) | ||
| 994 | return NULL; | ||
| 995 | skb->len = 36; | ||
| 996 | } | ||
| 997 | |||
| 998 | if (BEx_chip(adapter) || lancer_chip(adapter)) { | ||
| 999 | skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan); | ||
| 1000 | if (!skb) | ||
| 1001 | return NULL; | ||
| 1002 | } | ||
| 1003 | |||
| 1004 | return skb; | ||
| 1005 | } | ||
| 1006 | |||
| 993 | static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev) | 1007 | static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev) |
| 994 | { | 1008 | { |
| 995 | struct be_adapter *adapter = netdev_priv(netdev); | 1009 | struct be_adapter *adapter = netdev_priv(netdev); |
| @@ -1157,6 +1171,14 @@ ret: | |||
| 1157 | return status; | 1171 | return status; |
| 1158 | } | 1172 | } |
| 1159 | 1173 | ||
| 1174 | static void be_clear_promisc(struct be_adapter *adapter) | ||
| 1175 | { | ||
| 1176 | adapter->promiscuous = false; | ||
| 1177 | adapter->flags &= ~BE_FLAGS_VLAN_PROMISC; | ||
| 1178 | |||
| 1179 | be_cmd_rx_filter(adapter, IFF_PROMISC, OFF); | ||
| 1180 | } | ||
| 1181 | |||
| 1160 | static void be_set_rx_mode(struct net_device *netdev) | 1182 | static void be_set_rx_mode(struct net_device *netdev) |
| 1161 | { | 1183 | { |
| 1162 | struct be_adapter *adapter = netdev_priv(netdev); | 1184 | struct be_adapter *adapter = netdev_priv(netdev); |
| @@ -1170,9 +1192,7 @@ static void be_set_rx_mode(struct net_device *netdev) | |||
| 1170 | 1192 | ||
| 1171 | /* BE was previously in promiscuous mode; disable it */ | 1193 | /* BE was previously in promiscuous mode; disable it */ |
| 1172 | if (adapter->promiscuous) { | 1194 | if (adapter->promiscuous) { |
| 1173 | adapter->promiscuous = false; | 1195 | be_clear_promisc(adapter); |
| 1174 | be_cmd_rx_filter(adapter, IFF_PROMISC, OFF); | ||
| 1175 | |||
| 1176 | if (adapter->vlans_added) | 1196 | if (adapter->vlans_added) |
| 1177 | be_vid_config(adapter); | 1197 | be_vid_config(adapter); |
| 1178 | } | 1198 | } |
| @@ -1287,24 +1307,20 @@ static int be_set_vf_vlan(struct net_device *netdev, | |||
| 1287 | 1307 | ||
| 1288 | if (vlan || qos) { | 1308 | if (vlan || qos) { |
| 1289 | vlan |= qos << VLAN_PRIO_SHIFT; | 1309 | vlan |= qos << VLAN_PRIO_SHIFT; |
| 1290 | if (vf_cfg->vlan_tag != vlan) { | 1310 | if (vf_cfg->vlan_tag != vlan) |
| 1291 | /* If this is new value, program it. Else skip. */ | ||
| 1292 | vf_cfg->vlan_tag = vlan; | ||
| 1293 | status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, | 1311 | status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, |
| 1294 | vf_cfg->if_handle, 0); | 1312 | vf_cfg->if_handle, 0); |
| 1295 | } | ||
| 1296 | } else { | 1313 | } else { |
| 1297 | /* Reset Transparent Vlan Tagging. */ | 1314 | /* Reset Transparent Vlan Tagging. */ |
| 1298 | vf_cfg->vlan_tag = 0; | 1315 | status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, |
| 1299 | vlan = vf_cfg->def_vid; | 1316 | vf + 1, vf_cfg->if_handle, 0); |
| 1300 | status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, | ||
| 1301 | vf_cfg->if_handle, 0); | ||
| 1302 | } | 1317 | } |
| 1303 | 1318 | ||
| 1304 | 1319 | if (!status) | |
| 1305 | if (status) | 1320 | vf_cfg->vlan_tag = vlan; |
| 1321 | else | ||
| 1306 | dev_info(&adapter->pdev->dev, | 1322 | dev_info(&adapter->pdev->dev, |
| 1307 | "VLAN %d config on VF %d failed\n", vlan, vf); | 1323 | "VLAN %d config on VF %d failed\n", vlan, vf); |
| 1308 | return status; | 1324 | return status; |
| 1309 | } | 1325 | } |
| 1310 | 1326 | ||
| @@ -3013,11 +3029,11 @@ static int be_vf_setup_init(struct be_adapter *adapter) | |||
| 3013 | 3029 | ||
| 3014 | static int be_vf_setup(struct be_adapter *adapter) | 3030 | static int be_vf_setup(struct be_adapter *adapter) |
| 3015 | { | 3031 | { |
| 3032 | struct device *dev = &adapter->pdev->dev; | ||
| 3016 | struct be_vf_cfg *vf_cfg; | 3033 | struct be_vf_cfg *vf_cfg; |
| 3017 | u16 def_vlan, lnk_speed; | ||
| 3018 | int status, old_vfs, vf; | 3034 | int status, old_vfs, vf; |
| 3019 | struct device *dev = &adapter->pdev->dev; | ||
| 3020 | u32 privileges; | 3035 | u32 privileges; |
| 3036 | u16 lnk_speed; | ||
| 3021 | 3037 | ||
| 3022 | old_vfs = pci_num_vf(adapter->pdev); | 3038 | old_vfs = pci_num_vf(adapter->pdev); |
| 3023 | if (old_vfs) { | 3039 | if (old_vfs) { |
| @@ -3084,12 +3100,6 @@ static int be_vf_setup(struct be_adapter *adapter) | |||
| 3084 | if (!status) | 3100 | if (!status) |
| 3085 | vf_cfg->tx_rate = lnk_speed; | 3101 | vf_cfg->tx_rate = lnk_speed; |
| 3086 | 3102 | ||
| 3087 | status = be_cmd_get_hsw_config(adapter, &def_vlan, | ||
| 3088 | vf + 1, vf_cfg->if_handle, NULL); | ||
| 3089 | if (status) | ||
| 3090 | goto err; | ||
| 3091 | vf_cfg->def_vid = def_vlan; | ||
| 3092 | |||
| 3093 | if (!old_vfs) | 3103 | if (!old_vfs) |
| 3094 | be_cmd_enable_vf(adapter, vf + 1); | 3104 | be_cmd_enable_vf(adapter, vf + 1); |
| 3095 | } | 3105 | } |
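The be2net hunks change be_set_vf_vlan() so the firmware is programmed first (using BE_RESET_VLAN_TAG_ID, 0xFFFF, to clear transparent tagging) and the cached vf_cfg->vlan_tag is updated only when that command succeeds, which is why the stored def_vid can go away. A standalone sketch of that update-cache-only-on-success pattern; fw_set_vlan() is a made-up stand-in for be_cmd_set_hsw_config() and the failing value exists only for the demo:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    struct vf_cfg { uint16_t vlan_tag; };

    static int fw_set_vlan(uint16_t vlan)		/* pretend firmware call */
    {
    	return (vlan == 42) ? -EIO : 0;		/* fail one value just for the demo */
    }

    static int set_vf_vlan(struct vf_cfg *cfg, uint16_t vlan)
    {
    	uint16_t wanted = vlan ? vlan : 0xFFFF;	/* 0 means "reset tagging" */
    	int status = fw_set_vlan(wanted);

    	if (!status)
    		cfg->vlan_tag = vlan;		/* cache mirrors hardware state only */
    	else
    		fprintf(stderr, "VLAN %u config failed\n", (unsigned)vlan);
    	return status;
    }

    int main(void)
    {
    	struct vf_cfg cfg = { .vlan_tag = 0 };

    	set_vf_vlan(&cfg, 100);
    	printf("after ok call:     %u\n", (unsigned)cfg.vlan_tag);	/* 100 */
    	set_vf_vlan(&cfg, 42);						/* firmware call fails */
    	printf("after failed call: %u\n", (unsigned)cfg.vlan_tag);	/* still 100 */
    	return 0;
    }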
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 903362a7b584..03a351300013 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
| @@ -389,12 +389,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
| 389 | netdev_err(ndev, "Tx DMA memory map failed\n"); | 389 | netdev_err(ndev, "Tx DMA memory map failed\n"); |
| 390 | return NETDEV_TX_OK; | 390 | return NETDEV_TX_OK; |
| 391 | } | 391 | } |
| 392 | /* Send it on its way. Tell FEC it's ready, interrupt when done, | ||
| 393 | * it's the last BD of the frame, and to put the CRC on the end. | ||
| 394 | */ | ||
| 395 | status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR | ||
| 396 | | BD_ENET_TX_LAST | BD_ENET_TX_TC); | ||
| 397 | bdp->cbd_sc = status; | ||
| 398 | 392 | ||
| 399 | if (fep->bufdesc_ex) { | 393 | if (fep->bufdesc_ex) { |
| 400 | 394 | ||
| @@ -416,6 +410,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
| 416 | } | 410 | } |
| 417 | } | 411 | } |
| 418 | 412 | ||
| 413 | /* Send it on its way. Tell FEC it's ready, interrupt when done, | ||
| 414 | * it's the last BD of the frame, and to put the CRC on the end. | ||
| 415 | */ | ||
| 416 | status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR | ||
| 417 | | BD_ENET_TX_LAST | BD_ENET_TX_TC); | ||
| 418 | bdp->cbd_sc = status; | ||
| 419 | |||
| 419 | bdp_pre = fec_enet_get_prevdesc(bdp, fep); | 420 | bdp_pre = fec_enet_get_prevdesc(bdp, fep); |
| 420 | if ((id_entry->driver_data & FEC_QUIRK_ERR006358) && | 421 | if ((id_entry->driver_data & FEC_QUIRK_ERR006358) && |
| 421 | !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) { | 422 | !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) { |
| @@ -527,13 +528,6 @@ fec_restart(struct net_device *ndev, int duplex) | |||
| 527 | /* Clear any outstanding interrupt. */ | 528 | /* Clear any outstanding interrupt. */ |
| 528 | writel(0xffc00000, fep->hwp + FEC_IEVENT); | 529 | writel(0xffc00000, fep->hwp + FEC_IEVENT); |
| 529 | 530 | ||
| 530 | /* Setup multicast filter. */ | ||
| 531 | set_multicast_list(ndev); | ||
| 532 | #ifndef CONFIG_M5272 | ||
| 533 | writel(0, fep->hwp + FEC_HASH_TABLE_HIGH); | ||
| 534 | writel(0, fep->hwp + FEC_HASH_TABLE_LOW); | ||
| 535 | #endif | ||
| 536 | |||
| 537 | /* Set maximum receive buffer size. */ | 531 | /* Set maximum receive buffer size. */ |
| 538 | writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE); | 532 | writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE); |
| 539 | 533 | ||
| @@ -654,6 +648,13 @@ fec_restart(struct net_device *ndev, int duplex) | |||
| 654 | 648 | ||
| 655 | writel(rcntl, fep->hwp + FEC_R_CNTRL); | 649 | writel(rcntl, fep->hwp + FEC_R_CNTRL); |
| 656 | 650 | ||
| 651 | /* Setup multicast filter. */ | ||
| 652 | set_multicast_list(ndev); | ||
| 653 | #ifndef CONFIG_M5272 | ||
| 654 | writel(0, fep->hwp + FEC_HASH_TABLE_HIGH); | ||
| 655 | writel(0, fep->hwp + FEC_HASH_TABLE_LOW); | ||
| 656 | #endif | ||
| 657 | |||
| 657 | if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { | 658 | if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { |
| 658 | /* enable ENET endian swap */ | 659 | /* enable ENET endian swap */ |
| 659 | ecntl |= (1 << 8); | 660 | ecntl |= (1 << 8); |
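The fec hunks reorder initialization so the BD_ENET_TX_READY ownership bits are written only after every other descriptor field (including the extended-descriptor ones) is filled in, and the multicast filter is programmed after FEC_R_CNTRL so the restart does not clobber it. A standalone sketch of the publish-last descriptor ordering; the structure and bit layout are illustrative, not the FEC's real ones, and real driver code typically also issues a write barrier before handing the descriptor to hardware:

    #include <stdint.h>

    #define DEMO_TX_READY (1u << 15)

    struct demo_bd {
    	uint32_t buf_addr;
    	uint16_t len;
    	volatile uint16_t sc;	/* status/control; owned by hardware once READY */
    };

    static void demo_queue_tx(struct demo_bd *bdp, uint32_t dma_addr,
    			  uint16_t len, uint16_t flags)
    {
    	bdp->buf_addr = dma_addr;
    	bdp->len = len;
    	/* ...any extended fields (timestamps, checksum hints) go here... */
    	bdp->sc = (uint16_t)(flags | DEMO_TX_READY);	/* hand over to HW last */
    }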
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 4be971590461..1fc8334fc181 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c | |||
| @@ -522,10 +522,21 @@ retry: | |||
| 522 | return rc; | 522 | return rc; |
| 523 | } | 523 | } |
| 524 | 524 | ||
| 525 | static u64 ibmveth_encode_mac_addr(u8 *mac) | ||
| 526 | { | ||
| 527 | int i; | ||
| 528 | u64 encoded = 0; | ||
| 529 | |||
| 530 | for (i = 0; i < ETH_ALEN; i++) | ||
| 531 | encoded = (encoded << 8) | mac[i]; | ||
| 532 | |||
| 533 | return encoded; | ||
| 534 | } | ||
| 535 | |||
| 525 | static int ibmveth_open(struct net_device *netdev) | 536 | static int ibmveth_open(struct net_device *netdev) |
| 526 | { | 537 | { |
| 527 | struct ibmveth_adapter *adapter = netdev_priv(netdev); | 538 | struct ibmveth_adapter *adapter = netdev_priv(netdev); |
| 528 | u64 mac_address = 0; | 539 | u64 mac_address; |
| 529 | int rxq_entries = 1; | 540 | int rxq_entries = 1; |
| 530 | unsigned long lpar_rc; | 541 | unsigned long lpar_rc; |
| 531 | int rc; | 542 | int rc; |
| @@ -579,8 +590,7 @@ static int ibmveth_open(struct net_device *netdev) | |||
| 579 | adapter->rx_queue.num_slots = rxq_entries; | 590 | adapter->rx_queue.num_slots = rxq_entries; |
| 580 | adapter->rx_queue.toggle = 1; | 591 | adapter->rx_queue.toggle = 1; |
| 581 | 592 | ||
| 582 | memcpy(&mac_address, netdev->dev_addr, netdev->addr_len); | 593 | mac_address = ibmveth_encode_mac_addr(netdev->dev_addr); |
| 583 | mac_address = mac_address >> 16; | ||
| 584 | 594 | ||
| 585 | rxq_desc.fields.flags_len = IBMVETH_BUF_VALID | | 595 | rxq_desc.fields.flags_len = IBMVETH_BUF_VALID | |
| 586 | adapter->rx_queue.queue_len; | 596 | adapter->rx_queue.queue_len; |
| @@ -1183,8 +1193,8 @@ static void ibmveth_set_multicast_list(struct net_device *netdev) | |||
| 1183 | /* add the addresses to the filter table */ | 1193 | /* add the addresses to the filter table */ |
| 1184 | netdev_for_each_mc_addr(ha, netdev) { | 1194 | netdev_for_each_mc_addr(ha, netdev) { |
| 1185 | /* add the multicast address to the filter table */ | 1195 | /* add the multicast address to the filter table */ |
| 1186 | unsigned long mcast_addr = 0; | 1196 | u64 mcast_addr; |
| 1187 | memcpy(((char *)&mcast_addr)+2, ha->addr, ETH_ALEN); | 1197 | mcast_addr = ibmveth_encode_mac_addr(ha->addr); |
| 1188 | lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, | 1198 | lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, |
| 1189 | IbmVethMcastAddFilter, | 1199 | IbmVethMcastAddFilter, |
| 1190 | mcast_addr); | 1200 | mcast_addr); |
| @@ -1372,9 +1382,6 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) | |||
| 1372 | 1382 | ||
| 1373 | netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16); | 1383 | netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16); |
| 1374 | 1384 | ||
| 1375 | adapter->mac_addr = 0; | ||
| 1376 | memcpy(&adapter->mac_addr, mac_addr_p, ETH_ALEN); | ||
| 1377 | |||
| 1378 | netdev->irq = dev->irq; | 1385 | netdev->irq = dev->irq; |
| 1379 | netdev->netdev_ops = &ibmveth_netdev_ops; | 1386 | netdev->netdev_ops = &ibmveth_netdev_ops; |
| 1380 | netdev->ethtool_ops = &netdev_ethtool_ops; | 1387 | netdev->ethtool_ops = &netdev_ethtool_ops; |
| @@ -1383,7 +1390,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) | |||
| 1383 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; | 1390 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; |
| 1384 | netdev->features |= netdev->hw_features; | 1391 | netdev->features |= netdev->hw_features; |
| 1385 | 1392 | ||
| 1386 | memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len); | 1393 | memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN); |
| 1387 | 1394 | ||
| 1388 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { | 1395 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { |
| 1389 | struct kobject *kobj = &adapter->rx_buff_pool[i].kobj; | 1396 | struct kobject *kobj = &adapter->rx_buff_pool[i].kobj; |
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h index 451ba7949e15..1f37499d4398 100644 --- a/drivers/net/ethernet/ibm/ibmveth.h +++ b/drivers/net/ethernet/ibm/ibmveth.h | |||
| @@ -138,7 +138,6 @@ struct ibmveth_adapter { | |||
| 138 | struct napi_struct napi; | 138 | struct napi_struct napi; |
| 139 | struct net_device_stats stats; | 139 | struct net_device_stats stats; |
| 140 | unsigned int mcastFilterSize; | 140 | unsigned int mcastFilterSize; |
| 141 | unsigned long mac_addr; | ||
| 142 | void * buffer_list_addr; | 141 | void * buffer_list_addr; |
| 143 | void * filter_list_addr; | 142 | void * filter_list_addr; |
| 144 | dma_addr_t buffer_list_dma; | 143 | dma_addr_t buffer_list_dma; |
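The ibmveth hunks introduce ibmveth_encode_mac_addr() so the MAC address is packed into the low 48 bits of a u64 byte by byte, which yields the same value on big- and little-endian hosts, unlike the old memcpy()-plus-shift approach. A standalone sketch of the encoding:

    #include <stdint.h>
    #include <stdio.h>

    #define ETH_ALEN 6

    static uint64_t encode_mac(const uint8_t *mac)
    {
    	uint64_t encoded = 0;
    	int i;

    	for (i = 0; i < ETH_ALEN; i++)
    		encoded = (encoded << 8) | mac[i];	/* MSB-first, endian-independent */
    	return encoded;
    }

    int main(void)
    {
    	const uint8_t mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0xab, 0xcd, 0xef };

    	printf("0x%012llx\n", (unsigned long long)encode_mac(mac));
    	/* prints 0x020000abcdef regardless of host endianness */
    	return 0;
    }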
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index fad45316200a..84a96f70dfb5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
| @@ -742,6 +742,14 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn, | |||
| 742 | err = mlx4_en_uc_steer_add(priv, new_mac, | 742 | err = mlx4_en_uc_steer_add(priv, new_mac, |
| 743 | &qpn, | 743 | &qpn, |
| 744 | &entry->reg_id); | 744 | &entry->reg_id); |
| 745 | if (err) | ||
| 746 | return err; | ||
| 747 | if (priv->tunnel_reg_id) { | ||
| 748 | mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id); | ||
| 749 | priv->tunnel_reg_id = 0; | ||
| 750 | } | ||
| 751 | err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn, | ||
| 752 | &priv->tunnel_reg_id); | ||
| 745 | return err; | 753 | return err; |
| 746 | } | 754 | } |
| 747 | } | 755 | } |
| @@ -1792,6 +1800,8 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) | |||
| 1792 | mc_list[5] = priv->port; | 1800 | mc_list[5] = priv->port; |
| 1793 | mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, | 1801 | mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, |
| 1794 | mc_list, MLX4_PROT_ETH, mclist->reg_id); | 1802 | mc_list, MLX4_PROT_ETH, mclist->reg_id); |
| 1803 | if (mclist->tunnel_reg_id) | ||
| 1804 | mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id); | ||
| 1795 | } | 1805 | } |
| 1796 | mlx4_en_clear_list(dev); | 1806 | mlx4_en_clear_list(dev); |
| 1797 | list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) { | 1807 | list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) { |
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 91b69ff4b4a2..7e2995ecea6f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c | |||
| @@ -129,13 +129,14 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) | |||
| 129 | [0] = "RSS support", | 129 | [0] = "RSS support", |
| 130 | [1] = "RSS Toeplitz Hash Function support", | 130 | [1] = "RSS Toeplitz Hash Function support", |
| 131 | [2] = "RSS XOR Hash Function support", | 131 | [2] = "RSS XOR Hash Function support", |
| 132 | [3] = "Device manage flow steering support", | 132 | [3] = "Device managed flow steering support", |
| 133 | [4] = "Automatic MAC reassignment support", | 133 | [4] = "Automatic MAC reassignment support", |
| 134 | [5] = "Time stamping support", | 134 | [5] = "Time stamping support", |
| 135 | [6] = "VST (control vlan insertion/stripping) support", | 135 | [6] = "VST (control vlan insertion/stripping) support", |
| 136 | [7] = "FSM (MAC anti-spoofing) support", | 136 | [7] = "FSM (MAC anti-spoofing) support", |
| 137 | [8] = "Dynamic QP updates support", | 137 | [8] = "Dynamic QP updates support", |
| 138 | [9] = "TCP/IP offloads/flow-steering for VXLAN support" | 138 | [9] = "Device managed flow steering IPoIB support", |
| 139 | [10] = "TCP/IP offloads/flow-steering for VXLAN support" | ||
| 139 | }; | 140 | }; |
| 140 | int i; | 141 | int i; |
| 141 | 142 | ||
| @@ -859,7 +860,7 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, | |||
| 859 | MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET); | 860 | MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET); |
| 860 | 861 | ||
| 861 | /* For guests, disable vxlan tunneling */ | 862 | /* For guests, disable vxlan tunneling */ |
| 862 | MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN); | 863 | MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN); |
| 863 | field &= 0xf7; | 864 | field &= 0xf7; |
| 864 | MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN); | 865 | MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN); |
| 865 | 866 | ||
| @@ -869,7 +870,7 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, | |||
| 869 | MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET); | 870 | MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET); |
| 870 | 871 | ||
| 871 | /* For guests, disable mw type 2 */ | 872 | /* For guests, disable mw type 2 */ |
| 872 | MLX4_GET(bmme_flags, outbox, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); | 873 | MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); |
| 873 | bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN; | 874 | bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN; |
| 874 | MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); | 875 | MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); |
| 875 | 876 | ||
| @@ -883,7 +884,7 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, | |||
| 883 | } | 884 | } |
| 884 | 885 | ||
| 885 | /* turn off ipoib managed steering for guests */ | 886 | /* turn off ipoib managed steering for guests */ |
| 886 | MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); | 887 | MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); |
| 887 | field &= ~0x80; | 888 | field &= ~0x80; |
| 888 | MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); | 889 | MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); |
| 889 | 890 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index d711158b0d4b..936c15364739 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c | |||
| @@ -150,6 +150,8 @@ struct mlx4_port_config { | |||
| 150 | struct pci_dev *pdev; | 150 | struct pci_dev *pdev; |
| 151 | }; | 151 | }; |
| 152 | 152 | ||
| 153 | static atomic_t pf_loading = ATOMIC_INIT(0); | ||
| 154 | |||
| 153 | int mlx4_check_port_params(struct mlx4_dev *dev, | 155 | int mlx4_check_port_params(struct mlx4_dev *dev, |
| 154 | enum mlx4_port_type *port_type) | 156 | enum mlx4_port_type *port_type) |
| 155 | { | 157 | { |
| @@ -749,7 +751,7 @@ static void mlx4_request_modules(struct mlx4_dev *dev) | |||
| 749 | has_eth_port = true; | 751 | has_eth_port = true; |
| 750 | } | 752 | } |
| 751 | 753 | ||
| 752 | if (has_ib_port) | 754 | if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) |
| 753 | request_module_nowait(IB_DRV_NAME); | 755 | request_module_nowait(IB_DRV_NAME); |
| 754 | if (has_eth_port) | 756 | if (has_eth_port) |
| 755 | request_module_nowait(EN_DRV_NAME); | 757 | request_module_nowait(EN_DRV_NAME); |
| @@ -1407,6 +1409,11 @@ static int mlx4_init_slave(struct mlx4_dev *dev) | |||
| 1407 | u32 slave_read; | 1409 | u32 slave_read; |
| 1408 | u32 cmd_channel_ver; | 1410 | u32 cmd_channel_ver; |
| 1409 | 1411 | ||
| 1412 | if (atomic_read(&pf_loading)) { | ||
| 1413 | mlx4_warn(dev, "PF is not ready. Deferring probe\n"); | ||
| 1414 | return -EPROBE_DEFER; | ||
| 1415 | } | ||
| 1416 | |||
| 1410 | mutex_lock(&priv->cmd.slave_cmd_mutex); | 1417 | mutex_lock(&priv->cmd.slave_cmd_mutex); |
| 1411 | priv->cmd.max_cmds = 1; | 1418 | priv->cmd.max_cmds = 1; |
| 1412 | mlx4_warn(dev, "Sending reset\n"); | 1419 | mlx4_warn(dev, "Sending reset\n"); |
| @@ -2319,7 +2326,11 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) | |||
| 2319 | 2326 | ||
| 2320 | if (num_vfs) { | 2327 | if (num_vfs) { |
| 2321 | mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", num_vfs); | 2328 | mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", num_vfs); |
| 2329 | |||
| 2330 | atomic_inc(&pf_loading); | ||
| 2322 | err = pci_enable_sriov(pdev, num_vfs); | 2331 | err = pci_enable_sriov(pdev, num_vfs); |
| 2332 | atomic_dec(&pf_loading); | ||
| 2333 | |||
| 2323 | if (err) { | 2334 | if (err) { |
| 2324 | mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n", | 2335 | mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n", |
| 2325 | err); | 2336 | err); |
| @@ -2684,6 +2695,7 @@ static struct pci_driver mlx4_driver = { | |||
| 2684 | .name = DRV_NAME, | 2695 | .name = DRV_NAME, |
| 2685 | .id_table = mlx4_pci_table, | 2696 | .id_table = mlx4_pci_table, |
| 2686 | .probe = mlx4_init_one, | 2697 | .probe = mlx4_init_one, |
| 2698 | .shutdown = mlx4_remove_one, | ||
| 2687 | .remove = mlx4_remove_one, | 2699 | .remove = mlx4_remove_one, |
| 2688 | .err_handler = &mlx4_err_handler, | 2700 | .err_handler = &mlx4_err_handler, |
| 2689 | }; | 2701 | }; |
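The mlx4_core hunks wrap pci_enable_sriov() in a pf_loading counter so a VF probed while the PF is still bringing up SR-IOV returns -EPROBE_DEFER and is retried by the driver core later. A hedged sketch of that deferral pattern with illustrative function names:

    #include <linux/atomic.h>
    #include <linux/errno.h>
    #include <linux/pci.h>

    static atomic_t pf_busy = ATOMIC_INIT(0);

    static int demo_vf_probe_step(void)
    {
    	if (atomic_read(&pf_busy))
    		return -EPROBE_DEFER;	/* driver core will retry the probe later */
    	/* ...normal VF initialisation... */
    	return 0;
    }

    static int demo_pf_enable_sriov(struct pci_dev *pdev, int num_vfs)
    {
    	int err;

    	atomic_inc(&pf_busy);
    	err = pci_enable_sriov(pdev, num_vfs);	/* may probe VFs synchronously */
    	atomic_dec(&pf_busy);
    	return err;
    }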
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 6b65f7795215..7aec6c833973 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h | |||
| @@ -51,8 +51,8 @@ | |||
| 51 | 51 | ||
| 52 | #define DRV_NAME "mlx4_core" | 52 | #define DRV_NAME "mlx4_core" |
| 53 | #define PFX DRV_NAME ": " | 53 | #define PFX DRV_NAME ": " |
| 54 | #define DRV_VERSION "1.1" | 54 | #define DRV_VERSION "2.2-1" |
| 55 | #define DRV_RELDATE "Dec, 2011" | 55 | #define DRV_RELDATE "Feb, 2014" |
| 56 | 56 | ||
| 57 | #define MLX4_FS_UDP_UC_EN (1 << 1) | 57 | #define MLX4_FS_UDP_UC_EN (1 << 1) |
| 58 | #define MLX4_FS_TCP_UC_EN (1 << 2) | 58 | #define MLX4_FS_TCP_UC_EN (1 << 2) |
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 9ca223bc90fc..b57e8c87a34e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | |||
| @@ -57,8 +57,8 @@ | |||
| 57 | #include "en_port.h" | 57 | #include "en_port.h" |
| 58 | 58 | ||
| 59 | #define DRV_NAME "mlx4_en" | 59 | #define DRV_NAME "mlx4_en" |
| 60 | #define DRV_VERSION "2.0" | 60 | #define DRV_VERSION "2.2-1" |
| 61 | #define DRV_RELDATE "Dec 2011" | 61 | #define DRV_RELDATE "Feb 2014" |
| 62 | 62 | ||
| 63 | #define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN) | 63 | #define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN) |
| 64 | 64 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index a064f06e0cb8..23b7e2d35a93 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c | |||
| @@ -46,8 +46,8 @@ | |||
| 46 | #include "mlx5_core.h" | 46 | #include "mlx5_core.h" |
| 47 | 47 | ||
| 48 | #define DRIVER_NAME "mlx5_core" | 48 | #define DRIVER_NAME "mlx5_core" |
| 49 | #define DRIVER_VERSION "1.0" | 49 | #define DRIVER_VERSION "2.2-1" |
| 50 | #define DRIVER_RELDATE "June 2013" | 50 | #define DRIVER_RELDATE "Feb 2014" |
| 51 | 51 | ||
| 52 | MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>"); | 52 | MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>"); |
| 53 | MODULE_DESCRIPTION("Mellanox ConnectX-IB HCA core library"); | 53 | MODULE_DESCRIPTION("Mellanox ConnectX-IB HCA core library"); |
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c index 727b546a9eb8..e0c92e0e5e1d 100644 --- a/drivers/net/ethernet/micrel/ks8851.c +++ b/drivers/net/ethernet/micrel/ks8851.c | |||
| @@ -23,6 +23,7 @@ | |||
| 23 | #include <linux/crc32.h> | 23 | #include <linux/crc32.h> |
| 24 | #include <linux/mii.h> | 24 | #include <linux/mii.h> |
| 25 | #include <linux/eeprom_93cx6.h> | 25 | #include <linux/eeprom_93cx6.h> |
| 26 | #include <linux/regulator/consumer.h> | ||
| 26 | 27 | ||
| 27 | #include <linux/spi/spi.h> | 28 | #include <linux/spi/spi.h> |
| 28 | 29 | ||
| @@ -83,6 +84,7 @@ union ks8851_tx_hdr { | |||
| 83 | * @rc_rxqcr: Cached copy of KS_RXQCR. | 84 | * @rc_rxqcr: Cached copy of KS_RXQCR. |
| 84 | * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom | 85 | * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom |
| 85 | * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM. | 86 | * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM. |
| 87 | * @vdd_reg: Optional regulator supplying the chip | ||
| 86 | * | 88 | * |
| 87 | * The @lock ensures that the chip is protected when certain operations are | 89 | * The @lock ensures that the chip is protected when certain operations are |
| 88 | * in progress. When the read or write packet transfer is in progress, most | 90 | * in progress. When the read or write packet transfer is in progress, most |
| @@ -130,6 +132,7 @@ struct ks8851_net { | |||
| 130 | struct spi_transfer spi_xfer2[2]; | 132 | struct spi_transfer spi_xfer2[2]; |
| 131 | 133 | ||
| 132 | struct eeprom_93cx6 eeprom; | 134 | struct eeprom_93cx6 eeprom; |
| 135 | struct regulator *vdd_reg; | ||
| 133 | }; | 136 | }; |
| 134 | 137 | ||
| 135 | static int msg_enable; | 138 | static int msg_enable; |
| @@ -1414,6 +1417,21 @@ static int ks8851_probe(struct spi_device *spi) | |||
| 1414 | ks->spidev = spi; | 1417 | ks->spidev = spi; |
| 1415 | ks->tx_space = 6144; | 1418 | ks->tx_space = 6144; |
| 1416 | 1419 | ||
| 1420 | ks->vdd_reg = regulator_get_optional(&spi->dev, "vdd"); | ||
| 1421 | if (IS_ERR(ks->vdd_reg)) { | ||
| 1422 | ret = PTR_ERR(ks->vdd_reg); | ||
| 1423 | if (ret == -EPROBE_DEFER) | ||
| 1424 | goto err_reg; | ||
| 1425 | } else { | ||
| 1426 | ret = regulator_enable(ks->vdd_reg); | ||
| 1427 | if (ret) { | ||
| 1428 | dev_err(&spi->dev, "regulator enable fail: %d\n", | ||
| 1429 | ret); | ||
| 1430 | goto err_reg_en; | ||
| 1431 | } | ||
| 1432 | } | ||
| 1433 | |||
| 1434 | |||
| 1417 | mutex_init(&ks->lock); | 1435 | mutex_init(&ks->lock); |
| 1418 | spin_lock_init(&ks->statelock); | 1436 | spin_lock_init(&ks->statelock); |
| 1419 | 1437 | ||
| @@ -1508,8 +1526,14 @@ static int ks8851_probe(struct spi_device *spi) | |||
| 1508 | err_netdev: | 1526 | err_netdev: |
| 1509 | free_irq(ndev->irq, ks); | 1527 | free_irq(ndev->irq, ks); |
| 1510 | 1528 | ||
| 1511 | err_id: | ||
| 1512 | err_irq: | 1529 | err_irq: |
| 1530 | err_id: | ||
| 1531 | if (!IS_ERR(ks->vdd_reg)) | ||
| 1532 | regulator_disable(ks->vdd_reg); | ||
| 1533 | err_reg_en: | ||
| 1534 | if (!IS_ERR(ks->vdd_reg)) | ||
| 1535 | regulator_put(ks->vdd_reg); | ||
| 1536 | err_reg: | ||
| 1513 | free_netdev(ndev); | 1537 | free_netdev(ndev); |
| 1514 | return ret; | 1538 | return ret; |
| 1515 | } | 1539 | } |
| @@ -1523,6 +1547,10 @@ static int ks8851_remove(struct spi_device *spi) | |||
| 1523 | 1547 | ||
| 1524 | unregister_netdev(priv->netdev); | 1548 | unregister_netdev(priv->netdev); |
| 1525 | free_irq(spi->irq, priv); | 1549 | free_irq(spi->irq, priv); |
| 1550 | if (!IS_ERR(priv->vdd_reg)) { | ||
| 1551 | regulator_disable(priv->vdd_reg); | ||
| 1552 | regulator_put(priv->vdd_reg); | ||
| 1553 | } | ||
| 1526 | free_netdev(priv->netdev); | 1554 | free_netdev(priv->netdev); |
| 1527 | 1555 | ||
| 1528 | return 0; | 1556 | return 0; |
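The ks8851 hunks acquire an optional "vdd" regulator at probe time, propagate -EPROBE_DEFER if the supply is not ready yet, and disable/put it on the error path and in remove. A sketch of the optional-regulator lookup, mirroring how the driver keeps the ERR_PTR cookie and treats any other lookup error as "no supply":

    #include <linux/err.h>
    #include <linux/errno.h>
    #include <linux/regulator/consumer.h>

    static int demo_get_vdd(struct device *dev, struct regulator **out)
    {
    	struct regulator *reg = regulator_get_optional(dev, "vdd");

    	if (IS_ERR(reg)) {
    		if (PTR_ERR(reg) == -EPROBE_DEFER)
    			return -EPROBE_DEFER;	/* supply exists but is not ready yet */
    		*out = reg;	/* keep the error cookie; later code checks !IS_ERR() */
    		return 0;
    	}

    	*out = reg;
    	return regulator_enable(reg);	/* power the chip before touching registers */
    }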
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 4146664d4d6a..27c4f131863b 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | |||
| @@ -340,6 +340,7 @@ int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter) | |||
| 340 | if (qlcnic_sriov_vf_check(adapter)) | 340 | if (qlcnic_sriov_vf_check(adapter)) |
| 341 | return -EINVAL; | 341 | return -EINVAL; |
| 342 | num_msix = 1; | 342 | num_msix = 1; |
| 343 | adapter->drv_sds_rings = QLCNIC_SINGLE_RING; | ||
| 343 | adapter->drv_tx_rings = QLCNIC_SINGLE_RING; | 344 | adapter->drv_tx_rings = QLCNIC_SINGLE_RING; |
| 344 | } | 345 | } |
| 345 | } | 346 | } |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c index 77f1bce432d2..7d4f54912bad 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c | |||
| @@ -807,7 +807,7 @@ qlcnic_dcb_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, u8 *prio, | |||
| 807 | !type->tc_param_valid) | 807 | !type->tc_param_valid) |
| 808 | return; | 808 | return; |
| 809 | 809 | ||
| 810 | if (tc < 0 || (tc > QLC_DCB_MAX_TC)) | 810 | if (tc < 0 || (tc >= QLC_DCB_MAX_TC)) |
| 811 | return; | 811 | return; |
| 812 | 812 | ||
| 813 | tc_cfg = &type->tc_cfg[tc]; | 813 | tc_cfg = &type->tc_cfg[tc]; |
| @@ -843,7 +843,7 @@ static void qlcnic_dcb_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, | |||
| 843 | !type->tc_param_valid) | 843 | !type->tc_param_valid) |
| 844 | return; | 844 | return; |
| 845 | 845 | ||
| 846 | if (pgid < 0 || pgid > QLC_DCB_MAX_PG) | 846 | if (pgid < 0 || pgid >= QLC_DCB_MAX_PG) |
| 847 | return; | 847 | return; |
| 848 | 848 | ||
| 849 | pgcfg = &type->pg_cfg[pgid]; | 849 | pgcfg = &type->pg_cfg[pgid]; |
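The qlcnic_dcb.c hunks fix two off-by-one bounds checks: an index into an array of QLC_DCB_MAX_TC (or QLC_DCB_MAX_PG) entries must be rejected when it is greater than or equal to the size, not only when it exceeds it. A standalone sketch with an illustrative size:

    #include <stdbool.h>

    #define DEMO_MAX_TC 8	/* illustrative array size */

    static bool tc_index_valid(int tc)
    {
    	/* valid indices are 0..DEMO_MAX_TC-1; the old "> MAX" test let
    	 * tc == DEMO_MAX_TC read one element past the end of the array
    	 */
    	return tc >= 0 && tc < DEMO_MAX_TC;
    }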
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index ba78c7481fa3..1222865cfb73 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | |||
| @@ -816,9 +816,10 @@ static int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter) | |||
| 816 | 816 | ||
| 817 | if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) { | 817 | if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) { |
| 818 | qlcnic_disable_multi_tx(adapter); | 818 | qlcnic_disable_multi_tx(adapter); |
| 819 | adapter->drv_sds_rings = QLCNIC_SINGLE_RING; | ||
| 819 | 820 | ||
| 820 | err = qlcnic_enable_msi_legacy(adapter); | 821 | err = qlcnic_enable_msi_legacy(adapter); |
| 821 | if (!err) | 822 | if (err) |
| 822 | return err; | 823 | return err; |
| 823 | } | 824 | } |
| 824 | } | 825 | } |
| @@ -3863,7 +3864,7 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt, | |||
| 3863 | strcpy(buf, "Tx"); | 3864 | strcpy(buf, "Tx"); |
| 3864 | } | 3865 | } |
| 3865 | 3866 | ||
| 3866 | if (!qlcnic_use_msi_x && !qlcnic_use_msi) { | 3867 | if (!QLCNIC_IS_MSI_FAMILY(adapter)) { |
| 3867 | netdev_err(netdev, "No RSS/TSS support in INT-x mode\n"); | 3868 | netdev_err(netdev, "No RSS/TSS support in INT-x mode\n"); |
| 3868 | return -EINVAL; | 3869 | return -EINVAL; |
| 3869 | } | 3870 | } |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c index 09acf15c3a56..e5277a632671 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | |||
| @@ -13,8 +13,6 @@ | |||
| 13 | #define QLC_VF_MIN_TX_RATE 100 | 13 | #define QLC_VF_MIN_TX_RATE 100 |
| 14 | #define QLC_VF_MAX_TX_RATE 9999 | 14 | #define QLC_VF_MAX_TX_RATE 9999 |
| 15 | #define QLC_MAC_OPCODE_MASK 0x7 | 15 | #define QLC_MAC_OPCODE_MASK 0x7 |
| 16 | #define QLC_MAC_STAR_ADD 6 | ||
| 17 | #define QLC_MAC_STAR_DEL 7 | ||
| 18 | #define QLC_VF_FLOOD_BIT BIT_16 | 16 | #define QLC_VF_FLOOD_BIT BIT_16 |
| 19 | #define QLC_FLOOD_MODE 0x5 | 17 | #define QLC_FLOOD_MODE 0x5 |
| 20 | 18 | ||
| @@ -1206,13 +1204,6 @@ static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter, | |||
| 1206 | struct qlcnic_vport *vp = vf->vp; | 1204 | struct qlcnic_vport *vp = vf->vp; |
| 1207 | u8 op, new_op; | 1205 | u8 op, new_op; |
| 1208 | 1206 | ||
| 1209 | if (((cmd->req.arg[1] & QLC_MAC_OPCODE_MASK) == QLC_MAC_STAR_ADD) || | ||
| 1210 | ((cmd->req.arg[1] & QLC_MAC_OPCODE_MASK) == QLC_MAC_STAR_DEL)) { | ||
| 1211 | netdev_err(adapter->netdev, "MAC + any VLAN filter not allowed from VF %d\n", | ||
| 1212 | vf->pci_func); | ||
| 1213 | return -EINVAL; | ||
| 1214 | } | ||
| 1215 | |||
| 1216 | if (!(cmd->req.arg[1] & BIT_8)) | 1207 | if (!(cmd->req.arg[1] & BIT_8)) |
| 1217 | return -EINVAL; | 1208 | return -EINVAL; |
| 1218 | 1209 | ||
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 91a67ae8f17b..3ff7bc3e7a23 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
| @@ -209,7 +209,7 @@ static const struct { | |||
| 209 | [RTL_GIGA_MAC_VER_16] = | 209 | [RTL_GIGA_MAC_VER_16] = |
| 210 | _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true), | 210 | _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true), |
| 211 | [RTL_GIGA_MAC_VER_17] = | 211 | [RTL_GIGA_MAC_VER_17] = |
| 212 | _R("RTL8168b/8111b", RTL_TD_1, NULL, JUMBO_4K, false), | 212 | _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false), |
| 213 | [RTL_GIGA_MAC_VER_18] = | 213 | [RTL_GIGA_MAC_VER_18] = |
| 214 | _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false), | 214 | _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false), |
| 215 | [RTL_GIGA_MAC_VER_19] = | 215 | [RTL_GIGA_MAC_VER_19] = |
| @@ -7118,6 +7118,8 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 7118 | } | 7118 | } |
| 7119 | 7119 | ||
| 7120 | mutex_init(&tp->wk.mutex); | 7120 | mutex_init(&tp->wk.mutex); |
| 7121 | u64_stats_init(&tp->rx_stats.syncp); | ||
| 7122 | u64_stats_init(&tp->tx_stats.syncp); | ||
| 7121 | 7123 | ||
| 7122 | /* Get MAC address */ | 7124 | /* Get MAC address */ |
| 7123 | for (i = 0; i < ETH_ALEN; i++) | 7125 | for (i = 0; i < ETH_ALEN; i++) |
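The two u64_stats_init() calls added to rtl_init_one() follow the general u64_stats_sync pattern: the seqcount protecting 64-bit counters has to be initialised before the first writer runs, otherwise lockdep on 32-bit SMP warns about an uninitialised seqcount. A rough, self-contained sketch of that pattern with invented names, not the r8169 structures:

#include <linux/u64_stats_sync.h>

struct demo_stats {
        u64 packets;
        u64 bytes;
        struct u64_stats_sync syncp;
};

static void demo_stats_setup(struct demo_stats *s)
{
        u64_stats_init(&s->syncp);      /* must happen before any update */
}

static void demo_stats_account(struct demo_stats *s, unsigned int len)
{
        u64_stats_update_begin(&s->syncp);
        s->packets++;
        s->bytes += len;
        u64_stats_update_end(&s->syncp);
}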
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index eb75fbd11a01..d7a36829649a 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c | |||
| @@ -1668,6 +1668,13 @@ void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev) | |||
| 1668 | struct efx_ptp_data *ptp = efx->ptp_data; | 1668 | struct efx_ptp_data *ptp = efx->ptp_data; |
| 1669 | int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE); | 1669 | int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE); |
| 1670 | 1670 | ||
| 1671 | if (!ptp) { | ||
| 1672 | if (net_ratelimit()) | ||
| 1673 | netif_warn(efx, drv, efx->net_dev, | ||
| 1674 | "Received PTP event but PTP not set up\n"); | ||
| 1675 | return; | ||
| 1676 | } | ||
| 1677 | |||
| 1671 | if (!ptp->enabled) | 1678 | if (!ptp->enabled) |
| 1672 | return; | 1679 | return; |
| 1673 | 1680 | ||
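The sfc change above is a defensive early return for events that arrive before the PTP state exists, with the warning rate-limited so broken firmware cannot flood the log. A small illustrative sketch of that shape, using made-up types rather than the sfc driver's own:

#include <linux/netdevice.h>

struct demo_nic {
        struct net_device *net_dev;
        void *ptp_data;         /* NULL until PTP probing has finished */
};

static void demo_handle_ptp_event(struct demo_nic *nic)
{
        if (!nic->ptp_data) {
                if (net_ratelimit())
                        netdev_warn(nic->net_dev,
                                    "PTP event received before PTP was set up\n");
                return;
        }
        /* ... normal event processing ... */
}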
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c index 72d282bf33a5..c553f6b5a913 100644 --- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c | |||
| @@ -151,7 +151,7 @@ static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p) | |||
| 151 | sizeof(struct dma_desc))); | 151 | sizeof(struct dma_desc))); |
| 152 | } | 152 | } |
| 153 | 153 | ||
| 154 | const struct stmmac_chain_mode_ops chain_mode_ops = { | 154 | const struct stmmac_mode_ops chain_mode_ops = { |
| 155 | .init = stmmac_init_dma_chain, | 155 | .init = stmmac_init_dma_chain, |
| 156 | .is_jumbo_frm = stmmac_is_jumbo_frm, | 156 | .is_jumbo_frm = stmmac_is_jumbo_frm, |
| 157 | .jumbo_frm = stmmac_jumbo_frm, | 157 | .jumbo_frm = stmmac_jumbo_frm, |
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 7834a3993946..74610f3aca9e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h | |||
| @@ -419,20 +419,13 @@ struct mii_regs { | |||
| 419 | unsigned int data; /* MII Data */ | 419 | unsigned int data; /* MII Data */ |
| 420 | }; | 420 | }; |
| 421 | 421 | ||
| 422 | struct stmmac_ring_mode_ops { | 422 | struct stmmac_mode_ops { |
| 423 | unsigned int (*is_jumbo_frm) (int len, int ehn_desc); | ||
| 424 | unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum); | ||
| 425 | void (*refill_desc3) (void *priv, struct dma_desc *p); | ||
| 426 | void (*init_desc3) (struct dma_desc *p); | ||
| 427 | void (*clean_desc3) (void *priv, struct dma_desc *p); | ||
| 428 | int (*set_16kib_bfsize) (int mtu); | ||
| 429 | }; | ||
| 430 | |||
| 431 | struct stmmac_chain_mode_ops { | ||
| 432 | void (*init) (void *des, dma_addr_t phy_addr, unsigned int size, | 423 | void (*init) (void *des, dma_addr_t phy_addr, unsigned int size, |
| 433 | unsigned int extend_desc); | 424 | unsigned int extend_desc); |
| 434 | unsigned int (*is_jumbo_frm) (int len, int ehn_desc); | 425 | unsigned int (*is_jumbo_frm) (int len, int ehn_desc); |
| 435 | unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum); | 426 | unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum); |
| 427 | int (*set_16kib_bfsize)(int mtu); | ||
| 428 | void (*init_desc3)(struct dma_desc *p); | ||
| 436 | void (*refill_desc3) (void *priv, struct dma_desc *p); | 429 | void (*refill_desc3) (void *priv, struct dma_desc *p); |
| 437 | void (*clean_desc3) (void *priv, struct dma_desc *p); | 430 | void (*clean_desc3) (void *priv, struct dma_desc *p); |
| 438 | }; | 431 | }; |
| @@ -441,8 +434,7 @@ struct mac_device_info { | |||
| 441 | const struct stmmac_ops *mac; | 434 | const struct stmmac_ops *mac; |
| 442 | const struct stmmac_desc_ops *desc; | 435 | const struct stmmac_desc_ops *desc; |
| 443 | const struct stmmac_dma_ops *dma; | 436 | const struct stmmac_dma_ops *dma; |
| 444 | const struct stmmac_ring_mode_ops *ring; | 437 | const struct stmmac_mode_ops *mode; |
| 445 | const struct stmmac_chain_mode_ops *chain; | ||
| 446 | const struct stmmac_hwtimestamp *ptp; | 438 | const struct stmmac_hwtimestamp *ptp; |
| 447 | struct mii_regs mii; /* MII register Addresses */ | 439 | struct mii_regs mii; /* MII register Addresses */ |
| 448 | struct mac_link link; | 440 | struct mac_link link; |
| @@ -460,7 +452,7 @@ void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, | |||
| 460 | void stmmac_set_mac(void __iomem *ioaddr, bool enable); | 452 | void stmmac_set_mac(void __iomem *ioaddr, bool enable); |
| 461 | 453 | ||
| 462 | void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); | 454 | void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); |
| 463 | extern const struct stmmac_ring_mode_ops ring_mode_ops; | 455 | extern const struct stmmac_mode_ops ring_mode_ops; |
| 464 | extern const struct stmmac_chain_mode_ops chain_mode_ops; | 456 | extern const struct stmmac_mode_ops chain_mode_ops; |
| 465 | 457 | ||
| 466 | #endif /* __COMMON_H__ */ | 458 | #endif /* __COMMON_H__ */ |
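The stmmac hunks above and below collapse the separate ring and chain operation tables into a single stmmac_mode_ops vtable selected at init time. Roughly, the pattern looks like the following sketch; the names and hooks are simplified stand-ins, not the real stmmac API:

struct demo_mode_ops {
        void (*init)(void *ring, int size);             /* optional hook */
        void (*refill_desc3)(void *priv, void *desc);
};

static void demo_chain_init(void *ring, int size) { }
static void demo_chain_refill(void *priv, void *desc) { }
static void demo_ring_refill(void *priv, void *desc) { }

static const struct demo_mode_ops demo_chain_ops = {
        .init           = demo_chain_init,
        .refill_desc3   = demo_chain_refill,
};

static const struct demo_mode_ops demo_ring_ops = {
        .refill_desc3   = demo_ring_refill,     /* no .init in ring mode */
};

struct demo_hw {
        const struct demo_mode_ops *mode;       /* one pointer replaces ->ring and ->chain */
};

static void demo_hw_setup(struct demo_hw *hw, bool chain_mode)
{
        hw->mode = chain_mode ? &demo_chain_ops : &demo_ring_ops;
}

Callers then test for optional hooks instead of branching on the configured mode, which is what the stmmac_main.c hunks further down switch to.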
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index a96c7c2f5f3f..650a4be6bce5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c | |||
| @@ -100,10 +100,9 @@ static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p) | |||
| 100 | { | 100 | { |
| 101 | struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; | 101 | struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; |
| 102 | 102 | ||
| 103 | if (unlikely(priv->plat->has_gmac)) | 103 | /* Fill DES3 in case of RING mode */ |
| 104 | /* Fill DES3 in case of RING mode */ | 104 | if (priv->dma_buf_sz >= BUF_SIZE_8KiB) |
| 105 | if (priv->dma_buf_sz >= BUF_SIZE_8KiB) | 105 | p->des3 = p->des2 + BUF_SIZE_8KiB; |
| 106 | p->des3 = p->des2 + BUF_SIZE_8KiB; | ||
| 107 | } | 106 | } |
| 108 | 107 | ||
| 109 | /* In ring mode we need to fill the desc3 because it is used as buffer */ | 108 | /* In ring mode we need to fill the desc3 because it is used as buffer */ |
| @@ -126,7 +125,7 @@ static int stmmac_set_16kib_bfsize(int mtu) | |||
| 126 | return ret; | 125 | return ret; |
| 127 | } | 126 | } |
| 128 | 127 | ||
| 129 | const struct stmmac_ring_mode_ops ring_mode_ops = { | 128 | const struct stmmac_mode_ops ring_mode_ops = { |
| 130 | .is_jumbo_frm = stmmac_is_jumbo_frm, | 129 | .is_jumbo_frm = stmmac_is_jumbo_frm, |
| 131 | .jumbo_frm = stmmac_jumbo_frm, | 130 | .jumbo_frm = stmmac_jumbo_frm, |
| 132 | .refill_desc3 = stmmac_refill_desc3, | 131 | .refill_desc3 = stmmac_refill_desc3, |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index a2e7d2c96e36..8543e1cfd55e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
| @@ -92,8 +92,8 @@ static int tc = TC_DEFAULT; | |||
| 92 | module_param(tc, int, S_IRUGO | S_IWUSR); | 92 | module_param(tc, int, S_IRUGO | S_IWUSR); |
| 93 | MODULE_PARM_DESC(tc, "DMA threshold control value"); | 93 | MODULE_PARM_DESC(tc, "DMA threshold control value"); |
| 94 | 94 | ||
| 95 | #define DMA_BUFFER_SIZE BUF_SIZE_4KiB | 95 | #define DEFAULT_BUFSIZE 1536 |
| 96 | static int buf_sz = DMA_BUFFER_SIZE; | 96 | static int buf_sz = DEFAULT_BUFSIZE; |
| 97 | module_param(buf_sz, int, S_IRUGO | S_IWUSR); | 97 | module_param(buf_sz, int, S_IRUGO | S_IWUSR); |
| 98 | MODULE_PARM_DESC(buf_sz, "DMA buffer size"); | 98 | MODULE_PARM_DESC(buf_sz, "DMA buffer size"); |
| 99 | 99 | ||
| @@ -136,8 +136,8 @@ static void stmmac_verify_args(void) | |||
| 136 | dma_rxsize = DMA_RX_SIZE; | 136 | dma_rxsize = DMA_RX_SIZE; |
| 137 | if (unlikely(dma_txsize < 0)) | 137 | if (unlikely(dma_txsize < 0)) |
| 138 | dma_txsize = DMA_TX_SIZE; | 138 | dma_txsize = DMA_TX_SIZE; |
| 139 | if (unlikely((buf_sz < DMA_BUFFER_SIZE) || (buf_sz > BUF_SIZE_16KiB))) | 139 | if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB))) |
| 140 | buf_sz = DMA_BUFFER_SIZE; | 140 | buf_sz = DEFAULT_BUFSIZE; |
| 141 | if (unlikely(flow_ctrl > 1)) | 141 | if (unlikely(flow_ctrl > 1)) |
| 142 | flow_ctrl = FLOW_AUTO; | 142 | flow_ctrl = FLOW_AUTO; |
| 143 | else if (likely(flow_ctrl < 0)) | 143 | else if (likely(flow_ctrl < 0)) |
| @@ -286,10 +286,25 @@ bool stmmac_eee_init(struct stmmac_priv *priv) | |||
| 286 | 286 | ||
| 287 | /* MAC core supports the EEE feature. */ | 287 | /* MAC core supports the EEE feature. */ |
| 288 | if (priv->dma_cap.eee) { | 288 | if (priv->dma_cap.eee) { |
| 289 | int tx_lpi_timer = priv->tx_lpi_timer; | ||
| 290 | |||
| 289 | /* Check if the PHY supports EEE */ | 291 | /* Check if the PHY supports EEE */ |
| 290 | if (phy_init_eee(priv->phydev, 1)) | 292 | if (phy_init_eee(priv->phydev, 1)) { |
| 293 | /* To manage at run-time if the EEE cannot be supported | ||
| 294 | * anymore (for example because the lp caps have been | ||
| 295 | * changed). | ||
| 296 | * In that case the driver disable own timers. | ||
| 297 | */ | ||
| 298 | if (priv->eee_active) { | ||
| 299 | pr_debug("stmmac: disable EEE\n"); | ||
| 300 | del_timer_sync(&priv->eee_ctrl_timer); | ||
| 301 | priv->hw->mac->set_eee_timer(priv->ioaddr, 0, | ||
| 302 | tx_lpi_timer); | ||
| 303 | } | ||
| 304 | priv->eee_active = 0; | ||
| 291 | goto out; | 305 | goto out; |
| 292 | 306 | } | |
| 307 | /* Activate the EEE and start timers */ | ||
| 293 | if (!priv->eee_active) { | 308 | if (!priv->eee_active) { |
| 294 | priv->eee_active = 1; | 309 | priv->eee_active = 1; |
| 295 | init_timer(&priv->eee_ctrl_timer); | 310 | init_timer(&priv->eee_ctrl_timer); |
| @@ -300,13 +315,13 @@ bool stmmac_eee_init(struct stmmac_priv *priv) | |||
| 300 | 315 | ||
| 301 | priv->hw->mac->set_eee_timer(priv->ioaddr, | 316 | priv->hw->mac->set_eee_timer(priv->ioaddr, |
| 302 | STMMAC_DEFAULT_LIT_LS, | 317 | STMMAC_DEFAULT_LIT_LS, |
| 303 | priv->tx_lpi_timer); | 318 | tx_lpi_timer); |
| 304 | } else | 319 | } else |
| 305 | /* Set HW EEE according to the speed */ | 320 | /* Set HW EEE according to the speed */ |
| 306 | priv->hw->mac->set_eee_pls(priv->ioaddr, | 321 | priv->hw->mac->set_eee_pls(priv->ioaddr, |
| 307 | priv->phydev->link); | 322 | priv->phydev->link); |
| 308 | 323 | ||
| 309 | pr_info("stmmac: Energy-Efficient Ethernet initialized\n"); | 324 | pr_debug("stmmac: Energy-Efficient Ethernet initialized\n"); |
| 310 | 325 | ||
| 311 | ret = true; | 326 | ret = true; |
| 312 | } | 327 | } |
| @@ -886,10 +901,10 @@ static int stmmac_set_bfsize(int mtu, int bufsize) | |||
| 886 | ret = BUF_SIZE_8KiB; | 901 | ret = BUF_SIZE_8KiB; |
| 887 | else if (mtu >= BUF_SIZE_2KiB) | 902 | else if (mtu >= BUF_SIZE_2KiB) |
| 888 | ret = BUF_SIZE_4KiB; | 903 | ret = BUF_SIZE_4KiB; |
| 889 | else if (mtu >= DMA_BUFFER_SIZE) | 904 | else if (mtu > DEFAULT_BUFSIZE) |
| 890 | ret = BUF_SIZE_2KiB; | 905 | ret = BUF_SIZE_2KiB; |
| 891 | else | 906 | else |
| 892 | ret = DMA_BUFFER_SIZE; | 907 | ret = DEFAULT_BUFSIZE; |
| 893 | 908 | ||
| 894 | return ret; | 909 | return ret; |
| 895 | } | 910 | } |
| @@ -951,9 +966,9 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, | |||
| 951 | 966 | ||
| 952 | p->des2 = priv->rx_skbuff_dma[i]; | 967 | p->des2 = priv->rx_skbuff_dma[i]; |
| 953 | 968 | ||
| 954 | if ((priv->mode == STMMAC_RING_MODE) && | 969 | if ((priv->hw->mode->init_desc3) && |
| 955 | (priv->dma_buf_sz == BUF_SIZE_16KiB)) | 970 | (priv->dma_buf_sz == BUF_SIZE_16KiB)) |
| 956 | priv->hw->ring->init_desc3(p); | 971 | priv->hw->mode->init_desc3(p); |
| 957 | 972 | ||
| 958 | return 0; | 973 | return 0; |
| 959 | } | 974 | } |
| @@ -984,11 +999,8 @@ static int init_dma_desc_rings(struct net_device *dev) | |||
| 984 | unsigned int bfsize = 0; | 999 | unsigned int bfsize = 0; |
| 985 | int ret = -ENOMEM; | 1000 | int ret = -ENOMEM; |
| 986 | 1001 | ||
| 987 | /* Set the max buffer size according to the DESC mode | 1002 | if (priv->hw->mode->set_16kib_bfsize) |
| 988 | * and the MTU. Note that RING mode allows 16KiB bsize. | 1003 | bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu); |
| 989 | */ | ||
| 990 | if (priv->mode == STMMAC_RING_MODE) | ||
| 991 | bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu); | ||
| 992 | 1004 | ||
| 993 | if (bfsize < BUF_SIZE_16KiB) | 1005 | if (bfsize < BUF_SIZE_16KiB) |
| 994 | bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); | 1006 | bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); |
| @@ -1029,15 +1041,15 @@ static int init_dma_desc_rings(struct net_device *dev) | |||
| 1029 | /* Setup the chained descriptor addresses */ | 1041 | /* Setup the chained descriptor addresses */ |
| 1030 | if (priv->mode == STMMAC_CHAIN_MODE) { | 1042 | if (priv->mode == STMMAC_CHAIN_MODE) { |
| 1031 | if (priv->extend_desc) { | 1043 | if (priv->extend_desc) { |
| 1032 | priv->hw->chain->init(priv->dma_erx, priv->dma_rx_phy, | 1044 | priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy, |
| 1033 | rxsize, 1); | 1045 | rxsize, 1); |
| 1034 | priv->hw->chain->init(priv->dma_etx, priv->dma_tx_phy, | 1046 | priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy, |
| 1035 | txsize, 1); | 1047 | txsize, 1); |
| 1036 | } else { | 1048 | } else { |
| 1037 | priv->hw->chain->init(priv->dma_rx, priv->dma_rx_phy, | 1049 | priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy, |
| 1038 | rxsize, 0); | 1050 | rxsize, 0); |
| 1039 | priv->hw->chain->init(priv->dma_tx, priv->dma_tx_phy, | 1051 | priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy, |
| 1040 | txsize, 0); | 1052 | txsize, 0); |
| 1041 | } | 1053 | } |
| 1042 | } | 1054 | } |
| 1043 | 1055 | ||
| @@ -1288,7 +1300,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv) | |||
| 1288 | DMA_TO_DEVICE); | 1300 | DMA_TO_DEVICE); |
| 1289 | priv->tx_skbuff_dma[entry] = 0; | 1301 | priv->tx_skbuff_dma[entry] = 0; |
| 1290 | } | 1302 | } |
| 1291 | priv->hw->ring->clean_desc3(priv, p); | 1303 | priv->hw->mode->clean_desc3(priv, p); |
| 1292 | 1304 | ||
| 1293 | if (likely(skb != NULL)) { | 1305 | if (likely(skb != NULL)) { |
| 1294 | dev_kfree_skb(skb); | 1306 | dev_kfree_skb(skb); |
| @@ -1705,7 +1717,7 @@ static int stmmac_open(struct net_device *dev) | |||
| 1705 | priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize); | 1717 | priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize); |
| 1706 | priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); | 1718 | priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); |
| 1707 | 1719 | ||
| 1708 | alloc_dma_desc_resources(priv); | 1720 | ret = alloc_dma_desc_resources(priv); |
| 1709 | if (ret < 0) { | 1721 | if (ret < 0) { |
| 1710 | pr_err("%s: DMA descriptors allocation failed\n", __func__); | 1722 | pr_err("%s: DMA descriptors allocation failed\n", __func__); |
| 1711 | goto dma_desc_error; | 1723 | goto dma_desc_error; |
| @@ -1844,6 +1856,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 1844 | int nfrags = skb_shinfo(skb)->nr_frags; | 1856 | int nfrags = skb_shinfo(skb)->nr_frags; |
| 1845 | struct dma_desc *desc, *first; | 1857 | struct dma_desc *desc, *first; |
| 1846 | unsigned int nopaged_len = skb_headlen(skb); | 1858 | unsigned int nopaged_len = skb_headlen(skb); |
| 1859 | unsigned int enh_desc = priv->plat->enh_desc; | ||
| 1847 | 1860 | ||
| 1848 | if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) { | 1861 | if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) { |
| 1849 | if (!netif_queue_stopped(dev)) { | 1862 | if (!netif_queue_stopped(dev)) { |
| @@ -1871,27 +1884,19 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 1871 | first = desc; | 1884 | first = desc; |
| 1872 | 1885 | ||
| 1873 | /* To program the descriptors according to the size of the frame */ | 1886 | /* To program the descriptors according to the size of the frame */ |
| 1874 | if (priv->mode == STMMAC_RING_MODE) { | 1887 | if (enh_desc) |
| 1875 | is_jumbo = priv->hw->ring->is_jumbo_frm(skb->len, | 1888 | is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc); |
| 1876 | priv->plat->enh_desc); | 1889 | |
| 1877 | if (unlikely(is_jumbo)) | ||
| 1878 | entry = priv->hw->ring->jumbo_frm(priv, skb, | ||
| 1879 | csum_insertion); | ||
| 1880 | } else { | ||
| 1881 | is_jumbo = priv->hw->chain->is_jumbo_frm(skb->len, | ||
| 1882 | priv->plat->enh_desc); | ||
| 1883 | if (unlikely(is_jumbo)) | ||
| 1884 | entry = priv->hw->chain->jumbo_frm(priv, skb, | ||
| 1885 | csum_insertion); | ||
| 1886 | } | ||
| 1887 | if (likely(!is_jumbo)) { | 1890 | if (likely(!is_jumbo)) { |
| 1888 | desc->des2 = dma_map_single(priv->device, skb->data, | 1891 | desc->des2 = dma_map_single(priv->device, skb->data, |
| 1889 | nopaged_len, DMA_TO_DEVICE); | 1892 | nopaged_len, DMA_TO_DEVICE); |
| 1890 | priv->tx_skbuff_dma[entry] = desc->des2; | 1893 | priv->tx_skbuff_dma[entry] = desc->des2; |
| 1891 | priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, | 1894 | priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, |
| 1892 | csum_insertion, priv->mode); | 1895 | csum_insertion, priv->mode); |
| 1893 | } else | 1896 | } else { |
| 1894 | desc = first; | 1897 | desc = first; |
| 1898 | entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion); | ||
| 1899 | } | ||
| 1895 | 1900 | ||
| 1896 | for (i = 0; i < nfrags; i++) { | 1901 | for (i = 0; i < nfrags; i++) { |
| 1897 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 1902 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
| @@ -2029,7 +2034,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv) | |||
| 2029 | 2034 | ||
| 2030 | p->des2 = priv->rx_skbuff_dma[entry]; | 2035 | p->des2 = priv->rx_skbuff_dma[entry]; |
| 2031 | 2036 | ||
| 2032 | priv->hw->ring->refill_desc3(priv, p); | 2037 | priv->hw->mode->refill_desc3(priv, p); |
| 2033 | 2038 | ||
| 2034 | if (netif_msg_rx_status(priv)) | 2039 | if (netif_msg_rx_status(priv)) |
| 2035 | pr_debug("\trefill entry #%d\n", entry); | 2040 | pr_debug("\trefill entry #%d\n", entry); |
| @@ -2633,11 +2638,11 @@ static int stmmac_hw_init(struct stmmac_priv *priv) | |||
| 2633 | 2638 | ||
| 2634 | /* To use the chained or ring mode */ | 2639 | /* To use the chained or ring mode */ |
| 2635 | if (chain_mode) { | 2640 | if (chain_mode) { |
| 2636 | priv->hw->chain = &chain_mode_ops; | 2641 | priv->hw->mode = &chain_mode_ops; |
| 2637 | pr_info(" Chain mode enabled\n"); | 2642 | pr_info(" Chain mode enabled\n"); |
| 2638 | priv->mode = STMMAC_CHAIN_MODE; | 2643 | priv->mode = STMMAC_CHAIN_MODE; |
| 2639 | } else { | 2644 | } else { |
| 2640 | priv->hw->ring = &ring_mode_ops; | 2645 | priv->hw->mode = &ring_mode_ops; |
| 2641 | pr_info(" Ring mode enabled\n"); | 2646 | pr_info(" Ring mode enabled\n"); |
| 2642 | priv->mode = STMMAC_RING_MODE; | 2647 | priv->mode = STMMAC_RING_MODE; |
| 2643 | } | 2648 | } |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index c61bc72b8e90..8fb32a80f1c1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | |||
| @@ -36,7 +36,7 @@ static const struct of_device_id stmmac_dt_ids[] = { | |||
| 36 | #ifdef CONFIG_DWMAC_STI | 36 | #ifdef CONFIG_DWMAC_STI |
| 37 | { .compatible = "st,stih415-dwmac", .data = &sti_gmac_data}, | 37 | { .compatible = "st,stih415-dwmac", .data = &sti_gmac_data}, |
| 38 | { .compatible = "st,stih416-dwmac", .data = &sti_gmac_data}, | 38 | { .compatible = "st,stih416-dwmac", .data = &sti_gmac_data}, |
| 39 | { .compatible = "st,stih127-dwmac", .data = &sti_gmac_data}, | 39 | { .compatible = "st,stid127-dwmac", .data = &sti_gmac_data}, |
| 40 | #endif | 40 | #endif |
| 41 | /* SoC specific glue layers should come before generic bindings */ | 41 | /* SoC specific glue layers should come before generic bindings */ |
| 42 | { .compatible = "st,spear600-gmac"}, | 42 | { .compatible = "st,spear600-gmac"}, |
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 651087b5c8da..7d6d8ec676c8 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
| @@ -1164,11 +1164,17 @@ static void cpsw_init_host_port(struct cpsw_priv *priv) | |||
| 1164 | 1164 | ||
| 1165 | static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv) | 1165 | static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv) |
| 1166 | { | 1166 | { |
| 1167 | u32 slave_port; | ||
| 1168 | |||
| 1169 | slave_port = cpsw_get_slave_port(priv, slave->slave_num); | ||
| 1170 | |||
| 1167 | if (!slave->phy) | 1171 | if (!slave->phy) |
| 1168 | return; | 1172 | return; |
| 1169 | phy_stop(slave->phy); | 1173 | phy_stop(slave->phy); |
| 1170 | phy_disconnect(slave->phy); | 1174 | phy_disconnect(slave->phy); |
| 1171 | slave->phy = NULL; | 1175 | slave->phy = NULL; |
| 1176 | cpsw_ale_control_set(priv->ale, slave_port, | ||
| 1177 | ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); | ||
| 1172 | } | 1178 | } |
| 1173 | 1179 | ||
| 1174 | static int cpsw_ndo_open(struct net_device *ndev) | 1180 | static int cpsw_ndo_open(struct net_device *ndev) |
| @@ -2223,10 +2229,6 @@ static int cpsw_probe(struct platform_device *pdev) | |||
| 2223 | goto clean_ale_ret; | 2229 | goto clean_ale_ret; |
| 2224 | } | 2230 | } |
| 2225 | 2231 | ||
| 2226 | if (cpts_register(&pdev->dev, priv->cpts, | ||
| 2227 | data->cpts_clock_mult, data->cpts_clock_shift)) | ||
| 2228 | dev_err(priv->dev, "error registering cpts device\n"); | ||
| 2229 | |||
| 2230 | cpsw_notice(priv, probe, "initialized device (regs %pa, irq %d)\n", | 2232 | cpsw_notice(priv, probe, "initialized device (regs %pa, irq %d)\n", |
| 2231 | &ss_res->start, ndev->irq); | 2233 | &ss_res->start, ndev->irq); |
| 2232 | 2234 | ||
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 364d0c7952c0..88ef27067bf2 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c | |||
| @@ -355,7 +355,7 @@ int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr) | |||
| 355 | int i; | 355 | int i; |
| 356 | 356 | ||
| 357 | spin_lock_irqsave(&ctlr->lock, flags); | 357 | spin_lock_irqsave(&ctlr->lock, flags); |
| 358 | if (ctlr->state != CPDMA_STATE_ACTIVE) { | 358 | if (ctlr->state == CPDMA_STATE_TEARDOWN) { |
| 359 | spin_unlock_irqrestore(&ctlr->lock, flags); | 359 | spin_unlock_irqrestore(&ctlr->lock, flags); |
| 360 | return -EINVAL; | 360 | return -EINVAL; |
| 361 | } | 361 | } |
| @@ -891,7 +891,7 @@ int cpdma_chan_stop(struct cpdma_chan *chan) | |||
| 891 | unsigned timeout; | 891 | unsigned timeout; |
| 892 | 892 | ||
| 893 | spin_lock_irqsave(&chan->lock, flags); | 893 | spin_lock_irqsave(&chan->lock, flags); |
| 894 | if (chan->state != CPDMA_STATE_ACTIVE) { | 894 | if (chan->state == CPDMA_STATE_TEARDOWN) { |
| 895 | spin_unlock_irqrestore(&chan->lock, flags); | 895 | spin_unlock_irqrestore(&chan->lock, flags); |
| 896 | return -EINVAL; | 896 | return -EINVAL; |
| 897 | } | 897 | } |
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index cd9b164a0434..8f0e69ce07ca 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c | |||
| @@ -1532,9 +1532,9 @@ static int emac_dev_open(struct net_device *ndev) | |||
| 1532 | struct device *emac_dev = &ndev->dev; | 1532 | struct device *emac_dev = &ndev->dev; |
| 1533 | u32 cnt; | 1533 | u32 cnt; |
| 1534 | struct resource *res; | 1534 | struct resource *res; |
| 1535 | int ret; | 1535 | int q, m, ret; |
| 1536 | int res_num = 0, irq_num = 0; | ||
| 1536 | int i = 0; | 1537 | int i = 0; |
| 1537 | int k = 0; | ||
| 1538 | struct emac_priv *priv = netdev_priv(ndev); | 1538 | struct emac_priv *priv = netdev_priv(ndev); |
| 1539 | 1539 | ||
| 1540 | pm_runtime_get(&priv->pdev->dev); | 1540 | pm_runtime_get(&priv->pdev->dev); |
| @@ -1564,15 +1564,24 @@ static int emac_dev_open(struct net_device *ndev) | |||
| 1564 | } | 1564 | } |
| 1565 | 1565 | ||
| 1566 | /* Request IRQ */ | 1566 | /* Request IRQ */ |
| 1567 | while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, | ||
| 1568 | res_num))) { | ||
| 1569 | for (irq_num = res->start; irq_num <= res->end; irq_num++) { | ||
| 1570 | dev_err(emac_dev, "Request IRQ %d\n", irq_num); | ||
| 1571 | if (request_irq(irq_num, emac_irq, 0, ndev->name, | ||
| 1572 | ndev)) { | ||
| 1573 | dev_err(emac_dev, | ||
| 1574 | "DaVinci EMAC: request_irq() failed\n"); | ||
| 1575 | ret = -EBUSY; | ||
| 1567 | 1576 | ||
| 1568 | while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) { | ||
| 1569 | for (i = res->start; i <= res->end; i++) { | ||
| 1570 | if (devm_request_irq(&priv->pdev->dev, i, emac_irq, | ||
| 1571 | 0, ndev->name, ndev)) | ||
| 1572 | goto rollback; | 1577 | goto rollback; |
| 1578 | } | ||
| 1573 | } | 1579 | } |
| 1574 | k++; | 1580 | res_num++; |
| 1575 | } | 1581 | } |
| 1582 | /* prepare counters for rollback in case of an error */ | ||
| 1583 | res_num--; | ||
| 1584 | irq_num--; | ||
| 1576 | 1585 | ||
| 1577 | /* Start/Enable EMAC hardware */ | 1586 | /* Start/Enable EMAC hardware */ |
| 1578 | emac_hw_enable(priv); | 1587 | emac_hw_enable(priv); |
| @@ -1639,11 +1648,23 @@ static int emac_dev_open(struct net_device *ndev) | |||
| 1639 | 1648 | ||
| 1640 | return 0; | 1649 | return 0; |
| 1641 | 1650 | ||
| 1642 | rollback: | ||
| 1643 | |||
| 1644 | dev_err(emac_dev, "DaVinci EMAC: devm_request_irq() failed"); | ||
| 1645 | ret = -EBUSY; | ||
| 1646 | err: | 1651 | err: |
| 1652 | emac_int_disable(priv); | ||
| 1653 | napi_disable(&priv->napi); | ||
| 1654 | |||
| 1655 | rollback: | ||
| 1656 | for (q = res_num; q >= 0; q--) { | ||
| 1657 | res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, q); | ||
| 1658 | /* at the first iteration, irq_num is already set to the | ||
| 1659 | * right value | ||
| 1660 | */ | ||
| 1661 | if (q != res_num) | ||
| 1662 | irq_num = res->end; | ||
| 1663 | |||
| 1664 | for (m = irq_num; m >= res->start; m--) | ||
| 1665 | free_irq(m, ndev); | ||
| 1666 | } | ||
| 1667 | cpdma_ctlr_stop(priv->dma); | ||
| 1647 | pm_runtime_put(&priv->pdev->dev); | 1668 | pm_runtime_put(&priv->pdev->dev); |
| 1648 | return ret; | 1669 | return ret; |
| 1649 | } | 1670 | } |
| @@ -1659,6 +1680,9 @@ err: | |||
| 1659 | */ | 1680 | */ |
| 1660 | static int emac_dev_stop(struct net_device *ndev) | 1681 | static int emac_dev_stop(struct net_device *ndev) |
| 1661 | { | 1682 | { |
| 1683 | struct resource *res; | ||
| 1684 | int i = 0; | ||
| 1685 | int irq_num; | ||
| 1662 | struct emac_priv *priv = netdev_priv(ndev); | 1686 | struct emac_priv *priv = netdev_priv(ndev); |
| 1663 | struct device *emac_dev = &ndev->dev; | 1687 | struct device *emac_dev = &ndev->dev; |
| 1664 | 1688 | ||
| @@ -1674,6 +1698,13 @@ static int emac_dev_stop(struct net_device *ndev) | |||
| 1674 | if (priv->phydev) | 1698 | if (priv->phydev) |
| 1675 | phy_disconnect(priv->phydev); | 1699 | phy_disconnect(priv->phydev); |
| 1676 | 1700 | ||
| 1701 | /* Free IRQ */ | ||
| 1702 | while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) { | ||
| 1703 | for (irq_num = res->start; irq_num <= res->end; irq_num++) | ||
| 1704 | free_irq(irq_num, priv->ndev); | ||
| 1705 | i++; | ||
| 1706 | } | ||
| 1707 | |||
| 1677 | if (netif_msg_drv(priv)) | 1708 | if (netif_msg_drv(priv)) |
| 1678 | dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name); | 1709 | dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name); |
| 1679 | 1710 | ||
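The davinci_emac hunks replace the devm-based IRQ handling with explicit request_irq()/free_irq() pairs plus a reverse-order rollback when a later request fails. Reduced to its shape, with a flat array of IRQ numbers standing in for the platform resources, the scheme looks roughly like this hypothetical helper:

#include <linux/interrupt.h>

static int demo_request_irqs(int *irqs, int count, irq_handler_t handler,
                             void *dev_id)
{
        int i, ret;

        for (i = 0; i < count; i++) {
                ret = request_irq(irqs[i], handler, 0, "demo", dev_id);
                if (ret)
                        goto rollback;
        }
        return 0;

rollback:
        /* free only the IRQs already taken, in reverse order */
        while (--i >= 0)
                free_irq(irqs[i], dev_id);
        return ret;
}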
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index ef312bc6b865..6ac20a6738f4 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c | |||
| @@ -923,7 +923,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 923 | if (rc) { | 923 | if (rc) { |
| 924 | dev_err(&pdev->dev, | 924 | dev_err(&pdev->dev, |
| 925 | "32-bit PCI DMA addresses not supported by the card!?\n"); | 925 | "32-bit PCI DMA addresses not supported by the card!?\n"); |
| 926 | goto err_out; | 926 | goto err_out_pci_disable; |
| 927 | } | 927 | } |
| 928 | 928 | ||
| 929 | /* sanity check */ | 929 | /* sanity check */ |
| @@ -931,7 +931,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 931 | (pci_resource_len(pdev, 1) < io_size)) { | 931 | (pci_resource_len(pdev, 1) < io_size)) { |
| 932 | rc = -EIO; | 932 | rc = -EIO; |
| 933 | dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n"); | 933 | dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n"); |
| 934 | goto err_out; | 934 | goto err_out_pci_disable; |
| 935 | } | 935 | } |
| 936 | 936 | ||
| 937 | pioaddr = pci_resource_start(pdev, 0); | 937 | pioaddr = pci_resource_start(pdev, 0); |
| @@ -942,7 +942,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 942 | dev = alloc_etherdev(sizeof(struct rhine_private)); | 942 | dev = alloc_etherdev(sizeof(struct rhine_private)); |
| 943 | if (!dev) { | 943 | if (!dev) { |
| 944 | rc = -ENOMEM; | 944 | rc = -ENOMEM; |
| 945 | goto err_out; | 945 | goto err_out_pci_disable; |
| 946 | } | 946 | } |
| 947 | SET_NETDEV_DEV(dev, &pdev->dev); | 947 | SET_NETDEV_DEV(dev, &pdev->dev); |
| 948 | 948 | ||
| @@ -1084,6 +1084,8 @@ err_out_free_res: | |||
| 1084 | pci_release_regions(pdev); | 1084 | pci_release_regions(pdev); |
| 1085 | err_out_free_netdev: | 1085 | err_out_free_netdev: |
| 1086 | free_netdev(dev); | 1086 | free_netdev(dev); |
| 1087 | err_out_pci_disable: | ||
| 1088 | pci_disable_device(pdev); | ||
| 1087 | err_out: | 1089 | err_out: |
| 1088 | return rc; | 1090 | return rc; |
| 1089 | } | 1091 | } |
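The via-rhine fix above reroutes three early error paths to a new label so that pci_enable_device() is always undone. The general goto-ladder shape, shown here as a stripped-down hypothetical probe rather than the rhine code:

#include <linux/pci.h>

static int demo_probe(struct pci_dev *pdev)
{
        int rc;

        rc = pci_enable_device(pdev);
        if (rc)
                goto err_out;

        rc = pci_request_regions(pdev, "demo");
        if (rc)
                goto err_out_pci_disable;

        /* ... later failures jump to deeper labels that also release
         * the regions before falling through to the ones below ... */
        return 0;

err_out_pci_disable:
        pci_disable_device(pdev);       /* pairs with pci_enable_device() */
err_out:
        return rc;
}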
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 7141a1937360..d6fce9750b95 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
| @@ -442,6 +442,8 @@ static int netvsc_probe(struct hv_device *dev, | |||
| 442 | if (!net) | 442 | if (!net) |
| 443 | return -ENOMEM; | 443 | return -ENOMEM; |
| 444 | 444 | ||
| 445 | netif_carrier_off(net); | ||
| 446 | |||
| 445 | net_device_ctx = netdev_priv(net); | 447 | net_device_ctx = netdev_priv(net); |
| 446 | net_device_ctx->device_ctx = dev; | 448 | net_device_ctx->device_ctx = dev; |
| 447 | hv_set_drvdata(dev, net); | 449 | hv_set_drvdata(dev, net); |
| @@ -473,6 +475,8 @@ static int netvsc_probe(struct hv_device *dev, | |||
| 473 | pr_err("Unable to register netdev.\n"); | 475 | pr_err("Unable to register netdev.\n"); |
| 474 | rndis_filter_device_remove(dev); | 476 | rndis_filter_device_remove(dev); |
| 475 | free_netdev(net); | 477 | free_netdev(net); |
| 478 | } else { | ||
| 479 | schedule_delayed_work(&net_device_ctx->dwork, 0); | ||
| 476 | } | 480 | } |
| 477 | 481 | ||
| 478 | return ret; | 482 | return ret; |
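The netvsc hunk calls netif_carrier_off() on the freshly allocated net_device before register_netdev(), so userspace never observes a spurious link-up window, and only schedules the link-status work once registration has succeeded. A minimal sketch of that ordering, with an invented probe and private struct:

#include <linux/etherdevice.h>

struct demo_priv {
        struct delayed_work link_work;
};

static void demo_link_work(struct work_struct *work)
{
        /* query the device and call netif_carrier_on()/off() accordingly */
}

static int demo_probe(void)
{
        struct net_device *net;
        struct demo_priv *priv;
        int ret;

        net = alloc_etherdev(sizeof(struct demo_priv));
        if (!net)
                return -ENOMEM;

        netif_carrier_off(net);         /* no carrier until the device reports one */

        priv = netdev_priv(net);
        INIT_DELAYED_WORK(&priv->link_work, demo_link_work);

        /* ... fill in netdev_ops, MAC address, parent device ... */

        ret = register_netdev(net);
        if (ret) {
                free_netdev(net);
                return ret;
        }

        schedule_delayed_work(&priv->link_work, 0);
        return 0;
}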
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 1084e5de3ceb..b54fd257652b 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c | |||
| @@ -243,6 +243,22 @@ static int rndis_filter_send_request(struct rndis_device *dev, | |||
| 243 | return ret; | 243 | return ret; |
| 244 | } | 244 | } |
| 245 | 245 | ||
| 246 | static void rndis_set_link_state(struct rndis_device *rdev, | ||
| 247 | struct rndis_request *request) | ||
| 248 | { | ||
| 249 | u32 link_status; | ||
| 250 | struct rndis_query_complete *query_complete; | ||
| 251 | |||
| 252 | query_complete = &request->response_msg.msg.query_complete; | ||
| 253 | |||
| 254 | if (query_complete->status == RNDIS_STATUS_SUCCESS && | ||
| 255 | query_complete->info_buflen == sizeof(u32)) { | ||
| 256 | memcpy(&link_status, (void *)((unsigned long)query_complete + | ||
| 257 | query_complete->info_buf_offset), sizeof(u32)); | ||
| 258 | rdev->link_state = link_status != 0; | ||
| 259 | } | ||
| 260 | } | ||
| 261 | |||
| 246 | static void rndis_filter_receive_response(struct rndis_device *dev, | 262 | static void rndis_filter_receive_response(struct rndis_device *dev, |
| 247 | struct rndis_message *resp) | 263 | struct rndis_message *resp) |
| 248 | { | 264 | { |
| @@ -272,6 +288,10 @@ static void rndis_filter_receive_response(struct rndis_device *dev, | |||
| 272 | sizeof(struct rndis_message) + RNDIS_EXT_LEN) { | 288 | sizeof(struct rndis_message) + RNDIS_EXT_LEN) { |
| 273 | memcpy(&request->response_msg, resp, | 289 | memcpy(&request->response_msg, resp, |
| 274 | resp->msg_len); | 290 | resp->msg_len); |
| 291 | if (request->request_msg.ndis_msg_type == | ||
| 292 | RNDIS_MSG_QUERY && request->request_msg.msg. | ||
| 293 | query_req.oid == RNDIS_OID_GEN_MEDIA_CONNECT_STATUS) | ||
| 294 | rndis_set_link_state(dev, request); | ||
| 275 | } else { | 295 | } else { |
| 276 | netdev_err(ndev, | 296 | netdev_err(ndev, |
| 277 | "rndis response buffer overflow " | 297 | "rndis response buffer overflow " |
| @@ -620,7 +640,6 @@ static int rndis_filter_query_device_link_status(struct rndis_device *dev) | |||
| 620 | ret = rndis_filter_query_device(dev, | 640 | ret = rndis_filter_query_device(dev, |
| 621 | RNDIS_OID_GEN_MEDIA_CONNECT_STATUS, | 641 | RNDIS_OID_GEN_MEDIA_CONNECT_STATUS, |
| 622 | &link_status, &size); | 642 | &link_status, &size); |
| 623 | dev->link_state = (link_status != 0) ? true : false; | ||
| 624 | 643 | ||
| 625 | return ret; | 644 | return ret; |
| 626 | } | 645 | } |
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index ab31544bc254..a30258aad139 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c | |||
| @@ -546,12 +546,12 @@ at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb) | |||
| 546 | int rc; | 546 | int rc; |
| 547 | unsigned long flags; | 547 | unsigned long flags; |
| 548 | 548 | ||
| 549 | spin_lock(&lp->lock); | 549 | spin_lock_irqsave(&lp->lock, flags); |
| 550 | if (lp->irq_busy) { | 550 | if (lp->irq_busy) { |
| 551 | spin_unlock(&lp->lock); | 551 | spin_unlock_irqrestore(&lp->lock, flags); |
| 552 | return -EBUSY; | 552 | return -EBUSY; |
| 553 | } | 553 | } |
| 554 | spin_unlock(&lp->lock); | 554 | spin_unlock_irqrestore(&lp->lock, flags); |
| 555 | 555 | ||
| 556 | might_sleep(); | 556 | might_sleep(); |
| 557 | 557 | ||
| @@ -725,10 +725,11 @@ static void at86rf230_irqwork_level(struct work_struct *work) | |||
| 725 | static irqreturn_t at86rf230_isr(int irq, void *data) | 725 | static irqreturn_t at86rf230_isr(int irq, void *data) |
| 726 | { | 726 | { |
| 727 | struct at86rf230_local *lp = data; | 727 | struct at86rf230_local *lp = data; |
| 728 | unsigned long flags; | ||
| 728 | 729 | ||
| 729 | spin_lock(&lp->lock); | 730 | spin_lock_irqsave(&lp->lock, flags); |
| 730 | lp->irq_busy = 1; | 731 | lp->irq_busy = 1; |
| 731 | spin_unlock(&lp->lock); | 732 | spin_unlock_irqrestore(&lp->lock, flags); |
| 732 | 733 | ||
| 733 | schedule_work(&lp->irqwork); | 734 | schedule_work(&lp->irqwork); |
| 734 | 735 | ||
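The at86rf230 change converts plain spin_lock() to spin_lock_irqsave() because the same lock is taken from the hard-IRQ handler; without disabling interrupts on the process-context side, the ISR can preempt the lock holder on the same CPU and deadlock. A compact illustration of the two sides, using a made-up driver struct:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct demo_lp {
        spinlock_t lock;
        bool irq_busy;
};

static int demo_xmit(struct demo_lp *lp)
{
        unsigned long flags;

        spin_lock_irqsave(&lp->lock, flags);    /* plain spin_lock() could deadlock here */
        if (lp->irq_busy) {
                spin_unlock_irqrestore(&lp->lock, flags);
                return -EBUSY;
        }
        spin_unlock_irqrestore(&lp->lock, flags);

        /* ... start the transmission ... */
        return 0;
}

static irqreturn_t demo_isr(int irq, void *data)
{
        struct demo_lp *lp = data;
        unsigned long flags;

        spin_lock_irqsave(&lp->lock, flags);
        lp->irq_busy = true;
        spin_unlock_irqrestore(&lp->lock, flags);
        return IRQ_HANDLED;
}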
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index a5d21893670d..1831fb7cd017 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
| @@ -506,6 +506,9 @@ static int macvlan_change_mtu(struct net_device *dev, int new_mtu) | |||
| 506 | static struct lock_class_key macvlan_netdev_xmit_lock_key; | 506 | static struct lock_class_key macvlan_netdev_xmit_lock_key; |
| 507 | static struct lock_class_key macvlan_netdev_addr_lock_key; | 507 | static struct lock_class_key macvlan_netdev_addr_lock_key; |
| 508 | 508 | ||
| 509 | #define ALWAYS_ON_FEATURES \ | ||
| 510 | (NETIF_F_SG | NETIF_F_GEN_CSUM | NETIF_F_GSO_SOFTWARE | NETIF_F_LLTX) | ||
| 511 | |||
| 509 | #define MACVLAN_FEATURES \ | 512 | #define MACVLAN_FEATURES \ |
| 510 | (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ | 513 | (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ |
| 511 | NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \ | 514 | NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \ |
| @@ -539,7 +542,7 @@ static int macvlan_init(struct net_device *dev) | |||
| 539 | dev->state = (dev->state & ~MACVLAN_STATE_MASK) | | 542 | dev->state = (dev->state & ~MACVLAN_STATE_MASK) | |
| 540 | (lowerdev->state & MACVLAN_STATE_MASK); | 543 | (lowerdev->state & MACVLAN_STATE_MASK); |
| 541 | dev->features = lowerdev->features & MACVLAN_FEATURES; | 544 | dev->features = lowerdev->features & MACVLAN_FEATURES; |
| 542 | dev->features |= NETIF_F_LLTX; | 545 | dev->features |= ALWAYS_ON_FEATURES; |
| 543 | dev->gso_max_size = lowerdev->gso_max_size; | 546 | dev->gso_max_size = lowerdev->gso_max_size; |
| 544 | dev->iflink = lowerdev->ifindex; | 547 | dev->iflink = lowerdev->ifindex; |
| 545 | dev->hard_header_len = lowerdev->hard_header_len; | 548 | dev->hard_header_len = lowerdev->hard_header_len; |
| @@ -699,7 +702,7 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev, | |||
| 699 | features = netdev_increment_features(vlan->lowerdev->features, | 702 | features = netdev_increment_features(vlan->lowerdev->features, |
| 700 | features, | 703 | features, |
| 701 | mask); | 704 | mask); |
| 702 | features |= NETIF_F_LLTX; | 705 | features |= ALWAYS_ON_FEATURES; |
| 703 | 706 | ||
| 704 | return features; | 707 | return features; |
| 705 | } | 708 | } |
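The macvlan hunk groups the features the virtual device provides by itself into an ALWAYS_ON_FEATURES mask and re-asserts it in the fix_features hook, so those bits cannot be lost when the lower device lacks them. In outline, with an illustrative mask rather than macvlan's real one:

#include <linux/netdevice.h>

#define DEMO_ALWAYS_ON_FEATURES (NETIF_F_SG | NETIF_F_LLTX)

static netdev_features_t demo_fix_features(struct net_device *dev,
                                           netdev_features_t features)
{
        /* ... intersect 'features' with what the lower device offers ... */
        features |= DEMO_ALWAYS_ON_FEATURES;    /* never negotiated away */
        return features;
}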
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 19c9eca0ef26..76d96b9ebcdb 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
| @@ -164,9 +164,9 @@ static const struct phy_setting settings[] = { | |||
| 164 | * of that setting. Returns the index of the last setting if | 164 | * of that setting. Returns the index of the last setting if |
| 165 | * none of the others match. | 165 | * none of the others match. |
| 166 | */ | 166 | */ |
| 167 | static inline int phy_find_setting(int speed, int duplex) | 167 | static inline unsigned int phy_find_setting(int speed, int duplex) |
| 168 | { | 168 | { |
| 169 | int idx = 0; | 169 | unsigned int idx = 0; |
| 170 | 170 | ||
| 171 | while (idx < ARRAY_SIZE(settings) && | 171 | while (idx < ARRAY_SIZE(settings) && |
| 172 | (settings[idx].speed != speed || settings[idx].duplex != duplex)) | 172 | (settings[idx].speed != speed || settings[idx].duplex != duplex)) |
| @@ -185,7 +185,7 @@ static inline int phy_find_setting(int speed, int duplex) | |||
| 185 | * the mask in features. Returns the index of the last setting | 185 | * the mask in features. Returns the index of the last setting |
| 186 | * if nothing else matches. | 186 | * if nothing else matches. |
| 187 | */ | 187 | */ |
| 188 | static inline int phy_find_valid(int idx, u32 features) | 188 | static inline unsigned int phy_find_valid(unsigned int idx, u32 features) |
| 189 | { | 189 | { |
| 190 | while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features)) | 190 | while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features)) |
| 191 | idx++; | 191 | idx++; |
| @@ -204,7 +204,7 @@ static inline int phy_find_valid(int idx, u32 features) | |||
| 204 | static void phy_sanitize_settings(struct phy_device *phydev) | 204 | static void phy_sanitize_settings(struct phy_device *phydev) |
| 205 | { | 205 | { |
| 206 | u32 features = phydev->supported; | 206 | u32 features = phydev->supported; |
| 207 | int idx; | 207 | unsigned int idx; |
| 208 | 208 | ||
| 209 | /* Sanitize settings based on PHY capabilities */ | 209 | /* Sanitize settings based on PHY capabilities */ |
| 210 | if ((features & SUPPORTED_Autoneg) == 0) | 210 | if ((features & SUPPORTED_Autoneg) == 0) |
| @@ -954,7 +954,8 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) | |||
| 954 | (phydev->interface == PHY_INTERFACE_MODE_RGMII))) { | 954 | (phydev->interface == PHY_INTERFACE_MODE_RGMII))) { |
| 955 | int eee_lp, eee_cap, eee_adv; | 955 | int eee_lp, eee_cap, eee_adv; |
| 956 | u32 lp, cap, adv; | 956 | u32 lp, cap, adv; |
| 957 | int idx, status; | 957 | int status; |
| 958 | unsigned int idx; | ||
| 958 | 959 | ||
| 959 | /* Read phy status to properly get the right settings */ | 960 | /* Read phy status to properly get the right settings */ |
| 960 | status = phy_read_status(phydev); | 961 | status = phy_read_status(phydev); |
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 82514e72b3d8..2f6989b1e0dc 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c | |||
| @@ -683,10 +683,9 @@ EXPORT_SYMBOL(phy_detach); | |||
| 683 | int phy_suspend(struct phy_device *phydev) | 683 | int phy_suspend(struct phy_device *phydev) |
| 684 | { | 684 | { |
| 685 | struct phy_driver *phydrv = to_phy_driver(phydev->dev.driver); | 685 | struct phy_driver *phydrv = to_phy_driver(phydev->dev.driver); |
| 686 | struct ethtool_wolinfo wol; | 686 | struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; |
| 687 | 687 | ||
| 688 | /* If the device has WOL enabled, we cannot suspend the PHY */ | 688 | /* If the device has WOL enabled, we cannot suspend the PHY */ |
| 689 | wol.cmd = ETHTOOL_GWOL; | ||
| 690 | phy_ethtool_get_wol(phydev, &wol); | 689 | phy_ethtool_get_wol(phydev, &wol); |
| 691 | if (wol.wolopts) | 690 | if (wol.wolopts) |
| 692 | return -EBUSY; | 691 | return -EBUSY; |
| @@ -916,6 +915,8 @@ int genphy_read_status(struct phy_device *phydev) | |||
| 916 | int err; | 915 | int err; |
| 917 | int lpa; | 916 | int lpa; |
| 918 | int lpagb = 0; | 917 | int lpagb = 0; |
| 918 | int common_adv; | ||
| 919 | int common_adv_gb = 0; | ||
| 919 | 920 | ||
| 920 | /* Update the link, but return if there was an error */ | 921 | /* Update the link, but return if there was an error */ |
| 921 | err = genphy_update_link(phydev); | 922 | err = genphy_update_link(phydev); |
| @@ -937,7 +938,7 @@ int genphy_read_status(struct phy_device *phydev) | |||
| 937 | 938 | ||
| 938 | phydev->lp_advertising = | 939 | phydev->lp_advertising = |
| 939 | mii_stat1000_to_ethtool_lpa_t(lpagb); | 940 | mii_stat1000_to_ethtool_lpa_t(lpagb); |
| 940 | lpagb &= adv << 2; | 941 | common_adv_gb = lpagb & adv << 2; |
| 941 | } | 942 | } |
| 942 | 943 | ||
| 943 | lpa = phy_read(phydev, MII_LPA); | 944 | lpa = phy_read(phydev, MII_LPA); |
| @@ -950,25 +951,25 @@ int genphy_read_status(struct phy_device *phydev) | |||
| 950 | if (adv < 0) | 951 | if (adv < 0) |
| 951 | return adv; | 952 | return adv; |
| 952 | 953 | ||
| 953 | lpa &= adv; | 954 | common_adv = lpa & adv; |
| 954 | 955 | ||
| 955 | phydev->speed = SPEED_10; | 956 | phydev->speed = SPEED_10; |
| 956 | phydev->duplex = DUPLEX_HALF; | 957 | phydev->duplex = DUPLEX_HALF; |
| 957 | phydev->pause = 0; | 958 | phydev->pause = 0; |
| 958 | phydev->asym_pause = 0; | 959 | phydev->asym_pause = 0; |
| 959 | 960 | ||
| 960 | if (lpagb & (LPA_1000FULL | LPA_1000HALF)) { | 961 | if (common_adv_gb & (LPA_1000FULL | LPA_1000HALF)) { |
| 961 | phydev->speed = SPEED_1000; | 962 | phydev->speed = SPEED_1000; |
| 962 | 963 | ||
| 963 | if (lpagb & LPA_1000FULL) | 964 | if (common_adv_gb & LPA_1000FULL) |
| 964 | phydev->duplex = DUPLEX_FULL; | 965 | phydev->duplex = DUPLEX_FULL; |
| 965 | } else if (lpa & (LPA_100FULL | LPA_100HALF)) { | 966 | } else if (common_adv & (LPA_100FULL | LPA_100HALF)) { |
| 966 | phydev->speed = SPEED_100; | 967 | phydev->speed = SPEED_100; |
| 967 | 968 | ||
| 968 | if (lpa & LPA_100FULL) | 969 | if (common_adv & LPA_100FULL) |
| 969 | phydev->duplex = DUPLEX_FULL; | 970 | phydev->duplex = DUPLEX_FULL; |
| 970 | } else | 971 | } else |
| 971 | if (lpa & LPA_10FULL) | 972 | if (common_adv & LPA_10FULL) |
| 972 | phydev->duplex = DUPLEX_FULL; | 973 | phydev->duplex = DUPLEX_FULL; |
| 973 | 974 | ||
| 974 | if (phydev->duplex == DUPLEX_FULL) { | 975 | if (phydev->duplex == DUPLEX_FULL) { |
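The phy_device.c hunk stops masking the raw link-partner ability words in place: lp_advertising keeps what the partner actually advertised, while a separate common word (local AND partner) is used to resolve speed and duplex. Approximately, with the register handling reduced to plain integers in a hypothetical helper:

#include <linux/mii.h>
#include <linux/ethtool.h>

static void demo_resolve_aneg(u16 adv, u16 lpa,
                              int *speed, int *duplex, u16 *lp_report)
{
        u16 common = adv & lpa;         /* modes both ends advertise */

        *lp_report = lpa;               /* report the partner's word unmasked */

        if (common & (LPA_100FULL | LPA_100HALF)) {
                *speed  = SPEED_100;
                *duplex = (common & LPA_100FULL) ? DUPLEX_FULL : DUPLEX_HALF;
        } else {
                *speed  = SPEED_10;
                *duplex = (common & LPA_10FULL) ? DUPLEX_FULL : DUPLEX_HALF;
        }
}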
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 8fe9cb7d0f72..26f8635b027d 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
| @@ -1686,7 +1686,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) | |||
| 1686 | TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | | 1686 | TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | |
| 1687 | NETIF_F_HW_VLAN_STAG_TX; | 1687 | NETIF_F_HW_VLAN_STAG_TX; |
| 1688 | dev->features = dev->hw_features; | 1688 | dev->features = dev->hw_features; |
| 1689 | dev->vlan_features = dev->features; | 1689 | dev->vlan_features = dev->features & |
| 1690 | ~(NETIF_F_HW_VLAN_CTAG_TX | | ||
| 1691 | NETIF_F_HW_VLAN_STAG_TX); | ||
| 1690 | 1692 | ||
| 1691 | INIT_LIST_HEAD(&tun->disabled); | 1693 | INIT_LIST_HEAD(&tun->disabled); |
| 1692 | err = tun_attach(tun, file, false); | 1694 | err = tun_attach(tun, file, false); |
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile index 433f0a00c683..e2797f1e1b31 100644 --- a/drivers/net/usb/Makefile +++ b/drivers/net/usb/Makefile | |||
| @@ -11,7 +11,7 @@ obj-$(CONFIG_USB_HSO) += hso.o | |||
| 11 | obj-$(CONFIG_USB_NET_AX8817X) += asix.o | 11 | obj-$(CONFIG_USB_NET_AX8817X) += asix.o |
| 12 | asix-y := asix_devices.o asix_common.o ax88172a.o | 12 | asix-y := asix_devices.o asix_common.o ax88172a.o |
| 13 | obj-$(CONFIG_USB_NET_AX88179_178A) += ax88179_178a.o | 13 | obj-$(CONFIG_USB_NET_AX88179_178A) += ax88179_178a.o |
| 14 | obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o r815x.o | 14 | obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o |
| 15 | obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o | 15 | obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o |
| 16 | obj-$(CONFIG_USB_NET_DM9601) += dm9601.o | 16 | obj-$(CONFIG_USB_NET_DM9601) += dm9601.o |
| 17 | obj-$(CONFIG_USB_NET_SR9700) += sr9700.o | 17 | obj-$(CONFIG_USB_NET_SR9700) += sr9700.o |
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index 955df81a4358..054e59ca6946 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c | |||
| @@ -1029,20 +1029,12 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf) | |||
| 1029 | dev->mii.phy_id = 0x03; | 1029 | dev->mii.phy_id = 0x03; |
| 1030 | dev->mii.supports_gmii = 1; | 1030 | dev->mii.supports_gmii = 1; |
| 1031 | 1031 | ||
| 1032 | if (usb_device_no_sg_constraint(dev->udev)) | ||
| 1033 | dev->can_dma_sg = 1; | ||
| 1034 | |||
| 1035 | dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | | 1032 | dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
| 1036 | NETIF_F_RXCSUM; | 1033 | NETIF_F_RXCSUM; |
| 1037 | 1034 | ||
| 1038 | dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | | 1035 | dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
| 1039 | NETIF_F_RXCSUM; | 1036 | NETIF_F_RXCSUM; |
| 1040 | 1037 | ||
| 1041 | if (dev->can_dma_sg) { | ||
| 1042 | dev->net->features |= NETIF_F_SG | NETIF_F_TSO; | ||
| 1043 | dev->net->hw_features |= NETIF_F_SG | NETIF_F_TSO; | ||
| 1044 | } | ||
| 1045 | |||
| 1046 | /* Enable checksum offload */ | 1038 | /* Enable checksum offload */ |
| 1047 | *tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP | | 1039 | *tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP | |
| 1048 | AX_RXCOE_TCPV6 | AX_RXCOE_UDPV6; | 1040 | AX_RXCOE_TCPV6 | AX_RXCOE_UDPV6; |
| @@ -1395,6 +1387,19 @@ static const struct driver_info ax88178a_info = { | |||
| 1395 | .tx_fixup = ax88179_tx_fixup, | 1387 | .tx_fixup = ax88179_tx_fixup, |
| 1396 | }; | 1388 | }; |
| 1397 | 1389 | ||
| 1390 | static const struct driver_info dlink_dub1312_info = { | ||
| 1391 | .description = "D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter", | ||
| 1392 | .bind = ax88179_bind, | ||
| 1393 | .unbind = ax88179_unbind, | ||
| 1394 | .status = ax88179_status, | ||
| 1395 | .link_reset = ax88179_link_reset, | ||
| 1396 | .reset = ax88179_reset, | ||
| 1397 | .stop = ax88179_stop, | ||
| 1398 | .flags = FLAG_ETHER | FLAG_FRAMING_AX, | ||
| 1399 | .rx_fixup = ax88179_rx_fixup, | ||
| 1400 | .tx_fixup = ax88179_tx_fixup, | ||
| 1401 | }; | ||
| 1402 | |||
| 1398 | static const struct driver_info sitecom_info = { | 1403 | static const struct driver_info sitecom_info = { |
| 1399 | .description = "Sitecom USB 3.0 to Gigabit Adapter", | 1404 | .description = "Sitecom USB 3.0 to Gigabit Adapter", |
| 1400 | .bind = ax88179_bind, | 1405 | .bind = ax88179_bind, |
| @@ -1421,6 +1426,19 @@ static const struct driver_info samsung_info = { | |||
| 1421 | .tx_fixup = ax88179_tx_fixup, | 1426 | .tx_fixup = ax88179_tx_fixup, |
| 1422 | }; | 1427 | }; |
| 1423 | 1428 | ||
| 1429 | static const struct driver_info lenovo_info = { | ||
| 1430 | .description = "Lenovo OneLinkDock Gigabit LAN", | ||
| 1431 | .bind = ax88179_bind, | ||
| 1432 | .unbind = ax88179_unbind, | ||
| 1433 | .status = ax88179_status, | ||
| 1434 | .link_reset = ax88179_link_reset, | ||
| 1435 | .reset = ax88179_reset, | ||
| 1436 | .stop = ax88179_stop, | ||
| 1437 | .flags = FLAG_ETHER | FLAG_FRAMING_AX, | ||
| 1438 | .rx_fixup = ax88179_rx_fixup, | ||
| 1439 | .tx_fixup = ax88179_tx_fixup, | ||
| 1440 | }; | ||
| 1441 | |||
| 1424 | static const struct usb_device_id products[] = { | 1442 | static const struct usb_device_id products[] = { |
| 1425 | { | 1443 | { |
| 1426 | /* ASIX AX88179 10/100/1000 */ | 1444 | /* ASIX AX88179 10/100/1000 */ |
| @@ -1431,6 +1449,10 @@ static const struct usb_device_id products[] = { | |||
| 1431 | USB_DEVICE(0x0b95, 0x178a), | 1449 | USB_DEVICE(0x0b95, 0x178a), |
| 1432 | .driver_info = (unsigned long)&ax88178a_info, | 1450 | .driver_info = (unsigned long)&ax88178a_info, |
| 1433 | }, { | 1451 | }, { |
| 1452 | /* D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter */ | ||
| 1453 | USB_DEVICE(0x2001, 0x4a00), | ||
| 1454 | .driver_info = (unsigned long)&dlink_dub1312_info, | ||
| 1455 | }, { | ||
| 1434 | /* Sitecom USB 3.0 to Gigabit Adapter */ | 1456 | /* Sitecom USB 3.0 to Gigabit Adapter */ |
| 1435 | USB_DEVICE(0x0df6, 0x0072), | 1457 | USB_DEVICE(0x0df6, 0x0072), |
| 1436 | .driver_info = (unsigned long)&sitecom_info, | 1458 | .driver_info = (unsigned long)&sitecom_info, |
| @@ -1438,6 +1460,10 @@ static const struct usb_device_id products[] = { | |||
| 1438 | /* Samsung USB Ethernet Adapter */ | 1460 | /* Samsung USB Ethernet Adapter */ |
| 1439 | USB_DEVICE(0x04e8, 0xa100), | 1461 | USB_DEVICE(0x04e8, 0xa100), |
| 1440 | .driver_info = (unsigned long)&samsung_info, | 1462 | .driver_info = (unsigned long)&samsung_info, |
| 1463 | }, { | ||
| 1464 | /* Lenovo OneLinkDock Gigabit LAN */ | ||
| 1465 | USB_DEVICE(0x17ef, 0x304b), | ||
| 1466 | .driver_info = (unsigned long)&lenovo_info, | ||
| 1441 | }, | 1467 | }, |
| 1442 | { }, | 1468 | { }, |
| 1443 | }; | 1469 | }; |
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 42e176912c8e..bd363b27e854 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c | |||
| @@ -652,6 +652,13 @@ static const struct usb_device_id products[] = { | |||
| 652 | .driver_info = 0, | 652 | .driver_info = 0, |
| 653 | }, | 653 | }, |
| 654 | 654 | ||
| 655 | /* Samsung USB Ethernet Adapters */ | ||
| 656 | { | ||
| 657 | USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, 0xa101, USB_CLASS_COMM, | ||
| 658 | USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), | ||
| 659 | .driver_info = 0, | ||
| 660 | }, | ||
| 661 | |||
| 655 | /* WHITELIST!!! | 662 | /* WHITELIST!!! |
| 656 | * | 663 | * |
| 657 | * CDC Ether uses two interfaces, not necessarily consecutive. | 664 | * CDC Ether uses two interfaces, not necessarily consecutive. |
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index dbff290ed0e4..d350d2795e10 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c | |||
| @@ -68,7 +68,6 @@ static struct usb_driver cdc_ncm_driver; | |||
| 68 | static int cdc_ncm_setup(struct usbnet *dev) | 68 | static int cdc_ncm_setup(struct usbnet *dev) |
| 69 | { | 69 | { |
| 70 | struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; | 70 | struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; |
| 71 | struct usb_cdc_ncm_ntb_parameters ncm_parm; | ||
| 72 | u32 val; | 71 | u32 val; |
| 73 | u8 flags; | 72 | u8 flags; |
| 74 | u8 iface_no; | 73 | u8 iface_no; |
| @@ -82,22 +81,22 @@ static int cdc_ncm_setup(struct usbnet *dev) | |||
| 82 | err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_PARAMETERS, | 81 | err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_PARAMETERS, |
| 83 | USB_TYPE_CLASS | USB_DIR_IN | 82 | USB_TYPE_CLASS | USB_DIR_IN |
| 84 | |USB_RECIP_INTERFACE, | 83 | |USB_RECIP_INTERFACE, |
| 85 | 0, iface_no, &ncm_parm, | 84 | 0, iface_no, &ctx->ncm_parm, |
| 86 | sizeof(ncm_parm)); | 85 | sizeof(ctx->ncm_parm)); |
| 87 | if (err < 0) { | 86 | if (err < 0) { |
| 88 | dev_err(&dev->intf->dev, "failed GET_NTB_PARAMETERS\n"); | 87 | dev_err(&dev->intf->dev, "failed GET_NTB_PARAMETERS\n"); |
| 89 | return err; /* GET_NTB_PARAMETERS is required */ | 88 | return err; /* GET_NTB_PARAMETERS is required */ |
| 90 | } | 89 | } |
| 91 | 90 | ||
| 92 | /* read correct set of parameters according to device mode */ | 91 | /* read correct set of parameters according to device mode */ |
| 93 | ctx->rx_max = le32_to_cpu(ncm_parm.dwNtbInMaxSize); | 92 | ctx->rx_max = le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize); |
| 94 | ctx->tx_max = le32_to_cpu(ncm_parm.dwNtbOutMaxSize); | 93 | ctx->tx_max = le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize); |
| 95 | ctx->tx_remainder = le16_to_cpu(ncm_parm.wNdpOutPayloadRemainder); | 94 | ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder); |
| 96 | ctx->tx_modulus = le16_to_cpu(ncm_parm.wNdpOutDivisor); | 95 | ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor); |
| 97 | ctx->tx_ndp_modulus = le16_to_cpu(ncm_parm.wNdpOutAlignment); | 96 | ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment); |
| 98 | /* devices prior to NCM Errata shall set this field to zero */ | 97 | /* devices prior to NCM Errata shall set this field to zero */ |
| 99 | ctx->tx_max_datagrams = le16_to_cpu(ncm_parm.wNtbOutMaxDatagrams); | 98 | ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams); |
| 100 | ntb_fmt_supported = le16_to_cpu(ncm_parm.bmNtbFormatsSupported); | 99 | ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported); |
| 101 | 100 | ||
| 102 | /* there are some minor differences in NCM and MBIM defaults */ | 101 | /* there are some minor differences in NCM and MBIM defaults */ |
| 103 | if (cdc_ncm_comm_intf_is_mbim(ctx->control->cur_altsetting)) { | 102 | if (cdc_ncm_comm_intf_is_mbim(ctx->control->cur_altsetting)) { |
| @@ -146,7 +145,7 @@ static int cdc_ncm_setup(struct usbnet *dev) | |||
| 146 | } | 145 | } |
| 147 | 146 | ||
| 148 | /* inform device about NTB input size changes */ | 147 | /* inform device about NTB input size changes */ |
| 149 | if (ctx->rx_max != le32_to_cpu(ncm_parm.dwNtbInMaxSize)) { | 148 | if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) { |
| 150 | __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max); | 149 | __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max); |
| 151 | 150 | ||
| 152 | err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE, | 151 | err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE, |
| @@ -162,14 +161,6 @@ static int cdc_ncm_setup(struct usbnet *dev) | |||
| 162 | dev_dbg(&dev->intf->dev, "Using default maximum transmit length=%d\n", | 161 | dev_dbg(&dev->intf->dev, "Using default maximum transmit length=%d\n", |
| 163 | CDC_NCM_NTB_MAX_SIZE_TX); | 162 | CDC_NCM_NTB_MAX_SIZE_TX); |
| 164 | ctx->tx_max = CDC_NCM_NTB_MAX_SIZE_TX; | 163 | ctx->tx_max = CDC_NCM_NTB_MAX_SIZE_TX; |
| 165 | |||
| 166 | /* Adding a pad byte here simplifies the handling in | ||
| 167 | * cdc_ncm_fill_tx_frame, by making tx_max always | ||
| 168 | * represent the real skb max size. | ||
| 169 | */ | ||
| 170 | if (ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0) | ||
| 171 | ctx->tx_max++; | ||
| 172 | |||
| 173 | } | 164 | } |
| 174 | 165 | ||
| 175 | /* | 166 | /* |
| @@ -439,6 +430,10 @@ advance: | |||
| 439 | goto error2; | 430 | goto error2; |
| 440 | } | 431 | } |
| 441 | 432 | ||
| 433 | /* initialize data interface */ | ||
| 434 | if (cdc_ncm_setup(dev)) | ||
| 435 | goto error2; | ||
| 436 | |||
| 442 | /* configure data interface */ | 437 | /* configure data interface */ |
| 443 | temp = usb_set_interface(dev->udev, iface_no, data_altsetting); | 438 | temp = usb_set_interface(dev->udev, iface_no, data_altsetting); |
| 444 | if (temp) { | 439 | if (temp) { |
| @@ -453,12 +448,6 @@ advance: | |||
| 453 | goto error2; | 448 | goto error2; |
| 454 | } | 449 | } |
| 455 | 450 | ||
| 456 | /* initialize data interface */ | ||
| 457 | if (cdc_ncm_setup(dev)) { | ||
| 458 | dev_dbg(&intf->dev, "cdc_ncm_setup() failed\n"); | ||
| 459 | goto error2; | ||
| 460 | } | ||
| 461 | |||
| 462 | usb_set_intfdata(ctx->data, dev); | 451 | usb_set_intfdata(ctx->data, dev); |
| 463 | usb_set_intfdata(ctx->control, dev); | 452 | usb_set_intfdata(ctx->control, dev); |
| 464 | 453 | ||
| @@ -475,6 +464,15 @@ advance: | |||
| 475 | dev->hard_mtu = ctx->tx_max; | 464 | dev->hard_mtu = ctx->tx_max; |
| 476 | dev->rx_urb_size = ctx->rx_max; | 465 | dev->rx_urb_size = ctx->rx_max; |
| 477 | 466 | ||
| 467 | /* cdc_ncm_setup will override dwNtbOutMaxSize if it is | ||
| 468 | * outside the sane range. Adding a pad byte here if necessary | ||
| 469 | * simplifies the handling in cdc_ncm_fill_tx_frame, making | ||
| 470 | * tx_max always represent the real skb max size. | ||
| 471 | */ | ||
| 472 | if (ctx->tx_max != le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) && | ||
| 473 | ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0) | ||
| 474 | ctx->tx_max++; | ||
| 475 | |||
| 478 | return 0; | 476 | return 0; |
| 479 | 477 | ||
| 480 | error2: | 478 | error2: |
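The cdc_ncm hunks above move the pad-byte adjustment so it only runs after cdc_ncm_setup() has had a chance to clamp dwNtbOutMaxSize, keeping tx_max equal to the real skb max size. A minimal standalone sketch of the rule itself; adjust_tx_max() and its parameters are illustrative, not the driver's API:

    /* If the buffer size is an exact multiple of the endpoint's max packet
     * size, grow it by one byte so the allocated skb size always equals the
     * real maximum transfer length (a whole number of max-size packets would
     * otherwise need a separate short/zero-length packet to terminate). */
    #include <stdio.h>

    static unsigned int adjust_tx_max(unsigned int tx_max, unsigned int maxpacket)
    {
        if (maxpacket && tx_max % maxpacket == 0)
            tx_max++;
        return tx_max;
    }

    int main(void)
    {
        printf("%u\n", adjust_tx_max(16384, 512));  /* prints 16385 */
        printf("%u\n", adjust_tx_max(16383, 512));  /* prints 16383 */
        return 0;
    }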
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index d89dbe395ad2..adb12f349a61 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
| @@ -449,9 +449,6 @@ enum rtl8152_flags { | |||
| 449 | #define MCU_TYPE_PLA 0x0100 | 449 | #define MCU_TYPE_PLA 0x0100 |
| 450 | #define MCU_TYPE_USB 0x0000 | 450 | #define MCU_TYPE_USB 0x0000 |
| 451 | 451 | ||
| 452 | #define REALTEK_USB_DEVICE(vend, prod) \ | ||
| 453 | USB_DEVICE_INTERFACE_CLASS(vend, prod, USB_CLASS_VENDOR_SPEC) | ||
| 454 | |||
| 455 | struct rx_desc { | 452 | struct rx_desc { |
| 456 | __le32 opts1; | 453 | __le32 opts1; |
| 457 | #define RX_LEN_MASK 0x7fff | 454 | #define RX_LEN_MASK 0x7fff |
| @@ -2739,6 +2736,12 @@ static int rtl8152_probe(struct usb_interface *intf, | |||
| 2739 | struct net_device *netdev; | 2736 | struct net_device *netdev; |
| 2740 | int ret; | 2737 | int ret; |
| 2741 | 2738 | ||
| 2739 | if (udev->actconfig->desc.bConfigurationValue != 1) { | ||
| 2740 | usb_driver_set_configuration(udev, 1); | ||
| 2741 | return -ENODEV; | ||
| 2742 | } | ||
| 2743 | |||
| 2744 | usb_reset_device(udev); | ||
| 2742 | netdev = alloc_etherdev(sizeof(struct r8152)); | 2745 | netdev = alloc_etherdev(sizeof(struct r8152)); |
| 2743 | if (!netdev) { | 2746 | if (!netdev) { |
| 2744 | dev_err(&intf->dev, "Out of memory\n"); | 2747 | dev_err(&intf->dev, "Out of memory\n"); |
| @@ -2819,9 +2822,9 @@ static void rtl8152_disconnect(struct usb_interface *intf) | |||
| 2819 | 2822 | ||
| 2820 | /* table of devices that work with this driver */ | 2823 | /* table of devices that work with this driver */ |
| 2821 | static struct usb_device_id rtl8152_table[] = { | 2824 | static struct usb_device_id rtl8152_table[] = { |
| 2822 | {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8152)}, | 2825 | {USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8152)}, |
| 2823 | {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8153)}, | 2826 | {USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8153)}, |
| 2824 | {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, PRODUCT_ID_SAMSUNG)}, | 2827 | {USB_DEVICE(VENDOR_ID_SAMSUNG, PRODUCT_ID_SAMSUNG)}, |
| 2825 | {} | 2828 | {} |
| 2826 | }; | 2829 | }; |
| 2827 | 2830 | ||
diff --git a/drivers/net/usb/r815x.c b/drivers/net/usb/r815x.c deleted file mode 100644 index f0a8791b7636..000000000000 --- a/drivers/net/usb/r815x.c +++ /dev/null | |||
| @@ -1,248 +0,0 @@ | |||
| 1 | #include <linux/module.h> | ||
| 2 | #include <linux/netdevice.h> | ||
| 3 | #include <linux/mii.h> | ||
| 4 | #include <linux/usb.h> | ||
| 5 | #include <linux/usb/cdc.h> | ||
| 6 | #include <linux/usb/usbnet.h> | ||
| 7 | |||
| 8 | #define RTL815x_REQT_READ 0xc0 | ||
| 9 | #define RTL815x_REQT_WRITE 0x40 | ||
| 10 | #define RTL815x_REQ_GET_REGS 0x05 | ||
| 11 | #define RTL815x_REQ_SET_REGS 0x05 | ||
| 12 | |||
| 13 | #define MCU_TYPE_PLA 0x0100 | ||
| 14 | #define OCP_BASE 0xe86c | ||
| 15 | #define BASE_MII 0xa400 | ||
| 16 | |||
| 17 | #define BYTE_EN_DWORD 0xff | ||
| 18 | #define BYTE_EN_WORD 0x33 | ||
| 19 | #define BYTE_EN_BYTE 0x11 | ||
| 20 | |||
| 21 | #define R815x_PHY_ID 32 | ||
| 22 | #define REALTEK_VENDOR_ID 0x0bda | ||
| 23 | |||
| 24 | |||
| 25 | static int pla_read_word(struct usb_device *udev, u16 index) | ||
| 26 | { | ||
| 27 | int ret; | ||
| 28 | u8 shift = index & 2; | ||
| 29 | __le32 *tmp; | ||
| 30 | |||
| 31 | tmp = kmalloc(sizeof(*tmp), GFP_KERNEL); | ||
| 32 | if (!tmp) | ||
| 33 | return -ENOMEM; | ||
| 34 | |||
| 35 | index &= ~3; | ||
| 36 | |||
| 37 | ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), | ||
| 38 | RTL815x_REQ_GET_REGS, RTL815x_REQT_READ, | ||
| 39 | index, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500); | ||
| 40 | if (ret < 0) | ||
| 41 | goto out2; | ||
| 42 | |||
| 43 | ret = __le32_to_cpu(*tmp); | ||
| 44 | ret >>= (shift * 8); | ||
| 45 | ret &= 0xffff; | ||
| 46 | |||
| 47 | out2: | ||
| 48 | kfree(tmp); | ||
| 49 | return ret; | ||
| 50 | } | ||
| 51 | |||
| 52 | static int pla_write_word(struct usb_device *udev, u16 index, u32 data) | ||
| 53 | { | ||
| 54 | __le32 *tmp; | ||
| 55 | u32 mask = 0xffff; | ||
| 56 | u16 byen = BYTE_EN_WORD; | ||
| 57 | u8 shift = index & 2; | ||
| 58 | int ret; | ||
| 59 | |||
| 60 | tmp = kmalloc(sizeof(*tmp), GFP_KERNEL); | ||
| 61 | if (!tmp) | ||
| 62 | return -ENOMEM; | ||
| 63 | |||
| 64 | data &= mask; | ||
| 65 | |||
| 66 | if (shift) { | ||
| 67 | byen <<= shift; | ||
| 68 | mask <<= (shift * 8); | ||
| 69 | data <<= (shift * 8); | ||
| 70 | index &= ~3; | ||
| 71 | } | ||
| 72 | |||
| 73 | ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), | ||
| 74 | RTL815x_REQ_GET_REGS, RTL815x_REQT_READ, | ||
| 75 | index, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500); | ||
| 76 | if (ret < 0) | ||
| 77 | goto out3; | ||
| 78 | |||
| 79 | data |= __le32_to_cpu(*tmp) & ~mask; | ||
| 80 | *tmp = __cpu_to_le32(data); | ||
| 81 | |||
| 82 | ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), | ||
| 83 | RTL815x_REQ_SET_REGS, RTL815x_REQT_WRITE, | ||
| 84 | index, MCU_TYPE_PLA | byen, tmp, sizeof(*tmp), | ||
| 85 | 500); | ||
| 86 | |||
| 87 | out3: | ||
| 88 | kfree(tmp); | ||
| 89 | return ret; | ||
| 90 | } | ||
| 91 | |||
| 92 | static int ocp_reg_read(struct usbnet *dev, u16 addr) | ||
| 93 | { | ||
| 94 | u16 ocp_base, ocp_index; | ||
| 95 | int ret; | ||
| 96 | |||
| 97 | ocp_base = addr & 0xf000; | ||
| 98 | ret = pla_write_word(dev->udev, OCP_BASE, ocp_base); | ||
| 99 | if (ret < 0) | ||
| 100 | goto out; | ||
| 101 | |||
| 102 | ocp_index = (addr & 0x0fff) | 0xb000; | ||
| 103 | ret = pla_read_word(dev->udev, ocp_index); | ||
| 104 | |||
| 105 | out: | ||
| 106 | return ret; | ||
| 107 | } | ||
| 108 | |||
| 109 | static int ocp_reg_write(struct usbnet *dev, u16 addr, u16 data) | ||
| 110 | { | ||
| 111 | u16 ocp_base, ocp_index; | ||
| 112 | int ret; | ||
| 113 | |||
| 114 | ocp_base = addr & 0xf000; | ||
| 115 | ret = pla_write_word(dev->udev, OCP_BASE, ocp_base); | ||
| 116 | if (ret < 0) | ||
| 117 | goto out1; | ||
| 118 | |||
| 119 | ocp_index = (addr & 0x0fff) | 0xb000; | ||
| 120 | ret = pla_write_word(dev->udev, ocp_index, data); | ||
| 121 | |||
| 122 | out1: | ||
| 123 | return ret; | ||
| 124 | } | ||
| 125 | |||
| 126 | static int r815x_mdio_read(struct net_device *netdev, int phy_id, int reg) | ||
| 127 | { | ||
| 128 | struct usbnet *dev = netdev_priv(netdev); | ||
| 129 | int ret; | ||
| 130 | |||
| 131 | if (phy_id != R815x_PHY_ID) | ||
| 132 | return -EINVAL; | ||
| 133 | |||
| 134 | if (usb_autopm_get_interface(dev->intf) < 0) | ||
| 135 | return -ENODEV; | ||
| 136 | |||
| 137 | ret = ocp_reg_read(dev, BASE_MII + reg * 2); | ||
| 138 | |||
| 139 | usb_autopm_put_interface(dev->intf); | ||
| 140 | return ret; | ||
| 141 | } | ||
| 142 | |||
| 143 | static | ||
| 144 | void r815x_mdio_write(struct net_device *netdev, int phy_id, int reg, int val) | ||
| 145 | { | ||
| 146 | struct usbnet *dev = netdev_priv(netdev); | ||
| 147 | |||
| 148 | if (phy_id != R815x_PHY_ID) | ||
| 149 | return; | ||
| 150 | |||
| 151 | if (usb_autopm_get_interface(dev->intf) < 0) | ||
| 152 | return; | ||
| 153 | |||
| 154 | ocp_reg_write(dev, BASE_MII + reg * 2, val); | ||
| 155 | |||
| 156 | usb_autopm_put_interface(dev->intf); | ||
| 157 | } | ||
| 158 | |||
| 159 | static int r8153_bind(struct usbnet *dev, struct usb_interface *intf) | ||
| 160 | { | ||
| 161 | int status; | ||
| 162 | |||
| 163 | status = usbnet_cdc_bind(dev, intf); | ||
| 164 | if (status < 0) | ||
| 165 | return status; | ||
| 166 | |||
| 167 | dev->mii.dev = dev->net; | ||
| 168 | dev->mii.mdio_read = r815x_mdio_read; | ||
| 169 | dev->mii.mdio_write = r815x_mdio_write; | ||
| 170 | dev->mii.phy_id_mask = 0x3f; | ||
| 171 | dev->mii.reg_num_mask = 0x1f; | ||
| 172 | dev->mii.phy_id = R815x_PHY_ID; | ||
| 173 | dev->mii.supports_gmii = 1; | ||
| 174 | |||
| 175 | return status; | ||
| 176 | } | ||
| 177 | |||
| 178 | static int r8152_bind(struct usbnet *dev, struct usb_interface *intf) | ||
| 179 | { | ||
| 180 | int status; | ||
| 181 | |||
| 182 | status = usbnet_cdc_bind(dev, intf); | ||
| 183 | if (status < 0) | ||
| 184 | return status; | ||
| 185 | |||
| 186 | dev->mii.dev = dev->net; | ||
| 187 | dev->mii.mdio_read = r815x_mdio_read; | ||
| 188 | dev->mii.mdio_write = r815x_mdio_write; | ||
| 189 | dev->mii.phy_id_mask = 0x3f; | ||
| 190 | dev->mii.reg_num_mask = 0x1f; | ||
| 191 | dev->mii.phy_id = R815x_PHY_ID; | ||
| 192 | dev->mii.supports_gmii = 0; | ||
| 193 | |||
| 194 | return status; | ||
| 195 | } | ||
| 196 | |||
| 197 | static const struct driver_info r8152_info = { | ||
| 198 | .description = "RTL8152 ECM Device", | ||
| 199 | .flags = FLAG_ETHER | FLAG_POINTTOPOINT, | ||
| 200 | .bind = r8152_bind, | ||
| 201 | .unbind = usbnet_cdc_unbind, | ||
| 202 | .status = usbnet_cdc_status, | ||
| 203 | .manage_power = usbnet_manage_power, | ||
| 204 | }; | ||
| 205 | |||
| 206 | static const struct driver_info r8153_info = { | ||
| 207 | .description = "RTL8153 ECM Device", | ||
| 208 | .flags = FLAG_ETHER | FLAG_POINTTOPOINT, | ||
| 209 | .bind = r8153_bind, | ||
| 210 | .unbind = usbnet_cdc_unbind, | ||
| 211 | .status = usbnet_cdc_status, | ||
| 212 | .manage_power = usbnet_manage_power, | ||
| 213 | }; | ||
| 214 | |||
| 215 | static const struct usb_device_id products[] = { | ||
| 216 | { | ||
| 217 | USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8152, USB_CLASS_COMM, | ||
| 218 | USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), | ||
| 219 | .driver_info = (unsigned long) &r8152_info, | ||
| 220 | }, | ||
| 221 | |||
| 222 | { | ||
| 223 | USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8153, USB_CLASS_COMM, | ||
| 224 | USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), | ||
| 225 | .driver_info = (unsigned long) &r8153_info, | ||
| 226 | }, | ||
| 227 | |||
| 228 | { }, /* END */ | ||
| 229 | }; | ||
| 230 | MODULE_DEVICE_TABLE(usb, products); | ||
| 231 | |||
| 232 | static struct usb_driver r815x_driver = { | ||
| 233 | .name = "r815x", | ||
| 234 | .id_table = products, | ||
| 235 | .probe = usbnet_probe, | ||
| 236 | .disconnect = usbnet_disconnect, | ||
| 237 | .suspend = usbnet_suspend, | ||
| 238 | .resume = usbnet_resume, | ||
| 239 | .reset_resume = usbnet_resume, | ||
| 240 | .supports_autosuspend = 1, | ||
| 241 | .disable_hub_initiated_lpm = 1, | ||
| 242 | }; | ||
| 243 | |||
| 244 | module_usb_driver(r815x_driver); | ||
| 245 | |||
| 246 | MODULE_AUTHOR("Hayes Wang"); | ||
| 247 | MODULE_DESCRIPTION("Realtek USB ECM device"); | ||
| 248 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 2ec2041b62d4..5b374370f71c 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c | |||
| @@ -285,7 +285,8 @@ static void veth_setup(struct net_device *dev) | |||
| 285 | dev->ethtool_ops = &veth_ethtool_ops; | 285 | dev->ethtool_ops = &veth_ethtool_ops; |
| 286 | dev->features |= NETIF_F_LLTX; | 286 | dev->features |= NETIF_F_LLTX; |
| 287 | dev->features |= VETH_FEATURES; | 287 | dev->features |= VETH_FEATURES; |
| 288 | dev->vlan_features = dev->features; | 288 | dev->vlan_features = dev->features & |
| 289 | ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX); | ||
| 289 | dev->destructor = veth_dev_free; | 290 | dev->destructor = veth_dev_free; |
| 290 | 291 | ||
| 291 | dev->hw_features = VETH_FEATURES; | 292 | dev->hw_features = VETH_FEATURES; |
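A hedged sketch of the veth feature-mask change above: vlan_features is derived from dev->features with the VLAN tag-insertion bits cleared, since those bits describe tagging itself and should not be advertised for VLAN sub-devices. The flag values below are placeholders, not the kernel's netdev feature bits:

    #include <stdio.h>

    #define F_SG            (1u << 0)   /* scatter-gather, inheritable */
    #define F_HW_CSUM       (1u << 1)   /* checksum offload, inheritable */
    #define F_VLAN_CTAG_TX  (1u << 2)   /* 802.1Q tag insertion */
    #define F_VLAN_STAG_TX  (1u << 3)   /* 802.1ad tag insertion */

    int main(void)
    {
        unsigned int features = F_SG | F_HW_CSUM | F_VLAN_CTAG_TX | F_VLAN_STAG_TX;
        /* Clear the tagging bits before handing features down to VLANs. */
        unsigned int vlan_features = features & ~(F_VLAN_CTAG_TX | F_VLAN_STAG_TX);

        printf("features      = 0x%x\n", features);       /* 0xf */
        printf("vlan_features = 0x%x\n", vlan_features);  /* 0x3 */
        return 0;
    }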
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index d75f8edf4fb3..5632a99cbbd2 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
| @@ -1711,7 +1711,8 @@ static int virtnet_probe(struct virtio_device *vdev) | |||
| 1711 | /* If we can receive ANY GSO packets, we must allocate large ones. */ | 1711 | /* If we can receive ANY GSO packets, we must allocate large ones. */ |
| 1712 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || | 1712 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || |
| 1713 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || | 1713 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || |
| 1714 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN)) | 1714 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) || |
| 1715 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO)) | ||
| 1715 | vi->big_packets = true; | 1716 | vi->big_packets = true; |
| 1716 | 1717 | ||
| 1717 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) | 1718 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) |
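A rough sketch of the decision the virtio_net hunk extends: receive buffers must be "big" if any guest GSO feature is negotiated, and GUEST_UFO now counts as one. The bit positions and helper names below are for illustration only, not a statement of the virtio ABI:

    #include <stdbool.h>
    #include <stdio.h>

    enum { GUEST_TSO4 = 7, GUEST_TSO6 = 8, GUEST_ECN = 9, GUEST_UFO = 10 };

    static bool has_feature(unsigned long long features, int bit)
    {
        return (features >> bit) & 1ULL;
    }

    /* Large buffers are required as soon as any GSO-capable receive path
     * is negotiated, UFO included. */
    static bool needs_big_packets(unsigned long long features)
    {
        return has_feature(features, GUEST_TSO4) ||
               has_feature(features, GUEST_TSO6) ||
               has_feature(features, GUEST_ECN)  ||
               has_feature(features, GUEST_UFO);
    }

    int main(void)
    {
        printf("%d\n", needs_big_packets(1ULL << GUEST_UFO));  /* prints 1 */
        printf("%d\n", needs_big_packets(0));                  /* prints 0 */
        return 0;
    }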
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 3be786faaaec..0fa3b44f7342 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c | |||
| @@ -1762,11 +1762,20 @@ vmxnet3_netpoll(struct net_device *netdev) | |||
| 1762 | { | 1762 | { |
| 1763 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | 1763 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
| 1764 | 1764 | ||
| 1765 | if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) | 1765 | switch (adapter->intr.type) { |
| 1766 | vmxnet3_disable_all_intrs(adapter); | 1766 | #ifdef CONFIG_PCI_MSI |
| 1767 | 1767 | case VMXNET3_IT_MSIX: { | |
| 1768 | vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size); | 1768 | int i; |
| 1769 | vmxnet3_enable_all_intrs(adapter); | 1769 | for (i = 0; i < adapter->num_rx_queues; i++) |
| 1770 | vmxnet3_msix_rx(0, &adapter->rx_queue[i]); | ||
| 1771 | break; | ||
| 1772 | } | ||
| 1773 | #endif | ||
| 1774 | case VMXNET3_IT_MSI: | ||
| 1775 | default: | ||
| 1776 | vmxnet3_intr(0, adapter->netdev); | ||
| 1777 | break; | ||
| 1778 | } | ||
| 1770 | 1779 | ||
| 1771 | } | 1780 | } |
| 1772 | #endif /* CONFIG_NET_POLL_CONTROLLER */ | 1781 | #endif /* CONFIG_NET_POLL_CONTROLLER */ |
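A toy sketch of the netpoll dispatch pattern introduced in the vmxnet3 hunk: with per-queue MSI-X vectors every rx queue handler is invoked, otherwise the single shared handler runs once. The types and handler names are invented for the illustration and do not mirror the vmxnet3 structures:

    #include <stdio.h>

    enum intr_type { INTR_LEGACY, INTR_MSI, INTR_MSIX };

    struct fake_adapter {
        enum intr_type type;
        int num_rx_queues;
    };

    static void poll_rx_queue(struct fake_adapter *a, int q)
    {
        (void)a;
        printf("polling rx queue %d\n", q);
    }

    static void shared_interrupt(struct fake_adapter *a)
    {
        (void)a;
        printf("running shared interrupt handler\n");
    }

    static void netpoll(struct fake_adapter *a)
    {
        switch (a->type) {
        case INTR_MSIX:
            /* One handler per rx queue when vectors are per-queue. */
            for (int i = 0; i < a->num_rx_queues; i++)
                poll_rx_queue(a, i);
            break;
        case INTR_MSI:
        case INTR_LEGACY:
        default:
            shared_interrupt(a);
            break;
        }
    }

    int main(void)
    {
        struct fake_adapter a = { .type = INTR_MSIX, .num_rx_queues = 2 };
        netpoll(&a);
        a.type = INTR_MSI;
        netpoll(&a);
        return 0;
    }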
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index b0f705c2378f..1236812c7be6 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
| @@ -1318,6 +1318,9 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb) | |||
| 1318 | 1318 | ||
| 1319 | neigh_release(n); | 1319 | neigh_release(n); |
| 1320 | 1320 | ||
| 1321 | if (reply == NULL) | ||
| 1322 | goto out; | ||
| 1323 | |||
| 1321 | skb_reset_mac_header(reply); | 1324 | skb_reset_mac_header(reply); |
| 1322 | __skb_pull(reply, skb_network_offset(reply)); | 1325 | __skb_pull(reply, skb_network_offset(reply)); |
| 1323 | reply->ip_summed = CHECKSUM_UNNECESSARY; | 1326 | reply->ip_summed = CHECKSUM_UNNECESSARY; |
| @@ -1339,15 +1342,103 @@ out: | |||
| 1339 | } | 1342 | } |
| 1340 | 1343 | ||
| 1341 | #if IS_ENABLED(CONFIG_IPV6) | 1344 | #if IS_ENABLED(CONFIG_IPV6) |
| 1345 | |||
| 1346 | static struct sk_buff *vxlan_na_create(struct sk_buff *request, | ||
| 1347 | struct neighbour *n, bool isrouter) | ||
| 1348 | { | ||
| 1349 | struct net_device *dev = request->dev; | ||
| 1350 | struct sk_buff *reply; | ||
| 1351 | struct nd_msg *ns, *na; | ||
| 1352 | struct ipv6hdr *pip6; | ||
| 1353 | u8 *daddr; | ||
| 1354 | int na_olen = 8; /* opt hdr + ETH_ALEN for target */ | ||
| 1355 | int ns_olen; | ||
| 1356 | int i, len; | ||
| 1357 | |||
| 1358 | if (dev == NULL) | ||
| 1359 | return NULL; | ||
| 1360 | |||
| 1361 | len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) + | ||
| 1362 | sizeof(*na) + na_olen + dev->needed_tailroom; | ||
| 1363 | reply = alloc_skb(len, GFP_ATOMIC); | ||
| 1364 | if (reply == NULL) | ||
| 1365 | return NULL; | ||
| 1366 | |||
| 1367 | reply->protocol = htons(ETH_P_IPV6); | ||
| 1368 | reply->dev = dev; | ||
| 1369 | skb_reserve(reply, LL_RESERVED_SPACE(request->dev)); | ||
| 1370 | skb_push(reply, sizeof(struct ethhdr)); | ||
| 1371 | skb_set_mac_header(reply, 0); | ||
| 1372 | |||
| 1373 | ns = (struct nd_msg *)skb_transport_header(request); | ||
| 1374 | |||
| 1375 | daddr = eth_hdr(request)->h_source; | ||
| 1376 | ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns); | ||
| 1377 | for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) { | ||
| 1378 | if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) { | ||
| 1379 | daddr = ns->opt + i + sizeof(struct nd_opt_hdr); | ||
| 1380 | break; | ||
| 1381 | } | ||
| 1382 | } | ||
| 1383 | |||
| 1384 | /* Ethernet header */ | ||
| 1385 | ether_addr_copy(eth_hdr(reply)->h_dest, daddr); | ||
| 1386 | ether_addr_copy(eth_hdr(reply)->h_source, n->ha); | ||
| 1387 | eth_hdr(reply)->h_proto = htons(ETH_P_IPV6); | ||
| 1388 | reply->protocol = htons(ETH_P_IPV6); | ||
| 1389 | |||
| 1390 | skb_pull(reply, sizeof(struct ethhdr)); | ||
| 1391 | skb_set_network_header(reply, 0); | ||
| 1392 | skb_put(reply, sizeof(struct ipv6hdr)); | ||
| 1393 | |||
| 1394 | /* IPv6 header */ | ||
| 1395 | |||
| 1396 | pip6 = ipv6_hdr(reply); | ||
| 1397 | memset(pip6, 0, sizeof(struct ipv6hdr)); | ||
| 1398 | pip6->version = 6; | ||
| 1399 | pip6->priority = ipv6_hdr(request)->priority; | ||
| 1400 | pip6->nexthdr = IPPROTO_ICMPV6; | ||
| 1401 | pip6->hop_limit = 255; | ||
| 1402 | pip6->daddr = ipv6_hdr(request)->saddr; | ||
| 1403 | pip6->saddr = *(struct in6_addr *)n->primary_key; | ||
| 1404 | |||
| 1405 | skb_pull(reply, sizeof(struct ipv6hdr)); | ||
| 1406 | skb_set_transport_header(reply, 0); | ||
| 1407 | |||
| 1408 | na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen); | ||
| 1409 | |||
| 1410 | /* Neighbor Advertisement */ | ||
| 1411 | memset(na, 0, sizeof(*na)+na_olen); | ||
| 1412 | na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT; | ||
| 1413 | na->icmph.icmp6_router = isrouter; | ||
| 1414 | na->icmph.icmp6_override = 1; | ||
| 1415 | na->icmph.icmp6_solicited = 1; | ||
| 1416 | na->target = ns->target; | ||
| 1417 | ether_addr_copy(&na->opt[2], n->ha); | ||
| 1418 | na->opt[0] = ND_OPT_TARGET_LL_ADDR; | ||
| 1419 | na->opt[1] = na_olen >> 3; | ||
| 1420 | |||
| 1421 | na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr, | ||
| 1422 | &pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6, | ||
| 1423 | csum_partial(na, sizeof(*na)+na_olen, 0)); | ||
| 1424 | |||
| 1425 | pip6->payload_len = htons(sizeof(*na)+na_olen); | ||
| 1426 | |||
| 1427 | skb_push(reply, sizeof(struct ipv6hdr)); | ||
| 1428 | |||
| 1429 | reply->ip_summed = CHECKSUM_UNNECESSARY; | ||
| 1430 | |||
| 1431 | return reply; | ||
| 1432 | } | ||
| 1433 | |||
| 1342 | static int neigh_reduce(struct net_device *dev, struct sk_buff *skb) | 1434 | static int neigh_reduce(struct net_device *dev, struct sk_buff *skb) |
| 1343 | { | 1435 | { |
| 1344 | struct vxlan_dev *vxlan = netdev_priv(dev); | 1436 | struct vxlan_dev *vxlan = netdev_priv(dev); |
| 1345 | struct neighbour *n; | 1437 | struct nd_msg *msg; |
| 1346 | union vxlan_addr ipa; | ||
| 1347 | const struct ipv6hdr *iphdr; | 1438 | const struct ipv6hdr *iphdr; |
| 1348 | const struct in6_addr *saddr, *daddr; | 1439 | const struct in6_addr *saddr, *daddr; |
| 1349 | struct nd_msg *msg; | 1440 | struct neighbour *n; |
| 1350 | struct inet6_dev *in6_dev = NULL; | 1441 | struct inet6_dev *in6_dev; |
| 1351 | 1442 | ||
| 1352 | in6_dev = __in6_dev_get(dev); | 1443 | in6_dev = __in6_dev_get(dev); |
| 1353 | if (!in6_dev) | 1444 | if (!in6_dev) |
| @@ -1360,19 +1451,20 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb) | |||
| 1360 | saddr = &iphdr->saddr; | 1451 | saddr = &iphdr->saddr; |
| 1361 | daddr = &iphdr->daddr; | 1452 | daddr = &iphdr->daddr; |
| 1362 | 1453 | ||
| 1363 | if (ipv6_addr_loopback(daddr) || | ||
| 1364 | ipv6_addr_is_multicast(daddr)) | ||
| 1365 | goto out; | ||
| 1366 | |||
| 1367 | msg = (struct nd_msg *)skb_transport_header(skb); | 1454 | msg = (struct nd_msg *)skb_transport_header(skb); |
| 1368 | if (msg->icmph.icmp6_code != 0 || | 1455 | if (msg->icmph.icmp6_code != 0 || |
| 1369 | msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION) | 1456 | msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION) |
| 1370 | goto out; | 1457 | goto out; |
| 1371 | 1458 | ||
| 1372 | n = neigh_lookup(ipv6_stub->nd_tbl, daddr, dev); | 1459 | if (ipv6_addr_loopback(daddr) || |
| 1460 | ipv6_addr_is_multicast(&msg->target)) | ||
| 1461 | goto out; | ||
| 1462 | |||
| 1463 | n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev); | ||
| 1373 | 1464 | ||
| 1374 | if (n) { | 1465 | if (n) { |
| 1375 | struct vxlan_fdb *f; | 1466 | struct vxlan_fdb *f; |
| 1467 | struct sk_buff *reply; | ||
| 1376 | 1468 | ||
| 1377 | if (!(n->nud_state & NUD_CONNECTED)) { | 1469 | if (!(n->nud_state & NUD_CONNECTED)) { |
| 1378 | neigh_release(n); | 1470 | neigh_release(n); |
| @@ -1386,13 +1478,23 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb) | |||
| 1386 | goto out; | 1478 | goto out; |
| 1387 | } | 1479 | } |
| 1388 | 1480 | ||
| 1389 | ipv6_stub->ndisc_send_na(dev, n, saddr, &msg->target, | 1481 | reply = vxlan_na_create(skb, n, |
| 1390 | !!in6_dev->cnf.forwarding, | 1482 | !!(f ? f->flags & NTF_ROUTER : 0)); |
| 1391 | true, false, false); | 1483 | |
| 1392 | neigh_release(n); | 1484 | neigh_release(n); |
| 1485 | |||
| 1486 | if (reply == NULL) | ||
| 1487 | goto out; | ||
| 1488 | |||
| 1489 | if (netif_rx_ni(reply) == NET_RX_DROP) | ||
| 1490 | dev->stats.rx_dropped++; | ||
| 1491 | |||
| 1393 | } else if (vxlan->flags & VXLAN_F_L3MISS) { | 1492 | } else if (vxlan->flags & VXLAN_F_L3MISS) { |
| 1394 | ipa.sin6.sin6_addr = *daddr; | 1493 | union vxlan_addr ipa = { |
| 1395 | ipa.sa.sa_family = AF_INET6; | 1494 | .sin6.sin6_addr = msg->target, |
| 1495 | .sa.sa_family = AF_INET6, | ||
| 1496 | }; | ||
| 1497 | |||
| 1396 | vxlan_ip_miss(dev, &ipa); | 1498 | vxlan_ip_miss(dev, &ipa); |
| 1397 | } | 1499 | } |
| 1398 | 1500 | ||
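A small sketch of the option walk vxlan_na_create() performs on a neighbour solicitation in the hunk above: options follow the ND header as (type, len) pairs where len is in units of 8 octets, and the walk looks for the source link-layer address option. The buffer below is hand-built test data, and the zero-length guard is an addition for the sketch, not part of the driver loop:

    #include <stdio.h>

    #define ND_OPT_SOURCE_LL_ADDR 1

    /* Returns the offset of the option payload, or -1 if not found. */
    static int find_source_lladdr(const unsigned char *opt, int opt_len)
    {
        for (int i = 0; i < opt_len - 1; i += opt[i + 1] << 3) {
            if (opt[i + 1] == 0)
                return -1;              /* malformed: zero-length option */
            if (opt[i] == ND_OPT_SOURCE_LL_ADDR)
                return i + 2;           /* skip the type and length bytes */
        }
        return -1;
    }

    int main(void)
    {
        /* one 8-byte option: type 1 (source lladdr), len 1 (= 8 octets),
         * followed by a 6-byte MAC address */
        unsigned char opts[8] = { 1, 1, 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

        printf("lladdr offset: %d\n", find_source_lladdr(opts, sizeof(opts)));
        return 0;                       /* prints "lladdr offset: 2" */
    }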
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h index 1cc13569b17b..1b6b4d0cfa97 100644 --- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h | |||
| @@ -57,7 +57,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = { | |||
| 57 | {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32365a5e}, | 57 | {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32365a5e}, |
| 58 | {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, | 58 | {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, |
| 59 | {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, | 59 | {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, |
| 60 | {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, | 60 | {0x00009e20, 0x000003a5, 0x000003a5, 0x000003a5, 0x000003a5}, |
| 61 | {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, | 61 | {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, |
| 62 | {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282}, | 62 | {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282}, |
| 63 | {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27}, | 63 | {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27}, |
| @@ -96,7 +96,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = { | |||
| 96 | {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000}, | 96 | {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000}, |
| 97 | {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, | 97 | {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, |
| 98 | {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c}, | 98 | {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c}, |
| 99 | {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce}, | 99 | {0x0000ae20, 0x000001a6, 0x000001a6, 0x000001aa, 0x000001aa}, |
| 100 | {0x0000b284, 0x00000000, 0x00000000, 0x00000550, 0x00000550}, | 100 | {0x0000b284, 0x00000000, 0x00000000, 0x00000550, 0x00000550}, |
| 101 | }; | 101 | }; |
| 102 | 102 | ||
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 11eab9f01fd8..9078a6c5a74e 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c | |||
| @@ -1534,7 +1534,7 @@ EXPORT_SYMBOL(ath9k_hw_check_nav); | |||
| 1534 | bool ath9k_hw_check_alive(struct ath_hw *ah) | 1534 | bool ath9k_hw_check_alive(struct ath_hw *ah) |
| 1535 | { | 1535 | { |
| 1536 | int count = 50; | 1536 | int count = 50; |
| 1537 | u32 reg; | 1537 | u32 reg, last_val; |
| 1538 | 1538 | ||
| 1539 | if (AR_SREV_9300(ah)) | 1539 | if (AR_SREV_9300(ah)) |
| 1540 | return !ath9k_hw_detect_mac_hang(ah); | 1540 | return !ath9k_hw_detect_mac_hang(ah); |
| @@ -1542,9 +1542,14 @@ bool ath9k_hw_check_alive(struct ath_hw *ah) | |||
| 1542 | if (AR_SREV_9285_12_OR_LATER(ah)) | 1542 | if (AR_SREV_9285_12_OR_LATER(ah)) |
| 1543 | return true; | 1543 | return true; |
| 1544 | 1544 | ||
| 1545 | last_val = REG_READ(ah, AR_OBS_BUS_1); | ||
| 1545 | do { | 1546 | do { |
| 1546 | reg = REG_READ(ah, AR_OBS_BUS_1); | 1547 | reg = REG_READ(ah, AR_OBS_BUS_1); |
| 1548 | if (reg != last_val) | ||
| 1549 | return true; | ||
| 1547 | 1550 | ||
| 1551 | udelay(1); | ||
| 1552 | last_val = reg; | ||
| 1548 | if ((reg & 0x7E7FFFEF) == 0x00702400) | 1553 | if ((reg & 0x7E7FFFEF) == 0x00702400) |
| 1549 | continue; | 1554 | continue; |
| 1550 | 1555 | ||
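A sketch of the liveness test the ath9k_hw_check_alive() hunk adds: sample an observation register and, if its value changes between two reads, treat the MAC as alive without burning the full poll budget. The "register" below is just a counter standing in for REG_READ(ah, AR_OBS_BUS_1), and the stuck-pattern check from the real function is omitted:

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned int fake_obs_bus;

    static unsigned int reg_read(void)
    {
        /* Simulate hardware whose observation bus advances on every read. */
        return fake_obs_bus += 0x10;
    }

    static bool check_alive(void)
    {
        unsigned int last_val = reg_read();

        for (int count = 0; count < 50; count++) {
            unsigned int reg = reg_read();

            if (reg != last_val)
                return true;    /* state moved: the MAC is not hung */
            last_val = reg;
        }
        return false;
    }

    int main(void)
    {
        printf("alive: %d\n", check_alive());   /* prints "alive: 1" */
        return 0;
    }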
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index a0ebdd000fc2..82e340d3ec60 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c | |||
| @@ -732,11 +732,18 @@ static struct ath_rxbuf *ath_get_next_rx_buf(struct ath_softc *sc, | |||
| 732 | return NULL; | 732 | return NULL; |
| 733 | 733 | ||
| 734 | /* | 734 | /* |
| 735 | * mark descriptor as zero-length and set the 'more' | 735 | * Re-check previous descriptor, in case it has been filled |
| 736 | * flag to ensure that both buffers get discarded | 736 | * in the mean time. |
| 737 | */ | 737 | */ |
| 738 | rs->rs_datalen = 0; | 738 | ret = ath9k_hw_rxprocdesc(ah, ds, rs); |
| 739 | rs->rs_more = true; | 739 | if (ret == -EINPROGRESS) { |
| 740 | /* | ||
| 741 | * mark descriptor as zero-length and set the 'more' | ||
| 742 | * flag to ensure that both buffers get discarded | ||
| 743 | */ | ||
| 744 | rs->rs_datalen = 0; | ||
| 745 | rs->rs_more = true; | ||
| 746 | } | ||
| 740 | } | 747 | } |
| 741 | 748 | ||
| 742 | list_del(&bf->list); | 749 | list_del(&bf->list); |
| @@ -985,32 +992,32 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, | |||
| 985 | struct ath_common *common = ath9k_hw_common(ah); | 992 | struct ath_common *common = ath9k_hw_common(ah); |
| 986 | struct ieee80211_hdr *hdr; | 993 | struct ieee80211_hdr *hdr; |
| 987 | bool discard_current = sc->rx.discard_next; | 994 | bool discard_current = sc->rx.discard_next; |
| 988 | int ret = 0; | ||
| 989 | 995 | ||
| 990 | /* | 996 | /* |
| 991 | * Discard corrupt descriptors which are marked in | 997 | * Discard corrupt descriptors which are marked in |
| 992 | * ath_get_next_rx_buf(). | 998 | * ath_get_next_rx_buf(). |
| 993 | */ | 999 | */ |
| 994 | sc->rx.discard_next = rx_stats->rs_more; | ||
| 995 | if (discard_current) | 1000 | if (discard_current) |
| 996 | return -EINVAL; | 1001 | goto corrupt; |
| 1002 | |||
| 1003 | sc->rx.discard_next = false; | ||
| 997 | 1004 | ||
| 998 | /* | 1005 | /* |
| 999 | * Discard zero-length packets. | 1006 | * Discard zero-length packets. |
| 1000 | */ | 1007 | */ |
| 1001 | if (!rx_stats->rs_datalen) { | 1008 | if (!rx_stats->rs_datalen) { |
| 1002 | RX_STAT_INC(rx_len_err); | 1009 | RX_STAT_INC(rx_len_err); |
| 1003 | return -EINVAL; | 1010 | goto corrupt; |
| 1004 | } | 1011 | } |
| 1005 | 1012 | ||
| 1006 | /* | 1013 | /* |
| 1007 | * rs_status follows rs_datalen so if rs_datalen is too large | 1014 | * rs_status follows rs_datalen so if rs_datalen is too large |
| 1008 | * we can take a hint that hardware corrupted it, so ignore | 1015 | * we can take a hint that hardware corrupted it, so ignore |
| 1009 | * those frames. | 1016 | * those frames. |
| 1010 | */ | 1017 | */ |
| 1011 | if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) { | 1018 | if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) { |
| 1012 | RX_STAT_INC(rx_len_err); | 1019 | RX_STAT_INC(rx_len_err); |
| 1013 | return -EINVAL; | 1020 | goto corrupt; |
| 1014 | } | 1021 | } |
| 1015 | 1022 | ||
| 1016 | /* Only use status info from the last fragment */ | 1023 | /* Only use status info from the last fragment */ |
| @@ -1024,10 +1031,8 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, | |||
| 1024 | * This is different from the other corrupt descriptor | 1031 | * This is different from the other corrupt descriptor |
| 1025 | * condition handled above. | 1032 | * condition handled above. |
| 1026 | */ | 1033 | */ |
| 1027 | if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC) { | 1034 | if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC) |
| 1028 | ret = -EINVAL; | 1035 | goto corrupt; |
| 1029 | goto exit; | ||
| 1030 | } | ||
| 1031 | 1036 | ||
| 1032 | hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len); | 1037 | hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len); |
| 1033 | 1038 | ||
| @@ -1043,18 +1048,15 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, | |||
| 1043 | if (ath_process_fft(sc, hdr, rx_stats, rx_status->mactime)) | 1048 | if (ath_process_fft(sc, hdr, rx_stats, rx_status->mactime)) |
| 1044 | RX_STAT_INC(rx_spectral); | 1049 | RX_STAT_INC(rx_spectral); |
| 1045 | 1050 | ||
| 1046 | ret = -EINVAL; | 1051 | return -EINVAL; |
| 1047 | goto exit; | ||
| 1048 | } | 1052 | } |
| 1049 | 1053 | ||
| 1050 | /* | 1054 | /* |
| 1051 | * everything but the rate is checked here, the rate check is done | 1055 | * everything but the rate is checked here, the rate check is done |
| 1052 | * separately to avoid doing two lookups for a rate for each frame. | 1056 | * separately to avoid doing two lookups for a rate for each frame. |
| 1053 | */ | 1057 | */ |
| 1054 | if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) { | 1058 | if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) |
| 1055 | ret = -EINVAL; | 1059 | return -EINVAL; |
| 1056 | goto exit; | ||
| 1057 | } | ||
| 1058 | 1060 | ||
| 1059 | if (ath_is_mybeacon(common, hdr)) { | 1061 | if (ath_is_mybeacon(common, hdr)) { |
| 1060 | RX_STAT_INC(rx_beacons); | 1062 | RX_STAT_INC(rx_beacons); |
| @@ -1064,15 +1066,11 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, | |||
| 1064 | /* | 1066 | /* |
| 1065 | * This shouldn't happen, but have a safety check anyway. | 1067 | * This shouldn't happen, but have a safety check anyway. |
| 1066 | */ | 1068 | */ |
| 1067 | if (WARN_ON(!ah->curchan)) { | 1069 | if (WARN_ON(!ah->curchan)) |
| 1068 | ret = -EINVAL; | 1070 | return -EINVAL; |
| 1069 | goto exit; | ||
| 1070 | } | ||
| 1071 | 1071 | ||
| 1072 | if (ath9k_process_rate(common, hw, rx_stats, rx_status)) { | 1072 | if (ath9k_process_rate(common, hw, rx_stats, rx_status)) |
| 1073 | ret =-EINVAL; | 1073 | return -EINVAL; |
| 1074 | goto exit; | ||
| 1075 | } | ||
| 1076 | 1074 | ||
| 1077 | ath9k_process_rssi(common, hw, rx_stats, rx_status); | 1075 | ath9k_process_rssi(common, hw, rx_stats, rx_status); |
| 1078 | 1076 | ||
| @@ -1087,9 +1085,11 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, | |||
| 1087 | sc->rx.num_pkts++; | 1085 | sc->rx.num_pkts++; |
| 1088 | #endif | 1086 | #endif |
| 1089 | 1087 | ||
| 1090 | exit: | 1088 | return 0; |
| 1091 | sc->rx.discard_next = false; | 1089 | |
| 1092 | return ret; | 1090 | corrupt: |
| 1091 | sc->rx.discard_next = rx_stats->rs_more; | ||
| 1092 | return -EINVAL; | ||
| 1093 | } | 1093 | } |
| 1094 | 1094 | ||
| 1095 | static void ath9k_rx_skb_postprocess(struct ath_common *common, | 1095 | static void ath9k_rx_skb_postprocess(struct ath_common *common, |
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 0a75e2f68c9d..55897d508a76 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c | |||
| @@ -1444,14 +1444,16 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc, | |||
| 1444 | for (tidno = 0, tid = &an->tid[tidno]; | 1444 | for (tidno = 0, tid = &an->tid[tidno]; |
| 1445 | tidno < IEEE80211_NUM_TIDS; tidno++, tid++) { | 1445 | tidno < IEEE80211_NUM_TIDS; tidno++, tid++) { |
| 1446 | 1446 | ||
| 1447 | if (!tid->sched) | ||
| 1448 | continue; | ||
| 1449 | |||
| 1450 | ac = tid->ac; | 1447 | ac = tid->ac; |
| 1451 | txq = ac->txq; | 1448 | txq = ac->txq; |
| 1452 | 1449 | ||
| 1453 | ath_txq_lock(sc, txq); | 1450 | ath_txq_lock(sc, txq); |
| 1454 | 1451 | ||
| 1452 | if (!tid->sched) { | ||
| 1453 | ath_txq_unlock(sc, txq); | ||
| 1454 | continue; | ||
| 1455 | } | ||
| 1456 | |||
| 1455 | buffered = ath_tid_has_buffered(tid); | 1457 | buffered = ath_tid_has_buffered(tid); |
| 1456 | 1458 | ||
| 1457 | tid->sched = false; | 1459 | tid->sched = false; |
| @@ -2061,7 +2063,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, | |||
| 2061 | 2063 | ||
| 2062 | ATH_TXBUF_RESET(bf); | 2064 | ATH_TXBUF_RESET(bf); |
| 2063 | 2065 | ||
| 2064 | if (tid) { | 2066 | if (tid && ieee80211_is_data_present(hdr->frame_control)) { |
| 2065 | fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; | 2067 | fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; |
| 2066 | seqno = tid->seq_next; | 2068 | seqno = tid->seq_next; |
| 2067 | hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT); | 2069 | hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT); |
| @@ -2184,14 +2186,15 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, | |||
| 2184 | txq->stopped = true; | 2186 | txq->stopped = true; |
| 2185 | } | 2187 | } |
| 2186 | 2188 | ||
| 2189 | if (txctl->an && ieee80211_is_data_present(hdr->frame_control)) | ||
| 2190 | tid = ath_get_skb_tid(sc, txctl->an, skb); | ||
| 2191 | |||
| 2187 | if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) { | 2192 | if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) { |
| 2188 | ath_txq_unlock(sc, txq); | 2193 | ath_txq_unlock(sc, txq); |
| 2189 | txq = sc->tx.uapsdq; | 2194 | txq = sc->tx.uapsdq; |
| 2190 | ath_txq_lock(sc, txq); | 2195 | ath_txq_lock(sc, txq); |
| 2191 | } else if (txctl->an && | 2196 | } else if (txctl->an && |
| 2192 | ieee80211_is_data_present(hdr->frame_control)) { | 2197 | ieee80211_is_data_present(hdr->frame_control)) { |
| 2193 | tid = ath_get_skb_tid(sc, txctl->an, skb); | ||
| 2194 | |||
| 2195 | WARN_ON(tid->ac->txq != txctl->txq); | 2198 | WARN_ON(tid->ac->txq != txctl->txq); |
| 2196 | 2199 | ||
| 2197 | if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) | 2200 | if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) |
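The ath_tx_aggr_sleep() hunk above moves the tid->sched test under the txq lock so the flag cannot change between the check and the locked section. A toy sketch of the same "decide only while holding the lock" rule, with made-up names (compile with -pthread); it is a pattern illustration, not the driver's locking scheme:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct tid_state {
        pthread_mutex_t lock;
        bool sched;
    };

    static void sleep_path(struct tid_state *t)
    {
        pthread_mutex_lock(&t->lock);
        if (!t->sched) {                 /* checked under the lock */
            pthread_mutex_unlock(&t->lock);
            return;
        }
        t->sched = false;                /* safe: no one else can flip it now */
        pthread_mutex_unlock(&t->lock);
    }

    static void *other_cpu(void *arg)
    {
        struct tid_state *t = arg;

        pthread_mutex_lock(&t->lock);
        t->sched = false;                /* concurrent writer */
        pthread_mutex_unlock(&t->lock);
        return NULL;
    }

    int main(void)
    {
        struct tid_state t = { PTHREAD_MUTEX_INITIALIZER, true };
        pthread_t thr;

        pthread_create(&thr, NULL, other_cpu, &t);
        sleep_path(&t);
        pthread_join(thr, NULL);
        printf("sched = %d\n", t.sched); /* always 0, with no lost update */
        return 0;
    }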
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c index 3e991897d7ca..ddaa9efd053d 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c | |||
| @@ -457,7 +457,6 @@ struct brcmf_sdio { | |||
| 457 | 457 | ||
| 458 | u8 tx_hdrlen; /* sdio bus header length for tx packet */ | 458 | u8 tx_hdrlen; /* sdio bus header length for tx packet */ |
| 459 | bool txglom; /* host tx glomming enable flag */ | 459 | bool txglom; /* host tx glomming enable flag */ |
| 460 | struct sk_buff *txglom_sgpad; /* scatter-gather padding buffer */ | ||
| 461 | u16 head_align; /* buffer pointer alignment */ | 460 | u16 head_align; /* buffer pointer alignment */ |
| 462 | u16 sgentry_align; /* scatter-gather buffer alignment */ | 461 | u16 sgentry_align; /* scatter-gather buffer alignment */ |
| 463 | }; | 462 | }; |
| @@ -1944,19 +1943,21 @@ static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio *bus, | |||
| 1944 | if (lastfrm && chain_pad) | 1943 | if (lastfrm && chain_pad) |
| 1945 | tail_pad += blksize - chain_pad; | 1944 | tail_pad += blksize - chain_pad; |
| 1946 | if (skb_tailroom(pkt) < tail_pad && pkt->len > blksize) { | 1945 | if (skb_tailroom(pkt) < tail_pad && pkt->len > blksize) { |
| 1947 | pkt_pad = bus->txglom_sgpad; | 1946 | pkt_pad = brcmu_pkt_buf_get_skb(tail_pad + tail_chop + |
| 1948 | if (pkt_pad == NULL) | 1947 | bus->head_align); |
| 1949 | brcmu_pkt_buf_get_skb(tail_pad + tail_chop); | ||
| 1950 | if (pkt_pad == NULL) | 1948 | if (pkt_pad == NULL) |
| 1951 | return -ENOMEM; | 1949 | return -ENOMEM; |
| 1952 | ret = brcmf_sdio_txpkt_hdalign(bus, pkt_pad); | 1950 | ret = brcmf_sdio_txpkt_hdalign(bus, pkt_pad); |
| 1953 | if (unlikely(ret < 0)) | 1951 | if (unlikely(ret < 0)) { |
| 1952 | kfree_skb(pkt_pad); | ||
| 1954 | return ret; | 1953 | return ret; |
| 1954 | } | ||
| 1955 | memcpy(pkt_pad->data, | 1955 | memcpy(pkt_pad->data, |
| 1956 | pkt->data + pkt->len - tail_chop, | 1956 | pkt->data + pkt->len - tail_chop, |
| 1957 | tail_chop); | 1957 | tail_chop); |
| 1958 | *(u32 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop; | 1958 | *(u32 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop; |
| 1959 | skb_trim(pkt, pkt->len - tail_chop); | 1959 | skb_trim(pkt, pkt->len - tail_chop); |
| 1960 | skb_trim(pkt_pad, tail_pad + tail_chop); | ||
| 1960 | __skb_queue_after(pktq, pkt, pkt_pad); | 1961 | __skb_queue_after(pktq, pkt, pkt_pad); |
| 1961 | } else { | 1962 | } else { |
| 1962 | ntail = pkt->data_len + tail_pad - | 1963 | ntail = pkt->data_len + tail_pad - |
| @@ -2011,7 +2012,7 @@ brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq, | |||
| 2011 | return ret; | 2012 | return ret; |
| 2012 | head_pad = (u16)ret; | 2013 | head_pad = (u16)ret; |
| 2013 | if (head_pad) | 2014 | if (head_pad) |
| 2014 | memset(pkt_next->data, 0, head_pad + bus->tx_hdrlen); | 2015 | memset(pkt_next->data + bus->tx_hdrlen, 0, head_pad); |
| 2015 | 2016 | ||
| 2016 | total_len += pkt_next->len; | 2017 | total_len += pkt_next->len; |
| 2017 | 2018 | ||
| @@ -3486,10 +3487,6 @@ static int brcmf_sdio_bus_preinit(struct device *dev) | |||
| 3486 | bus->txglom = false; | 3487 | bus->txglom = false; |
| 3487 | value = 1; | 3488 | value = 1; |
| 3488 | pad_size = bus->sdiodev->func[2]->cur_blksize << 1; | 3489 | pad_size = bus->sdiodev->func[2]->cur_blksize << 1; |
| 3489 | bus->txglom_sgpad = brcmu_pkt_buf_get_skb(pad_size); | ||
| 3490 | if (!bus->txglom_sgpad) | ||
| 3491 | brcmf_err("allocating txglom padding skb failed, reduced performance\n"); | ||
| 3492 | |||
| 3493 | err = brcmf_iovar_data_set(bus->sdiodev->dev, "bus:rxglom", | 3490 | err = brcmf_iovar_data_set(bus->sdiodev->dev, "bus:rxglom", |
| 3494 | &value, sizeof(u32)); | 3491 | &value, sizeof(u32)); |
| 3495 | if (err < 0) { | 3492 | if (err < 0) { |
| @@ -4053,7 +4050,6 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus) | |||
| 4053 | brcmf_sdio_chip_detach(&bus->ci); | 4050 | brcmf_sdio_chip_detach(&bus->ci); |
| 4054 | } | 4051 | } |
| 4055 | 4052 | ||
| 4056 | brcmu_pkt_buf_free_skb(bus->txglom_sgpad); | ||
| 4057 | kfree(bus->rxbuf); | 4053 | kfree(bus->rxbuf); |
| 4058 | kfree(bus->hdrbuf); | 4054 | kfree(bus->hdrbuf); |
| 4059 | kfree(bus); | 4055 | kfree(bus); |
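A quick sketch of the tail-padding arithmetic behind the brcmfmac hunks above: the last frame of a glom chain is padded so the whole chain ends on an SDIO block boundary, and the padding skb is now allocated per call instead of reusing a shared one. The helper name and parameters are illustrative only:

    #include <stdio.h>

    /* Bytes of padding needed so the chain length becomes a multiple of the
     * SDIO block size; zero if it already is. */
    static unsigned int chain_tail_pad(unsigned int chain_len, unsigned int blksize)
    {
        unsigned int chain_pad = chain_len % blksize;

        return chain_pad ? blksize - chain_pad : 0;
    }

    int main(void)
    {
        printf("%u\n", chain_tail_pad(1500, 512));  /* 1500 mod 512 = 476 -> pad 36 */
        printf("%u\n", chain_tail_pad(1024, 512));  /* already aligned -> pad 0 */
        return 0;
    }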
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c index d36e252d2ccb..596525528f50 100644 --- a/drivers/net/wireless/hostap/hostap_ap.c +++ b/drivers/net/wireless/hostap/hostap_ap.c | |||
| @@ -147,7 +147,7 @@ static void ap_free_sta(struct ap_data *ap, struct sta_info *sta) | |||
| 147 | 147 | ||
| 148 | if (!sta->ap && sta->u.sta.challenge) | 148 | if (!sta->ap && sta->u.sta.challenge) |
| 149 | kfree(sta->u.sta.challenge); | 149 | kfree(sta->u.sta.challenge); |
| 150 | del_timer(&sta->timer); | 150 | del_timer_sync(&sta->timer); |
| 151 | #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ | 151 | #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ |
| 152 | 152 | ||
| 153 | kfree(sta); | 153 | kfree(sta); |
diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c index c0d070c5df5e..9cdd91cdf661 100644 --- a/drivers/net/wireless/iwlwifi/dvm/sta.c +++ b/drivers/net/wireless/iwlwifi/dvm/sta.c | |||
| @@ -590,6 +590,7 @@ void iwl_deactivate_station(struct iwl_priv *priv, const u8 sta_id, | |||
| 590 | sizeof(priv->tid_data[sta_id][tid])); | 590 | sizeof(priv->tid_data[sta_id][tid])); |
| 591 | 591 | ||
| 592 | priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE; | 592 | priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE; |
| 593 | priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS; | ||
| 593 | 594 | ||
| 594 | priv->num_stations--; | 595 | priv->num_stations--; |
| 595 | 596 | ||
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c index a6839dfcb82d..398dd096674c 100644 --- a/drivers/net/wireless/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/iwlwifi/dvm/tx.c | |||
| @@ -1291,8 +1291,6 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, | |||
| 1291 | struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data; | 1291 | struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data; |
| 1292 | struct iwl_ht_agg *agg; | 1292 | struct iwl_ht_agg *agg; |
| 1293 | struct sk_buff_head reclaimed_skbs; | 1293 | struct sk_buff_head reclaimed_skbs; |
| 1294 | struct ieee80211_tx_info *info; | ||
| 1295 | struct ieee80211_hdr *hdr; | ||
| 1296 | struct sk_buff *skb; | 1294 | struct sk_buff *skb; |
| 1297 | int sta_id; | 1295 | int sta_id; |
| 1298 | int tid; | 1296 | int tid; |
| @@ -1379,22 +1377,28 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, | |||
| 1379 | freed = 0; | 1377 | freed = 0; |
| 1380 | 1378 | ||
| 1381 | skb_queue_walk(&reclaimed_skbs, skb) { | 1379 | skb_queue_walk(&reclaimed_skbs, skb) { |
| 1382 | hdr = (struct ieee80211_hdr *)skb->data; | 1380 | struct ieee80211_hdr *hdr = (void *)skb->data; |
| 1381 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
| 1383 | 1382 | ||
| 1384 | if (ieee80211_is_data_qos(hdr->frame_control)) | 1383 | if (ieee80211_is_data_qos(hdr->frame_control)) |
| 1385 | freed++; | 1384 | freed++; |
| 1386 | else | 1385 | else |
| 1387 | WARN_ON_ONCE(1); | 1386 | WARN_ON_ONCE(1); |
| 1388 | 1387 | ||
| 1389 | info = IEEE80211_SKB_CB(skb); | ||
| 1390 | iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]); | 1388 | iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]); |
| 1391 | 1389 | ||
| 1390 | memset(&info->status, 0, sizeof(info->status)); | ||
| 1391 | /* Packet was transmitted successfully, failures come as single | ||
| 1392 | * frames because before failing a frame the firmware transmits | ||
| 1393 | * it without aggregation at least once. | ||
| 1394 | */ | ||
| 1395 | info->flags |= IEEE80211_TX_STAT_ACK; | ||
| 1396 | |||
| 1392 | if (freed == 1) { | 1397 | if (freed == 1) { |
| 1393 | /* this is the first skb we deliver in this batch */ | 1398 | /* this is the first skb we deliver in this batch */ |
| 1394 | /* put the rate scaling data there */ | 1399 | /* put the rate scaling data there */ |
| 1395 | info = IEEE80211_SKB_CB(skb); | 1400 | info = IEEE80211_SKB_CB(skb); |
| 1396 | memset(&info->status, 0, sizeof(info->status)); | 1401 | memset(&info->status, 0, sizeof(info->status)); |
| 1397 | info->flags |= IEEE80211_TX_STAT_ACK; | ||
| 1398 | info->flags |= IEEE80211_TX_STAT_AMPDU; | 1402 | info->flags |= IEEE80211_TX_STAT_AMPDU; |
| 1399 | info->status.ampdu_ack_len = ba_resp->txed_2_done; | 1403 | info->status.ampdu_ack_len = ba_resp->txed_2_done; |
| 1400 | info->status.ampdu_len = ba_resp->txed; | 1404 | info->status.ampdu_len = ba_resp->txed; |
diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c index 76cde6ce6551..18a895a949d4 100644 --- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c +++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c | |||
| @@ -872,8 +872,11 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, | |||
| 872 | 872 | ||
| 873 | lockdep_assert_held(&mvm->mutex); | 873 | lockdep_assert_held(&mvm->mutex); |
| 874 | 874 | ||
| 875 | /* Rssi update while not associated ?! */ | 875 | /* |
| 876 | if (WARN_ON_ONCE(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)) | 876 | * Rssi update while not associated - can happen since the statistics |
| 877 | * are handled asynchronously | ||
| 878 | */ | ||
| 879 | if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) | ||
| 877 | return; | 880 | return; |
| 878 | 881 | ||
| 879 | /* No BT - reports should be disabled */ | 882 | /* No BT - reports should be disabled */ |
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index e4ead86f06d6..2b0ba1fc3c82 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h | |||
| @@ -152,7 +152,7 @@ enum iwl_power_scheme { | |||
| 152 | IWL_POWER_SCHEME_LP | 152 | IWL_POWER_SCHEME_LP |
| 153 | }; | 153 | }; |
| 154 | 154 | ||
| 155 | #define IWL_CONN_MAX_LISTEN_INTERVAL 70 | 155 | #define IWL_CONN_MAX_LISTEN_INTERVAL 10 |
| 156 | #define IWL_UAPSD_AC_INFO (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\ | 156 | #define IWL_UAPSD_AC_INFO (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\ |
| 157 | IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |\ | 157 | IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |\ |
| 158 | IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |\ | 158 | IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |\ |
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index 4df12fa9d336..76ee486039d7 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c | |||
| @@ -822,16 +822,12 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, | |||
| 822 | struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data; | 822 | struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data; |
| 823 | struct sk_buff_head reclaimed_skbs; | 823 | struct sk_buff_head reclaimed_skbs; |
| 824 | struct iwl_mvm_tid_data *tid_data; | 824 | struct iwl_mvm_tid_data *tid_data; |
| 825 | struct ieee80211_tx_info *info; | ||
| 826 | struct ieee80211_sta *sta; | 825 | struct ieee80211_sta *sta; |
| 827 | struct iwl_mvm_sta *mvmsta; | 826 | struct iwl_mvm_sta *mvmsta; |
| 828 | struct ieee80211_hdr *hdr; | ||
| 829 | struct sk_buff *skb; | 827 | struct sk_buff *skb; |
| 830 | int sta_id, tid, freed; | 828 | int sta_id, tid, freed; |
| 831 | |||
| 832 | /* "flow" corresponds to Tx queue */ | 829 | /* "flow" corresponds to Tx queue */ |
| 833 | u16 scd_flow = le16_to_cpu(ba_notif->scd_flow); | 830 | u16 scd_flow = le16_to_cpu(ba_notif->scd_flow); |
| 834 | |||
| 835 | /* "ssn" is start of block-ack Tx window, corresponds to index | 831 | /* "ssn" is start of block-ack Tx window, corresponds to index |
| 836 | * (in Tx queue's circular buffer) of first TFD/frame in window */ | 832 | * (in Tx queue's circular buffer) of first TFD/frame in window */ |
| 837 | u16 ba_resp_scd_ssn = le16_to_cpu(ba_notif->scd_ssn); | 833 | u16 ba_resp_scd_ssn = le16_to_cpu(ba_notif->scd_ssn); |
| @@ -888,22 +884,26 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, | |||
| 888 | freed = 0; | 884 | freed = 0; |
| 889 | 885 | ||
| 890 | skb_queue_walk(&reclaimed_skbs, skb) { | 886 | skb_queue_walk(&reclaimed_skbs, skb) { |
| 891 | hdr = (struct ieee80211_hdr *)skb->data; | 887 | struct ieee80211_hdr *hdr = (void *)skb->data; |
| 888 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
| 892 | 889 | ||
| 893 | if (ieee80211_is_data_qos(hdr->frame_control)) | 890 | if (ieee80211_is_data_qos(hdr->frame_control)) |
| 894 | freed++; | 891 | freed++; |
| 895 | else | 892 | else |
| 896 | WARN_ON_ONCE(1); | 893 | WARN_ON_ONCE(1); |
| 897 | 894 | ||
| 898 | info = IEEE80211_SKB_CB(skb); | ||
| 899 | iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); | 895 | iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); |
| 900 | 896 | ||
| 897 | memset(&info->status, 0, sizeof(info->status)); | ||
| 898 | /* Packet was transmitted successfully, failures come as single | ||
| 899 | * frames because before failing a frame the firmware transmits | ||
| 900 | * it without aggregation at least once. | ||
| 901 | */ | ||
| 902 | info->flags |= IEEE80211_TX_STAT_ACK; | ||
| 903 | |||
| 901 | if (freed == 1) { | 904 | if (freed == 1) { |
| 902 | /* this is the first skb we deliver in this batch */ | 905 | /* this is the first skb we deliver in this batch */ |
| 903 | /* put the rate scaling data there */ | 906 | /* put the rate scaling data there */ |
| 904 | info = IEEE80211_SKB_CB(skb); | ||
| 905 | memset(&info->status, 0, sizeof(info->status)); | ||
| 906 | info->flags |= IEEE80211_TX_STAT_ACK; | ||
| 907 | info->flags |= IEEE80211_TX_STAT_AMPDU; | 907 | info->flags |= IEEE80211_TX_STAT_AMPDU; |
| 908 | info->status.ampdu_ack_len = ba_notif->txed_2_done; | 908 | info->status.ampdu_ack_len = ba_notif->txed_2_done; |
| 909 | info->status.ampdu_len = ba_notif->txed; | 909 | info->status.ampdu_len = ba_notif->txed; |
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index f47bcbe2945a..3872ead75488 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c | |||
| @@ -359,13 +359,12 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { | |||
| 359 | /* 7265 Series */ | 359 | /* 7265 Series */ |
| 360 | {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, | 360 | {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, |
| 361 | {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)}, | 361 | {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)}, |
| 362 | {IWL_PCI_DEVICE(0x095A, 0x5112, iwl7265_2ac_cfg)}, | ||
| 363 | {IWL_PCI_DEVICE(0x095A, 0x5100, iwl7265_2ac_cfg)}, | 362 | {IWL_PCI_DEVICE(0x095A, 0x5100, iwl7265_2ac_cfg)}, |
| 364 | {IWL_PCI_DEVICE(0x095A, 0x510A, iwl7265_2ac_cfg)}, | ||
| 365 | {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)}, | 363 | {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)}, |
| 366 | {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)}, | 364 | {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)}, |
| 367 | {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)}, | 365 | {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)}, |
| 368 | {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)}, | 366 | {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)}, |
| 367 | {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)}, | ||
| 369 | {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)}, | 368 | {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)}, |
| 370 | {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)}, | 369 | {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)}, |
| 371 | {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)}, | 370 | {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)}, |
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c index 32f75007a825..cb6d189bc3e6 100644 --- a/drivers/net/wireless/libertas/cfg.c +++ b/drivers/net/wireless/libertas/cfg.c | |||
| @@ -621,7 +621,7 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy, | |||
| 621 | id = *pos++; | 621 | id = *pos++; |
| 622 | elen = *pos++; | 622 | elen = *pos++; |
| 623 | left -= 2; | 623 | left -= 2; |
| 624 | if (elen > left || elen == 0) { | 624 | if (elen > left) { |
| 625 | lbs_deb_scan("scan response: invalid IE fmt\n"); | 625 | lbs_deb_scan("scan response: invalid IE fmt\n"); |
| 626 | goto done; | 626 | goto done; |
| 627 | } | 627 | } |
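The libertas change above stops treating a zero-length IE as a parse error. A standalone sketch of such an information-element walk: each element is an (id, len) header plus len bytes, and len == 0 (for example a hidden SSID) simply advances past the two header bytes. The test data is hand-built:

    #include <stdio.h>

    static void walk_ies(const unsigned char *buf, int left)
    {
        while (left >= 2) {
            unsigned char id = buf[0];
            unsigned char elen = buf[1];

            buf += 2;
            left -= 2;
            if (elen > left) {          /* only over-long IEs are invalid */
                printf("truncated IE %u\n", id);
                return;
            }
            printf("IE %u, length %u\n", id, elen);
            buf += elen;
            left -= elen;
        }
    }

    int main(void)
    {
        /* SSID IE with length 0, then a 1-byte DS Parameter Set (channel 6) */
        unsigned char ies[] = { 0, 0, 3, 1, 6 };

        walk_ies(ies, sizeof(ies));
        return 0;
    }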
diff --git a/drivers/net/wireless/mwifiex/11ac.c b/drivers/net/wireless/mwifiex/11ac.c index 5e0eec4d71c7..5d9a8084665d 100644 --- a/drivers/net/wireless/mwifiex/11ac.c +++ b/drivers/net/wireless/mwifiex/11ac.c | |||
| @@ -189,8 +189,7 @@ int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv, | |||
| 189 | vht_cap->header.len = | 189 | vht_cap->header.len = |
| 190 | cpu_to_le16(sizeof(struct ieee80211_vht_cap)); | 190 | cpu_to_le16(sizeof(struct ieee80211_vht_cap)); |
| 191 | memcpy((u8 *)vht_cap + sizeof(struct mwifiex_ie_types_header), | 191 | memcpy((u8 *)vht_cap + sizeof(struct mwifiex_ie_types_header), |
| 192 | (u8 *)bss_desc->bcn_vht_cap + | 192 | (u8 *)bss_desc->bcn_vht_cap, |
| 193 | sizeof(struct ieee_types_header), | ||
| 194 | le16_to_cpu(vht_cap->header.len)); | 193 | le16_to_cpu(vht_cap->header.len)); |
| 195 | 194 | ||
| 196 | mwifiex_fill_vht_cap_tlv(priv, vht_cap, bss_desc->bss_band); | 195 | mwifiex_fill_vht_cap_tlv(priv, vht_cap, bss_desc->bss_band); |
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c index 6261f8c53d44..7db1a89fdd95 100644 --- a/drivers/net/wireless/mwifiex/11n.c +++ b/drivers/net/wireless/mwifiex/11n.c | |||
| @@ -308,8 +308,7 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv, | |||
| 308 | ht_cap->header.len = | 308 | ht_cap->header.len = |
| 309 | cpu_to_le16(sizeof(struct ieee80211_ht_cap)); | 309 | cpu_to_le16(sizeof(struct ieee80211_ht_cap)); |
| 310 | memcpy((u8 *) ht_cap + sizeof(struct mwifiex_ie_types_header), | 310 | memcpy((u8 *) ht_cap + sizeof(struct mwifiex_ie_types_header), |
| 311 | (u8 *) bss_desc->bcn_ht_cap + | 311 | (u8 *)bss_desc->bcn_ht_cap, |
| 312 | sizeof(struct ieee_types_header), | ||
| 313 | le16_to_cpu(ht_cap->header.len)); | 312 | le16_to_cpu(ht_cap->header.len)); |
| 314 | 313 | ||
| 315 | mwifiex_fill_cap_info(priv, radio_type, ht_cap); | 314 | mwifiex_fill_cap_info(priv, radio_type, ht_cap); |
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c index 03688aa14e8a..7fe7b53fb17a 100644 --- a/drivers/net/wireless/mwifiex/pcie.c +++ b/drivers/net/wireless/mwifiex/pcie.c | |||
| @@ -1211,6 +1211,12 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter) | |||
| 1211 | rd_index = card->rxbd_rdptr & reg->rx_mask; | 1211 | rd_index = card->rxbd_rdptr & reg->rx_mask; |
| 1212 | skb_data = card->rx_buf_list[rd_index]; | 1212 | skb_data = card->rx_buf_list[rd_index]; |
| 1213 | 1213 | ||
| 1214 | /* If skb allocation was failed earlier for Rx packet, | ||
| 1215 | * rx_buf_list[rd_index] would have been left with a NULL. | ||
| 1216 | */ | ||
| 1217 | if (!skb_data) | ||
| 1218 | return -ENOMEM; | ||
| 1219 | |||
| 1214 | MWIFIEX_SKB_PACB(skb_data, &buf_pa); | 1220 | MWIFIEX_SKB_PACB(skb_data, &buf_pa); |
| 1215 | pci_unmap_single(card->dev, buf_pa, MWIFIEX_RX_DATA_BUF_SIZE, | 1221 | pci_unmap_single(card->dev, buf_pa, MWIFIEX_RX_DATA_BUF_SIZE, |
| 1216 | PCI_DMA_FROMDEVICE); | 1222 | PCI_DMA_FROMDEVICE); |
| @@ -1525,6 +1531,14 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) | |||
| 1525 | if (adapter->ps_state == PS_STATE_SLEEP_CFM) { | 1531 | if (adapter->ps_state == PS_STATE_SLEEP_CFM) { |
| 1526 | mwifiex_process_sleep_confirm_resp(adapter, skb->data, | 1532 | mwifiex_process_sleep_confirm_resp(adapter, skb->data, |
| 1527 | skb->len); | 1533 | skb->len); |
| 1534 | mwifiex_pcie_enable_host_int(adapter); | ||
| 1535 | if (mwifiex_write_reg(adapter, | ||
| 1536 | PCIE_CPU_INT_EVENT, | ||
| 1537 | CPU_INTR_SLEEP_CFM_DONE)) { | ||
| 1538 | dev_warn(adapter->dev, | ||
| 1539 | "Write register failed\n"); | ||
| 1540 | return -1; | ||
| 1541 | } | ||
| 1528 | while (reg->sleep_cookie && (count++ < 10) && | 1542 | while (reg->sleep_cookie && (count++ < 10) && |
| 1529 | mwifiex_pcie_ok_to_access_hw(adapter)) | 1543 | mwifiex_pcie_ok_to_access_hw(adapter)) |
| 1530 | usleep_range(50, 60); | 1544 | usleep_range(50, 60); |
| @@ -1993,23 +2007,9 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter) | |||
| 1993 | adapter->int_status |= pcie_ireg; | 2007 | adapter->int_status |= pcie_ireg; |
| 1994 | spin_unlock_irqrestore(&adapter->int_lock, flags); | 2008 | spin_unlock_irqrestore(&adapter->int_lock, flags); |
| 1995 | 2009 | ||
| 1996 | if (pcie_ireg & HOST_INTR_CMD_DONE) { | 2010 | if (!adapter->pps_uapsd_mode && |
| 1997 | if ((adapter->ps_state == PS_STATE_SLEEP_CFM) || | 2011 | adapter->ps_state == PS_STATE_SLEEP && |
| 1998 | (adapter->ps_state == PS_STATE_SLEEP)) { | 2012 | mwifiex_pcie_ok_to_access_hw(adapter)) { |
| 1999 | mwifiex_pcie_enable_host_int(adapter); | ||
| 2000 | if (mwifiex_write_reg(adapter, | ||
| 2001 | PCIE_CPU_INT_EVENT, | ||
| 2002 | CPU_INTR_SLEEP_CFM_DONE) | ||
| 2003 | ) { | ||
| 2004 | dev_warn(adapter->dev, | ||
| 2005 | "Write register failed\n"); | ||
| 2006 | return; | ||
| 2007 | |||
| 2008 | } | ||
| 2009 | } | ||
| 2010 | } else if (!adapter->pps_uapsd_mode && | ||
| 2011 | adapter->ps_state == PS_STATE_SLEEP && | ||
| 2012 | mwifiex_pcie_ok_to_access_hw(adapter)) { | ||
| 2013 | /* Potentially for PCIe we could get other | 2013 | /* Potentially for PCIe we could get other |
| 2014 | * interrupts like shared. Don't change power | 2014 | * interrupts like shared. Don't change power |
| 2015 | * state until cookie is set */ | 2015 | * state until cookie is set */ |
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c index 0a8a26e10f01..668547c2de84 100644 --- a/drivers/net/wireless/mwifiex/scan.c +++ b/drivers/net/wireless/mwifiex/scan.c | |||
| @@ -2101,12 +2101,12 @@ mwifiex_save_curr_bcn(struct mwifiex_private *priv) | |||
| 2101 | curr_bss->ht_info_offset); | 2101 | curr_bss->ht_info_offset); |
| 2102 | 2102 | ||
| 2103 | if (curr_bss->bcn_vht_cap) | 2103 | if (curr_bss->bcn_vht_cap) |
| 2104 | curr_bss->bcn_ht_cap = (void *)(curr_bss->beacon_buf + | 2104 | curr_bss->bcn_vht_cap = (void *)(curr_bss->beacon_buf + |
| 2105 | curr_bss->vht_cap_offset); | 2105 | curr_bss->vht_cap_offset); |
| 2106 | 2106 | ||
| 2107 | if (curr_bss->bcn_vht_oper) | 2107 | if (curr_bss->bcn_vht_oper) |
| 2108 | curr_bss->bcn_ht_oper = (void *)(curr_bss->beacon_buf + | 2108 | curr_bss->bcn_vht_oper = (void *)(curr_bss->beacon_buf + |
| 2109 | curr_bss->vht_info_offset); | 2109 | curr_bss->vht_info_offset); |
| 2110 | 2110 | ||
| 2111 | if (curr_bss->bcn_bss_co_2040) | 2111 | if (curr_bss->bcn_bss_co_2040) |
| 2112 | curr_bss->bcn_bss_co_2040 = | 2112 | curr_bss->bcn_bss_co_2040 = |
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c index e8ebbd4bc3cd..208748804a55 100644 --- a/drivers/net/wireless/mwifiex/usb.c +++ b/drivers/net/wireless/mwifiex/usb.c | |||
| @@ -22,8 +22,6 @@ | |||
| 22 | 22 | ||
| 23 | #define USB_VERSION "1.0" | 23 | #define USB_VERSION "1.0" |
| 24 | 24 | ||
| 25 | static const char usbdriver_name[] = "usb8xxx"; | ||
| 26 | |||
| 27 | static struct mwifiex_if_ops usb_ops; | 25 | static struct mwifiex_if_ops usb_ops; |
| 28 | static struct semaphore add_remove_card_sem; | 26 | static struct semaphore add_remove_card_sem; |
| 29 | static struct usb_card_rec *usb_card; | 27 | static struct usb_card_rec *usb_card; |
| @@ -527,13 +525,6 @@ static int mwifiex_usb_resume(struct usb_interface *intf) | |||
| 527 | MWIFIEX_BSS_ROLE_ANY), | 525 | MWIFIEX_BSS_ROLE_ANY), |
| 528 | MWIFIEX_ASYNC_CMD); | 526 | MWIFIEX_ASYNC_CMD); |
| 529 | 527 | ||
| 530 | #ifdef CONFIG_PM | ||
| 531 | /* Resume handler may be called due to remote wakeup, | ||
| 532 | * force to exit suspend anyway | ||
| 533 | */ | ||
| 534 | usb_disable_autosuspend(card->udev); | ||
| 535 | #endif /* CONFIG_PM */ | ||
| 536 | |||
| 537 | return 0; | 528 | return 0; |
| 538 | } | 529 | } |
| 539 | 530 | ||
| @@ -567,13 +558,12 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf) | |||
| 567 | } | 558 | } |
| 568 | 559 | ||
| 569 | static struct usb_driver mwifiex_usb_driver = { | 560 | static struct usb_driver mwifiex_usb_driver = { |
| 570 | .name = usbdriver_name, | 561 | .name = "mwifiex_usb", |
| 571 | .probe = mwifiex_usb_probe, | 562 | .probe = mwifiex_usb_probe, |
| 572 | .disconnect = mwifiex_usb_disconnect, | 563 | .disconnect = mwifiex_usb_disconnect, |
| 573 | .id_table = mwifiex_usb_table, | 564 | .id_table = mwifiex_usb_table, |
| 574 | .suspend = mwifiex_usb_suspend, | 565 | .suspend = mwifiex_usb_suspend, |
| 575 | .resume = mwifiex_usb_resume, | 566 | .resume = mwifiex_usb_resume, |
| 576 | .supports_autosuspend = 1, | ||
| 577 | }; | 567 | }; |
| 578 | 568 | ||
| 579 | static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter) | 569 | static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter) |
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c index 13eaeed03898..981cf6e7c73b 100644 --- a/drivers/net/wireless/mwifiex/wmm.c +++ b/drivers/net/wireless/mwifiex/wmm.c | |||
| @@ -559,7 +559,8 @@ mwifiex_clean_txrx(struct mwifiex_private *priv) | |||
| 559 | mwifiex_wmm_delete_all_ralist(priv); | 559 | mwifiex_wmm_delete_all_ralist(priv); |
| 560 | memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid)); | 560 | memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid)); |
| 561 | 561 | ||
| 562 | if (priv->adapter->if_ops.clean_pcie_ring) | 562 | if (priv->adapter->if_ops.clean_pcie_ring && |
| 563 | !priv->adapter->surprise_removed) | ||
| 563 | priv->adapter->if_ops.clean_pcie_ring(priv->adapter); | 564 | priv->adapter->if_ops.clean_pcie_ring(priv->adapter); |
| 564 | spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags); | 565 | spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags); |
| 565 | } | 566 | } |
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 7f8b5d156c8c..41d4a8167dc3 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c | |||
| @@ -5460,14 +5460,15 @@ static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev) | |||
| 5460 | 5460 | ||
| 5461 | rt2800_bbp_write(rt2x00dev, 68, 0x0b); | 5461 | rt2800_bbp_write(rt2x00dev, 68, 0x0b); |
| 5462 | 5462 | ||
| 5463 | rt2800_bbp_write(rt2x00dev, 69, 0x0d); | 5463 | rt2800_bbp_write(rt2x00dev, 69, 0x12); |
| 5464 | rt2800_bbp_write(rt2x00dev, 70, 0x06); | ||
| 5465 | rt2800_bbp_write(rt2x00dev, 73, 0x13); | 5464 | rt2800_bbp_write(rt2x00dev, 73, 0x13); |
| 5466 | rt2800_bbp_write(rt2x00dev, 75, 0x46); | 5465 | rt2800_bbp_write(rt2x00dev, 75, 0x46); |
| 5467 | rt2800_bbp_write(rt2x00dev, 76, 0x28); | 5466 | rt2800_bbp_write(rt2x00dev, 76, 0x28); |
| 5468 | 5467 | ||
| 5469 | rt2800_bbp_write(rt2x00dev, 77, 0x59); | 5468 | rt2800_bbp_write(rt2x00dev, 77, 0x59); |
| 5470 | 5469 | ||
| 5470 | rt2800_bbp_write(rt2x00dev, 70, 0x0a); | ||
| 5471 | |||
| 5471 | rt2800_bbp_write(rt2x00dev, 79, 0x13); | 5472 | rt2800_bbp_write(rt2x00dev, 79, 0x13); |
| 5472 | rt2800_bbp_write(rt2x00dev, 80, 0x05); | 5473 | rt2800_bbp_write(rt2x00dev, 80, 0x05); |
| 5473 | rt2800_bbp_write(rt2x00dev, 81, 0x33); | 5474 | rt2800_bbp_write(rt2x00dev, 81, 0x33); |
| @@ -5510,7 +5511,6 @@ static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev) | |||
| 5510 | if (rt2x00_rt(rt2x00dev, RT5392)) { | 5511 | if (rt2x00_rt(rt2x00dev, RT5392)) { |
| 5511 | rt2800_bbp_write(rt2x00dev, 134, 0xd0); | 5512 | rt2800_bbp_write(rt2x00dev, 134, 0xd0); |
| 5512 | rt2800_bbp_write(rt2x00dev, 135, 0xf6); | 5513 | rt2800_bbp_write(rt2x00dev, 135, 0xf6); |
| 5513 | rt2800_bbp_write(rt2x00dev, 148, 0x84); | ||
| 5514 | } | 5514 | } |
| 5515 | 5515 | ||
| 5516 | rt2800_disable_unused_dac_adc(rt2x00dev); | 5516 | rt2800_disable_unused_dac_adc(rt2x00dev); |
diff --git a/drivers/net/wireless/ti/wl1251/rx.c b/drivers/net/wireless/ti/wl1251/rx.c index 123c4bb50e0a..cde0eaf99714 100644 --- a/drivers/net/wireless/ti/wl1251/rx.c +++ b/drivers/net/wireless/ti/wl1251/rx.c | |||
| @@ -180,7 +180,7 @@ static void wl1251_rx_body(struct wl1251 *wl, | |||
| 180 | wl1251_mem_read(wl, rx_packet_ring_addr, rx_buffer, length); | 180 | wl1251_mem_read(wl, rx_packet_ring_addr, rx_buffer, length); |
| 181 | 181 | ||
| 182 | /* The actual length doesn't include the target's alignment */ | 182 | /* The actual length doesn't include the target's alignment */ |
| 183 | skb->len = desc->length - PLCP_HEADER_LENGTH; | 183 | skb_trim(skb, desc->length - PLCP_HEADER_LENGTH); |
| 184 | 184 | ||
| 185 | fc = (u16 *)skb->data; | 185 | fc = (u16 *)skb->data; |
| 186 | 186 | ||
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 7669d49a67e2..301cc037fda8 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c | |||
| @@ -132,8 +132,7 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 132 | /* If the skb is GSO then we'll also need an extra slot for the | 132 | /* If the skb is GSO then we'll also need an extra slot for the |
| 133 | * metadata. | 133 | * metadata. |
| 134 | */ | 134 | */ |
| 135 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 || | 135 | if (skb_is_gso(skb)) |
| 136 | skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) | ||
| 137 | min_slots_needed++; | 136 | min_slots_needed++; |
| 138 | 137 | ||
| 139 | /* If the skb can't possibly fit in the remaining slots | 138 | /* If the skb can't possibly fit in the remaining slots |
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index e5284bca2d90..438d0c09b7e6 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c | |||
| @@ -240,7 +240,7 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, | |||
| 240 | struct gnttab_copy *copy_gop; | 240 | struct gnttab_copy *copy_gop; |
| 241 | struct xenvif_rx_meta *meta; | 241 | struct xenvif_rx_meta *meta; |
| 242 | unsigned long bytes; | 242 | unsigned long bytes; |
| 243 | int gso_type; | 243 | int gso_type = XEN_NETIF_GSO_TYPE_NONE; |
| 244 | 244 | ||
| 245 | /* Data must not cross a page boundary. */ | 245 | /* Data must not cross a page boundary. */ |
| 246 | BUG_ON(size + offset > PAGE_SIZE<<compound_order(page)); | 246 | BUG_ON(size + offset > PAGE_SIZE<<compound_order(page)); |
| @@ -299,12 +299,12 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, | |||
| 299 | } | 299 | } |
| 300 | 300 | ||
| 301 | /* Leave a gap for the GSO descriptor. */ | 301 | /* Leave a gap for the GSO descriptor. */ |
| 302 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) | 302 | if (skb_is_gso(skb)) { |
| 303 | gso_type = XEN_NETIF_GSO_TYPE_TCPV4; | 303 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) |
| 304 | else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) | 304 | gso_type = XEN_NETIF_GSO_TYPE_TCPV4; |
| 305 | gso_type = XEN_NETIF_GSO_TYPE_TCPV6; | 305 | else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) |
| 306 | else | 306 | gso_type = XEN_NETIF_GSO_TYPE_TCPV6; |
| 307 | gso_type = XEN_NETIF_GSO_TYPE_NONE; | 307 | } |
| 308 | 308 | ||
| 309 | if (*head && ((1 << gso_type) & vif->gso_mask)) | 309 | if (*head && ((1 << gso_type) & vif->gso_mask)) |
| 310 | vif->rx.req_cons++; | 310 | vif->rx.req_cons++; |
| @@ -338,19 +338,15 @@ static int xenvif_gop_skb(struct sk_buff *skb, | |||
| 338 | int head = 1; | 338 | int head = 1; |
| 339 | int old_meta_prod; | 339 | int old_meta_prod; |
| 340 | int gso_type; | 340 | int gso_type; |
| 341 | int gso_size; | ||
| 342 | 341 | ||
| 343 | old_meta_prod = npo->meta_prod; | 342 | old_meta_prod = npo->meta_prod; |
| 344 | 343 | ||
| 345 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { | 344 | gso_type = XEN_NETIF_GSO_TYPE_NONE; |
| 346 | gso_type = XEN_NETIF_GSO_TYPE_TCPV4; | 345 | if (skb_is_gso(skb)) { |
| 347 | gso_size = skb_shinfo(skb)->gso_size; | 346 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) |
| 348 | } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { | 347 | gso_type = XEN_NETIF_GSO_TYPE_TCPV4; |
| 349 | gso_type = XEN_NETIF_GSO_TYPE_TCPV6; | 348 | else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) |
| 350 | gso_size = skb_shinfo(skb)->gso_size; | 349 | gso_type = XEN_NETIF_GSO_TYPE_TCPV6; |
| 351 | } else { | ||
| 352 | gso_type = XEN_NETIF_GSO_TYPE_NONE; | ||
| 353 | gso_size = 0; | ||
| 354 | } | 350 | } |
| 355 | 351 | ||
| 356 | /* Set up a GSO prefix descriptor, if necessary */ | 352 | /* Set up a GSO prefix descriptor, if necessary */ |
| @@ -358,7 +354,7 @@ static int xenvif_gop_skb(struct sk_buff *skb, | |||
| 358 | req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); | 354 | req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); |
| 359 | meta = npo->meta + npo->meta_prod++; | 355 | meta = npo->meta + npo->meta_prod++; |
| 360 | meta->gso_type = gso_type; | 356 | meta->gso_type = gso_type; |
| 361 | meta->gso_size = gso_size; | 357 | meta->gso_size = skb_shinfo(skb)->gso_size; |
| 362 | meta->size = 0; | 358 | meta->size = 0; |
| 363 | meta->id = req->id; | 359 | meta->id = req->id; |
| 364 | } | 360 | } |
| @@ -368,7 +364,7 @@ static int xenvif_gop_skb(struct sk_buff *skb, | |||
| 368 | 364 | ||
| 369 | if ((1 << gso_type) & vif->gso_mask) { | 365 | if ((1 << gso_type) & vif->gso_mask) { |
| 370 | meta->gso_type = gso_type; | 366 | meta->gso_type = gso_type; |
| 371 | meta->gso_size = gso_size; | 367 | meta->gso_size = skb_shinfo(skb)->gso_size; |
| 372 | } else { | 368 | } else { |
| 373 | meta->gso_type = XEN_NETIF_GSO_TYPE_NONE; | 369 | meta->gso_type = XEN_NETIF_GSO_TYPE_NONE; |
| 374 | meta->gso_size = 0; | 370 | meta->gso_size = 0; |
| @@ -500,8 +496,9 @@ static void xenvif_rx_action(struct xenvif *vif) | |||
| 500 | size = skb_frag_size(&skb_shinfo(skb)->frags[i]); | 496 | size = skb_frag_size(&skb_shinfo(skb)->frags[i]); |
| 501 | max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE); | 497 | max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE); |
| 502 | } | 498 | } |
| 503 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 || | 499 | if (skb_is_gso(skb) && |
| 504 | skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) | 500 | (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 || |
| 501 | skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)) | ||
| 505 | max_slots_needed++; | 502 | max_slots_needed++; |
| 506 | 503 | ||
| 507 | /* If the skb may not fit then bail out now */ | 504 | /* If the skb may not fit then bail out now */ |
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index f9daa9e183f2..e30d80033cbc 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c | |||
| @@ -907,6 +907,7 @@ static int handle_incoming_queue(struct net_device *dev, | |||
| 907 | 907 | ||
| 908 | /* Ethernet work: Delayed to here as it peeks the header. */ | 908 | /* Ethernet work: Delayed to here as it peeks the header. */ |
| 909 | skb->protocol = eth_type_trans(skb, dev); | 909 | skb->protocol = eth_type_trans(skb, dev); |
| 910 | skb_reset_network_header(skb); | ||
| 910 | 911 | ||
| 911 | if (checksum_setup(dev, skb)) { | 912 | if (checksum_setup(dev, skb)) { |
| 912 | kfree_skb(skb); | 913 | kfree_skb(skb); |
diff --git a/drivers/of/base.c b/drivers/of/base.c index 89e888a78899..1b95a405628f 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c | |||
| @@ -904,6 +904,38 @@ struct device_node *of_find_node_by_phandle(phandle handle) | |||
| 904 | EXPORT_SYMBOL(of_find_node_by_phandle); | 904 | EXPORT_SYMBOL(of_find_node_by_phandle); |
| 905 | 905 | ||
| 906 | /** | 906 | /** |
| 907 | * of_property_count_elems_of_size - Count the number of elements in a property | ||
| 908 | * | ||
| 909 | * @np: device node from which the property value is to be read. | ||
| 910 | * @propname: name of the property to be searched. | ||
| 911 | * @elem_size: size of the individual element | ||
| 912 | * | ||
| 913 | * Search for a property in a device node and count the number of elements of | ||
| 914 | * size elem_size in it. Returns number of elements on success, -EINVAL if the | ||
| 915 | * property does not exist or its length is not a multiple of elem_size, | ||
| 916 | * and -ENODATA if the property does not have a value. | ||
| 917 | */ | ||
| 918 | int of_property_count_elems_of_size(const struct device_node *np, | ||
| 919 | const char *propname, int elem_size) | ||
| 920 | { | ||
| 921 | struct property *prop = of_find_property(np, propname, NULL); | ||
| 922 | |||
| 923 | if (!prop) | ||
| 924 | return -EINVAL; | ||
| 925 | if (!prop->value) | ||
| 926 | return -ENODATA; | ||
| 927 | |||
| 928 | if (prop->length % elem_size != 0) { | ||
| 929 | pr_err("size of %s in node %s is not a multiple of %d\n", | ||
| 930 | propname, np->full_name, elem_size); | ||
| 931 | return -EINVAL; | ||
| 932 | } | ||
| 933 | |||
| 934 | return prop->length / elem_size; | ||
| 935 | } | ||
| 936 | EXPORT_SYMBOL_GPL(of_property_count_elems_of_size); | ||
| 937 | |||
| 938 | /** | ||
| 907 | * of_find_property_value_of_size | 939 | * of_find_property_value_of_size |
| 908 | * | 940 | * |
| 909 | * @np: device node from which the property value is to be read. | 941 | * @np: device node from which the property value is to be read. |
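A minimal usage sketch for the of_property_count_elems_of_size() helper introduced above. The property name "vendor,example-values" and the surrounding error handling are illustrative assumptions, not part of this patch:

	int count;

	/* count how many u32 cells the property carries */
	count = of_property_count_elems_of_size(np, "vendor,example-values", sizeof(u32));
	if (count < 0)
		return count;	/* -EINVAL or -ENODATA, as documented above */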
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 00660cc502c5..38901665c770 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c | |||
| @@ -162,8 +162,6 @@ static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res, | |||
| 162 | 162 | ||
| 163 | avail = *r; | 163 | avail = *r; |
| 164 | pci_clip_resource_to_region(bus, &avail, region); | 164 | pci_clip_resource_to_region(bus, &avail, region); |
| 165 | if (!resource_size(&avail)) | ||
| 166 | continue; | ||
| 167 | 165 | ||
| 168 | /* | 166 | /* |
| 169 | * "min" is typically PCIBIOS_MIN_IO or PCIBIOS_MIN_MEM to | 167 | * "min" is typically PCIBIOS_MIN_IO or PCIBIOS_MIN_MEM to |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 6b05f6134b68..fdbc294821e6 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
| @@ -1192,6 +1192,9 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars) | |||
| 1192 | return err; | 1192 | return err; |
| 1193 | pci_fixup_device(pci_fixup_enable, dev); | 1193 | pci_fixup_device(pci_fixup_enable, dev); |
| 1194 | 1194 | ||
| 1195 | if (dev->msi_enabled || dev->msix_enabled) | ||
| 1196 | return 0; | ||
| 1197 | |||
| 1195 | pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); | 1198 | pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); |
| 1196 | if (pin) { | 1199 | if (pin) { |
| 1197 | pci_read_config_word(dev, PCI_COMMAND, &cmd); | 1200 | pci_read_config_word(dev, PCI_COMMAND, &cmd); |
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index be361b7cd30f..1e4e69384baa 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig | |||
| @@ -217,7 +217,7 @@ config PINCTRL_IMX28 | |||
| 217 | select PINCTRL_MXS | 217 | select PINCTRL_MXS |
| 218 | 218 | ||
| 219 | config PINCTRL_MSM | 219 | config PINCTRL_MSM |
| 220 | tristate | 220 | bool |
| 221 | select PINMUX | 221 | select PINMUX |
| 222 | select PINCONF | 222 | select PINCONF |
| 223 | select GENERIC_PINCONF | 223 | select GENERIC_PINCONF |
diff --git a/drivers/pinctrl/pinctrl-capri.c b/drivers/pinctrl/pinctrl-capri.c index 4669c53f99b0..eb2500212147 100644 --- a/drivers/pinctrl/pinctrl-capri.c +++ b/drivers/pinctrl/pinctrl-capri.c | |||
| @@ -1435,7 +1435,7 @@ int __init capri_pinctrl_probe(struct platform_device *pdev) | |||
| 1435 | } | 1435 | } |
| 1436 | 1436 | ||
| 1437 | static struct of_device_id capri_pinctrl_of_match[] = { | 1437 | static struct of_device_id capri_pinctrl_of_match[] = { |
| 1438 | { .compatible = "brcm,capri-pinctrl", }, | 1438 | { .compatible = "brcm,bcm11351-pinctrl", }, |
| 1439 | { }, | 1439 | { }, |
| 1440 | }; | 1440 | }; |
| 1441 | 1441 | ||
diff --git a/drivers/pinctrl/pinctrl-sunxi.c b/drivers/pinctrl/pinctrl-sunxi.c index 9ccf681dad2f..f9fabe9bf47d 100644 --- a/drivers/pinctrl/pinctrl-sunxi.c +++ b/drivers/pinctrl/pinctrl-sunxi.c | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #include <linux/clk.h> | 14 | #include <linux/clk.h> |
| 15 | #include <linux/gpio.h> | 15 | #include <linux/gpio.h> |
| 16 | #include <linux/irqdomain.h> | 16 | #include <linux/irqdomain.h> |
| 17 | #include <linux/irqchip/chained_irq.h> | ||
| 17 | #include <linux/module.h> | 18 | #include <linux/module.h> |
| 18 | #include <linux/of.h> | 19 | #include <linux/of.h> |
| 19 | #include <linux/of_address.h> | 20 | #include <linux/of_address.h> |
| @@ -584,7 +585,7 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d, | |||
| 584 | spin_lock_irqsave(&pctl->lock, flags); | 585 | spin_lock_irqsave(&pctl->lock, flags); |
| 585 | 586 | ||
| 586 | regval = readl(pctl->membase + reg); | 587 | regval = readl(pctl->membase + reg); |
| 587 | regval &= ~IRQ_CFG_IRQ_MASK; | 588 | regval &= ~(IRQ_CFG_IRQ_MASK << index); |
| 588 | writel(regval | (mode << index), pctl->membase + reg); | 589 | writel(regval | (mode << index), pctl->membase + reg); |
| 589 | 590 | ||
| 590 | spin_unlock_irqrestore(&pctl->lock, flags); | 591 | spin_unlock_irqrestore(&pctl->lock, flags); |
| @@ -665,6 +666,7 @@ static struct irq_chip sunxi_pinctrl_irq_chip = { | |||
| 665 | 666 | ||
| 666 | static void sunxi_pinctrl_irq_handler(unsigned irq, struct irq_desc *desc) | 667 | static void sunxi_pinctrl_irq_handler(unsigned irq, struct irq_desc *desc) |
| 667 | { | 668 | { |
| 669 | struct irq_chip *chip = irq_get_chip(irq); | ||
| 668 | struct sunxi_pinctrl *pctl = irq_get_handler_data(irq); | 670 | struct sunxi_pinctrl *pctl = irq_get_handler_data(irq); |
| 669 | const unsigned long reg = readl(pctl->membase + IRQ_STATUS_REG); | 671 | const unsigned long reg = readl(pctl->membase + IRQ_STATUS_REG); |
| 670 | 672 | ||
| @@ -674,10 +676,12 @@ static void sunxi_pinctrl_irq_handler(unsigned irq, struct irq_desc *desc) | |||
| 674 | if (reg) { | 676 | if (reg) { |
| 675 | int irqoffset; | 677 | int irqoffset; |
| 676 | 678 | ||
| 679 | chained_irq_enter(chip, desc); | ||
| 677 | for_each_set_bit(irqoffset, ®, SUNXI_IRQ_NUMBER) { | 680 | for_each_set_bit(irqoffset, ®, SUNXI_IRQ_NUMBER) { |
| 678 | int pin_irq = irq_find_mapping(pctl->domain, irqoffset); | 681 | int pin_irq = irq_find_mapping(pctl->domain, irqoffset); |
| 679 | generic_handle_irq(pin_irq); | 682 | generic_handle_irq(pin_irq); |
| 680 | } | 683 | } |
| 684 | chained_irq_exit(chip, desc); | ||
| 681 | } | 685 | } |
| 682 | } | 686 | } |
| 683 | 687 | ||
diff --git a/drivers/pinctrl/pinctrl-sunxi.h b/drivers/pinctrl/pinctrl-sunxi.h index 01c494f8a14f..552b0e97077a 100644 --- a/drivers/pinctrl/pinctrl-sunxi.h +++ b/drivers/pinctrl/pinctrl-sunxi.h | |||
| @@ -511,7 +511,7 @@ static inline u32 sunxi_pull_offset(u16 pin) | |||
| 511 | 511 | ||
| 512 | static inline u32 sunxi_irq_cfg_reg(u16 irq) | 512 | static inline u32 sunxi_irq_cfg_reg(u16 irq) |
| 513 | { | 513 | { |
| 514 | u8 reg = irq / IRQ_CFG_IRQ_PER_REG; | 514 | u8 reg = irq / IRQ_CFG_IRQ_PER_REG * 0x04; |
| 515 | return reg + IRQ_CFG_REG; | 515 | return reg + IRQ_CFG_REG; |
| 516 | } | 516 | } |
| 517 | 517 | ||
| @@ -523,7 +523,7 @@ static inline u32 sunxi_irq_cfg_offset(u16 irq) | |||
| 523 | 523 | ||
| 524 | static inline u32 sunxi_irq_ctrl_reg(u16 irq) | 524 | static inline u32 sunxi_irq_ctrl_reg(u16 irq) |
| 525 | { | 525 | { |
| 526 | u8 reg = irq / IRQ_CTRL_IRQ_PER_REG; | 526 | u8 reg = irq / IRQ_CTRL_IRQ_PER_REG * 0x04; |
| 527 | return reg + IRQ_CTRL_REG; | 527 | return reg + IRQ_CTRL_REG; |
| 528 | } | 528 | } |
| 529 | 529 | ||
| @@ -535,7 +535,7 @@ static inline u32 sunxi_irq_ctrl_offset(u16 irq) | |||
| 535 | 535 | ||
| 536 | static inline u32 sunxi_irq_status_reg(u16 irq) | 536 | static inline u32 sunxi_irq_status_reg(u16 irq) |
| 537 | { | 537 | { |
| 538 | u8 reg = irq / IRQ_STATUS_IRQ_PER_REG; | 538 | u8 reg = irq / IRQ_STATUS_IRQ_PER_REG * 0x04; |
| 539 | return reg + IRQ_STATUS_REG; | 539 | return reg + IRQ_STATUS_REG; |
| 540 | } | 540 | } |
| 541 | 541 | ||
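A short worked example of the corrected offset math, assuming IRQ_CFG_IRQ_PER_REG is 8 and 32-bit (4-byte) registers; the concrete numbers are illustrative:

	/* irq 10: 10 / 8 = register index 1, scaled to a byte offset of 0x04 */
	u8 reg = 10 / IRQ_CFG_IRQ_PER_REG * 0x04;
	u32 addr = reg + IRQ_CFG_REG;

Without the "* 0x04" scaling, these helpers returned a register index rather than a byte offset, so any IRQ beyond the first register was configured through the wrong register.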
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c index 77d103fe39d9..567d6918d50b 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c | |||
| @@ -89,7 +89,8 @@ enum { | |||
| 89 | 89 | ||
| 90 | /* GPSR6 */ | 90 | /* GPSR6 */ |
| 91 | FN_IP13_10, FN_IP13_11, FN_IP13_12, FN_IP13_13, FN_IP13_14, | 91 | FN_IP13_10, FN_IP13_11, FN_IP13_12, FN_IP13_13, FN_IP13_14, |
| 92 | FN_IP13_15, FN_IP13_18_16, FN_IP13_21_19, FN_IP13_22, FN_IP13_24_23, | 92 | FN_IP13_15, FN_IP13_18_16, FN_IP13_21_19, |
| 93 | FN_IP13_22, FN_IP13_24_23, FN_SD1_CLK, | ||
| 93 | FN_IP13_25, FN_IP13_26, FN_IP13_27, FN_IP13_30_28, FN_IP14_1_0, | 94 | FN_IP13_25, FN_IP13_26, FN_IP13_27, FN_IP13_30_28, FN_IP14_1_0, |
| 94 | FN_IP14_2, FN_IP14_3, FN_IP14_4, FN_IP14_5, FN_IP14_6, FN_IP14_7, | 95 | FN_IP14_2, FN_IP14_3, FN_IP14_4, FN_IP14_5, FN_IP14_6, FN_IP14_7, |
| 95 | FN_IP14_10_8, FN_IP14_13_11, FN_IP14_16_14, FN_IP14_19_17, | 96 | FN_IP14_10_8, FN_IP14_13_11, FN_IP14_16_14, FN_IP14_19_17, |
| @@ -788,6 +789,7 @@ static const u16 pinmux_data[] = { | |||
| 788 | PINMUX_DATA(USB1_PWEN_MARK, FN_USB1_PWEN), | 789 | PINMUX_DATA(USB1_PWEN_MARK, FN_USB1_PWEN), |
| 789 | PINMUX_DATA(USB1_OVC_MARK, FN_USB1_OVC), | 790 | PINMUX_DATA(USB1_OVC_MARK, FN_USB1_OVC), |
| 790 | PINMUX_DATA(DU0_DOTCLKIN_MARK, FN_DU0_DOTCLKIN), | 791 | PINMUX_DATA(DU0_DOTCLKIN_MARK, FN_DU0_DOTCLKIN), |
| 792 | PINMUX_DATA(SD1_CLK_MARK, FN_SD1_CLK), | ||
| 791 | 793 | ||
| 792 | /* IPSR0 */ | 794 | /* IPSR0 */ |
| 793 | PINMUX_IPSR_DATA(IP0_0, D0), | 795 | PINMUX_IPSR_DATA(IP0_0, D0), |
| @@ -3825,7 +3827,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { | |||
| 3825 | GP_6_11_FN, FN_IP13_25, | 3827 | GP_6_11_FN, FN_IP13_25, |
| 3826 | GP_6_10_FN, FN_IP13_24_23, | 3828 | GP_6_10_FN, FN_IP13_24_23, |
| 3827 | GP_6_9_FN, FN_IP13_22, | 3829 | GP_6_9_FN, FN_IP13_22, |
| 3828 | 0, 0, | 3830 | GP_6_8_FN, FN_SD1_CLK, |
| 3829 | GP_6_7_FN, FN_IP13_21_19, | 3831 | GP_6_7_FN, FN_IP13_21_19, |
| 3830 | GP_6_6_FN, FN_IP13_18_16, | 3832 | GP_6_6_FN, FN_IP13_18_16, |
| 3831 | GP_6_5_FN, FN_IP13_15, | 3833 | GP_6_5_FN, FN_IP13_15, |
diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c index a0d6152701cd..617a4916b50f 100644 --- a/drivers/pinctrl/sirf/pinctrl-sirf.c +++ b/drivers/pinctrl/sirf/pinctrl-sirf.c | |||
| @@ -598,7 +598,7 @@ static unsigned int sirfsoc_gpio_irq_startup(struct irq_data *d) | |||
| 598 | { | 598 | { |
| 599 | struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d); | 599 | struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d); |
| 600 | 600 | ||
| 601 | if (gpio_lock_as_irq(&bank->chip.gc, d->hwirq)) | 601 | if (gpio_lock_as_irq(&bank->chip.gc, d->hwirq % SIRFSOC_GPIO_BANK_SIZE)) |
| 602 | dev_err(bank->chip.gc.dev, | 602 | dev_err(bank->chip.gc.dev, |
| 603 | "unable to lock HW IRQ %lu for IRQ\n", | 603 | "unable to lock HW IRQ %lu for IRQ\n", |
| 604 | d->hwirq); | 604 | d->hwirq); |
| @@ -611,7 +611,7 @@ static void sirfsoc_gpio_irq_shutdown(struct irq_data *d) | |||
| 611 | struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d); | 611 | struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d); |
| 612 | 612 | ||
| 613 | sirfsoc_gpio_irq_mask(d); | 613 | sirfsoc_gpio_irq_mask(d); |
| 614 | gpio_unlock_as_irq(&bank->chip.gc, d->hwirq); | 614 | gpio_unlock_as_irq(&bank->chip.gc, d->hwirq % SIRFSOC_GPIO_BANK_SIZE); |
| 615 | } | 615 | } |
| 616 | 616 | ||
| 617 | static struct irq_chip sirfsoc_irq_chip = { | 617 | static struct irq_chip sirfsoc_irq_chip = { |
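For context, a sketch of why the modulo is needed, assuming SIRFSOC_GPIO_BANK_SIZE is 32 and that each bank registers its own gpio_chip (as the surrounding code suggests): d->hwirq numbers pins across all banks, while gpio_lock_as_irq()/gpio_unlock_as_irq() expect an offset local to the bank's chip.

	/* e.g. hwirq 35 sits in the second bank; its in-bank offset is 35 % 32 = 3 */
	unsigned int offset = d->hwirq % SIRFSOC_GPIO_BANK_SIZE;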
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c index 167f3d00c916..66977ebf13b3 100644 --- a/drivers/pnp/pnpacpi/rsparser.c +++ b/drivers/pnp/pnpacpi/rsparser.c | |||
| @@ -183,9 +183,7 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, | |||
| 183 | struct resource r = {0}; | 183 | struct resource r = {0}; |
| 184 | int i, flags; | 184 | int i, flags; |
| 185 | 185 | ||
| 186 | if (acpi_dev_resource_memory(res, &r) | 186 | if (acpi_dev_resource_address_space(res, &r) |
| 187 | || acpi_dev_resource_io(res, &r) | ||
| 188 | || acpi_dev_resource_address_space(res, &r) | ||
| 189 | || acpi_dev_resource_ext_address_space(res, &r)) { | 187 | || acpi_dev_resource_ext_address_space(res, &r)) { |
| 190 | pnp_add_resource(dev, &r); | 188 | pnp_add_resource(dev, &r); |
| 191 | return AE_OK; | 189 | return AE_OK; |
| @@ -217,6 +215,17 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, | |||
| 217 | } | 215 | } |
| 218 | 216 | ||
| 219 | switch (res->type) { | 217 | switch (res->type) { |
| 218 | case ACPI_RESOURCE_TYPE_MEMORY24: | ||
| 219 | case ACPI_RESOURCE_TYPE_MEMORY32: | ||
| 220 | case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: | ||
| 221 | if (acpi_dev_resource_memory(res, &r)) | ||
| 222 | pnp_add_resource(dev, &r); | ||
| 223 | break; | ||
| 224 | case ACPI_RESOURCE_TYPE_IO: | ||
| 225 | case ACPI_RESOURCE_TYPE_FIXED_IO: | ||
| 226 | if (acpi_dev_resource_io(res, &r)) | ||
| 227 | pnp_add_resource(dev, &r); | ||
| 228 | break; | ||
| 220 | case ACPI_RESOURCE_TYPE_DMA: | 229 | case ACPI_RESOURCE_TYPE_DMA: |
| 221 | dma = &res->data.dma; | 230 | dma = &res->data.dma; |
| 222 | if (dma->channel_count > 0 && dma->channels[0] != (u8) -1) | 231 | if (dma->channel_count > 0 && dma->channels[0] != (u8) -1) |
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h index b4b0d83f9ef6..7061ac0ad428 100644 --- a/drivers/rapidio/devices/tsi721.h +++ b/drivers/rapidio/devices/tsi721.h | |||
| @@ -678,6 +678,7 @@ struct tsi721_bdma_chan { | |||
| 678 | struct list_head free_list; | 678 | struct list_head free_list; |
| 679 | dma_cookie_t completed_cookie; | 679 | dma_cookie_t completed_cookie; |
| 680 | struct tasklet_struct tasklet; | 680 | struct tasklet_struct tasklet; |
| 681 | bool active; | ||
| 681 | }; | 682 | }; |
| 682 | 683 | ||
| 683 | #endif /* CONFIG_RAPIDIO_DMA_ENGINE */ | 684 | #endif /* CONFIG_RAPIDIO_DMA_ENGINE */ |
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c index 502663f5f7c6..91245f5dbe81 100644 --- a/drivers/rapidio/devices/tsi721_dma.c +++ b/drivers/rapidio/devices/tsi721_dma.c | |||
| @@ -206,8 +206,8 @@ void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan) | |||
| 206 | { | 206 | { |
| 207 | /* Disable BDMA channel interrupts */ | 207 | /* Disable BDMA channel interrupts */ |
| 208 | iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE); | 208 | iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE); |
| 209 | 209 | if (bdma_chan->active) | |
| 210 | tasklet_schedule(&bdma_chan->tasklet); | 210 | tasklet_schedule(&bdma_chan->tasklet); |
| 211 | } | 211 | } |
| 212 | 212 | ||
| 213 | #ifdef CONFIG_PCI_MSI | 213 | #ifdef CONFIG_PCI_MSI |
| @@ -562,7 +562,7 @@ static int tsi721_alloc_chan_resources(struct dma_chan *dchan) | |||
| 562 | } | 562 | } |
| 563 | #endif /* CONFIG_PCI_MSI */ | 563 | #endif /* CONFIG_PCI_MSI */ |
| 564 | 564 | ||
| 565 | tasklet_enable(&bdma_chan->tasklet); | 565 | bdma_chan->active = true; |
| 566 | tsi721_bdma_interrupt_enable(bdma_chan, 1); | 566 | tsi721_bdma_interrupt_enable(bdma_chan, 1); |
| 567 | 567 | ||
| 568 | return bdma_chan->bd_num - 1; | 568 | return bdma_chan->bd_num - 1; |
| @@ -576,9 +576,7 @@ err_out: | |||
| 576 | static void tsi721_free_chan_resources(struct dma_chan *dchan) | 576 | static void tsi721_free_chan_resources(struct dma_chan *dchan) |
| 577 | { | 577 | { |
| 578 | struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); | 578 | struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); |
| 579 | #ifdef CONFIG_PCI_MSI | ||
| 580 | struct tsi721_device *priv = to_tsi721(dchan->device); | 579 | struct tsi721_device *priv = to_tsi721(dchan->device); |
| 581 | #endif | ||
| 582 | LIST_HEAD(list); | 580 | LIST_HEAD(list); |
| 583 | 581 | ||
| 584 | dev_dbg(dchan->device->dev, "%s: Entry\n", __func__); | 582 | dev_dbg(dchan->device->dev, "%s: Entry\n", __func__); |
| @@ -589,14 +587,25 @@ static void tsi721_free_chan_resources(struct dma_chan *dchan) | |||
| 589 | BUG_ON(!list_empty(&bdma_chan->active_list)); | 587 | BUG_ON(!list_empty(&bdma_chan->active_list)); |
| 590 | BUG_ON(!list_empty(&bdma_chan->queue)); | 588 | BUG_ON(!list_empty(&bdma_chan->queue)); |
| 591 | 589 | ||
| 592 | tasklet_disable(&bdma_chan->tasklet); | 590 | tsi721_bdma_interrupt_enable(bdma_chan, 0); |
| 591 | bdma_chan->active = false; | ||
| 592 | |||
| 593 | #ifdef CONFIG_PCI_MSI | ||
| 594 | if (priv->flags & TSI721_USING_MSIX) { | ||
| 595 | synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE + | ||
| 596 | bdma_chan->id].vector); | ||
| 597 | synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT + | ||
| 598 | bdma_chan->id].vector); | ||
| 599 | } else | ||
| 600 | #endif | ||
| 601 | synchronize_irq(priv->pdev->irq); | ||
| 602 | |||
| 603 | tasklet_kill(&bdma_chan->tasklet); | ||
| 593 | 604 | ||
| 594 | spin_lock_bh(&bdma_chan->lock); | 605 | spin_lock_bh(&bdma_chan->lock); |
| 595 | list_splice_init(&bdma_chan->free_list, &list); | 606 | list_splice_init(&bdma_chan->free_list, &list); |
| 596 | spin_unlock_bh(&bdma_chan->lock); | 607 | spin_unlock_bh(&bdma_chan->lock); |
| 597 | 608 | ||
| 598 | tsi721_bdma_interrupt_enable(bdma_chan, 0); | ||
| 599 | |||
| 600 | #ifdef CONFIG_PCI_MSI | 609 | #ifdef CONFIG_PCI_MSI |
| 601 | if (priv->flags & TSI721_USING_MSIX) { | 610 | if (priv->flags & TSI721_USING_MSIX) { |
| 602 | free_irq(priv->msix[TSI721_VECT_DMA0_DONE + | 611 | free_irq(priv->msix[TSI721_VECT_DMA0_DONE + |
| @@ -790,6 +799,7 @@ int tsi721_register_dma(struct tsi721_device *priv) | |||
| 790 | bdma_chan->dchan.cookie = 1; | 799 | bdma_chan->dchan.cookie = 1; |
| 791 | bdma_chan->dchan.chan_id = i; | 800 | bdma_chan->dchan.chan_id = i; |
| 792 | bdma_chan->id = i; | 801 | bdma_chan->id = i; |
| 802 | bdma_chan->active = false; | ||
| 793 | 803 | ||
| 794 | spin_lock_init(&bdma_chan->lock); | 804 | spin_lock_init(&bdma_chan->lock); |
| 795 | 805 | ||
| @@ -799,7 +809,6 @@ int tsi721_register_dma(struct tsi721_device *priv) | |||
| 799 | 809 | ||
| 800 | tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet, | 810 | tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet, |
| 801 | (unsigned long)bdma_chan); | 811 | (unsigned long)bdma_chan); |
| 802 | tasklet_disable(&bdma_chan->tasklet); | ||
| 803 | list_add_tail(&bdma_chan->dchan.device_node, | 812 | list_add_tail(&bdma_chan->dchan.device_node, |
| 804 | &mport->dma.channels); | 813 | &mport->dma.channels); |
| 805 | } | 814 | } |
diff --git a/drivers/regulator/88pm800.c b/drivers/regulator/88pm800.c index d333f7eac106..7a721d67e6ac 100644 --- a/drivers/regulator/88pm800.c +++ b/drivers/regulator/88pm800.c | |||
| @@ -310,10 +310,8 @@ static int pm800_regulator_probe(struct platform_device *pdev) | |||
| 310 | 310 | ||
| 311 | pm800_data = devm_kzalloc(&pdev->dev, sizeof(*pm800_data), | 311 | pm800_data = devm_kzalloc(&pdev->dev, sizeof(*pm800_data), |
| 312 | GFP_KERNEL); | 312 | GFP_KERNEL); |
| 313 | if (!pm800_data) { | 313 | if (!pm800_data) |
| 314 | dev_err(&pdev->dev, "Failed to allocate pm800_regualtors"); | ||
| 315 | return -ENOMEM; | 314 | return -ENOMEM; |
| 316 | } | ||
| 317 | 315 | ||
| 318 | pm800_data->map = chip->subchip->regmap_power; | 316 | pm800_data->map = chip->subchip->regmap_power; |
| 319 | pm800_data->chip = chip; | 317 | pm800_data->chip = chip; |
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c index f704d83c93c4..337634ad0562 100644 --- a/drivers/regulator/88pm8607.c +++ b/drivers/regulator/88pm8607.c | |||
| @@ -2,7 +2,7 @@ | |||
| 2 | * Regulators driver for Marvell 88PM8607 | 2 | * Regulators driver for Marvell 88PM8607 |
| 3 | * | 3 | * |
| 4 | * Copyright (C) 2009 Marvell International Ltd. | 4 | * Copyright (C) 2009 Marvell International Ltd. |
| 5 | * Haojian Zhuang <haojian.zhuang@marvell.com> | 5 | * Haojian Zhuang <haojian.zhuang@marvell.com> |
| 6 | * | 6 | * |
| 7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
| 8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
| @@ -78,7 +78,7 @@ static const unsigned int BUCK2_suspend_table[] = { | |||
| 78 | }; | 78 | }; |
| 79 | 79 | ||
| 80 | static const unsigned int BUCK3_table[] = { | 80 | static const unsigned int BUCK3_table[] = { |
| 81 | 0, 25000, 50000, 75000, 100000, 125000, 150000, 175000, | 81 | 0, 25000, 50000, 75000, 100000, 125000, 150000, 175000, |
| 82 | 200000, 225000, 250000, 275000, 300000, 325000, 350000, 375000, | 82 | 200000, 225000, 250000, 275000, 300000, 325000, 350000, 375000, |
| 83 | 400000, 425000, 450000, 475000, 500000, 525000, 550000, 575000, | 83 | 400000, 425000, 450000, 475000, 500000, 525000, 550000, 575000, |
| 84 | 600000, 625000, 650000, 675000, 700000, 725000, 750000, 775000, | 84 | 600000, 625000, 650000, 675000, 700000, 725000, 750000, 775000, |
| @@ -89,7 +89,7 @@ static const unsigned int BUCK3_table[] = { | |||
| 89 | }; | 89 | }; |
| 90 | 90 | ||
| 91 | static const unsigned int BUCK3_suspend_table[] = { | 91 | static const unsigned int BUCK3_suspend_table[] = { |
| 92 | 0, 25000, 50000, 75000, 100000, 125000, 150000, 175000, | 92 | 0, 25000, 50000, 75000, 100000, 125000, 150000, 175000, |
| 93 | 200000, 225000, 250000, 275000, 300000, 325000, 350000, 375000, | 93 | 200000, 225000, 250000, 275000, 300000, 325000, 350000, 375000, |
| 94 | 400000, 425000, 450000, 475000, 500000, 525000, 550000, 575000, | 94 | 400000, 425000, 450000, 475000, 500000, 525000, 550000, 575000, |
| 95 | 600000, 625000, 650000, 675000, 700000, 725000, 750000, 775000, | 95 | 600000, 625000, 650000, 675000, 700000, 725000, 750000, 775000, |
| @@ -322,7 +322,7 @@ static int pm8607_regulator_dt_init(struct platform_device *pdev, | |||
| 322 | nproot = of_node_get(pdev->dev.parent->of_node); | 322 | nproot = of_node_get(pdev->dev.parent->of_node); |
| 323 | if (!nproot) | 323 | if (!nproot) |
| 324 | return -ENODEV; | 324 | return -ENODEV; |
| 325 | nproot = of_find_node_by_name(nproot, "regulators"); | 325 | nproot = of_get_child_by_name(nproot, "regulators"); |
| 326 | if (!nproot) { | 326 | if (!nproot) { |
| 327 | dev_err(&pdev->dev, "failed to find regulators node\n"); | 327 | dev_err(&pdev->dev, "failed to find regulators node\n"); |
| 328 | return -ENODEV; | 328 | return -ENODEV; |
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig index 69e6bf755e00..e5e4017b1011 100644 --- a/drivers/regulator/Kconfig +++ b/drivers/regulator/Kconfig | |||
| @@ -139,6 +139,14 @@ config REGULATOR_AS3722 | |||
| 139 | AS3722 PMIC. This will enable support for all the software | 139 | AS3722 PMIC. This will enable support for all the software |
| 140 | controllable DCDC/LDO regulators. | 140 | controllable DCDC/LDO regulators. |
| 141 | 141 | ||
| 142 | config REGULATOR_BCM590XX | ||
| 143 | tristate "Broadcom BCM590xx PMU Regulators" | ||
| 144 | depends on MFD_BCM590XX | ||
| 145 | help | ||
| 146 | This driver provides support for the voltage regulators on the | ||
| 147 | BCM590xx PMUs. This will enable support for the software | ||
| 148 | controllable LDO/Switching regulators. | ||
| 149 | |||
| 142 | config REGULATOR_DA903X | 150 | config REGULATOR_DA903X |
| 143 | tristate "Dialog Semiconductor DA9030/DA9034 regulators" | 151 | tristate "Dialog Semiconductor DA9030/DA9034 regulators" |
| 144 | depends on PMIC_DA903X | 152 | depends on PMIC_DA903X |
| @@ -399,12 +407,12 @@ config REGULATOR_PCF50633 | |||
| 399 | on PCF50633 | 407 | on PCF50633 |
| 400 | 408 | ||
| 401 | config REGULATOR_PFUZE100 | 409 | config REGULATOR_PFUZE100 |
| 402 | tristate "Freescale PFUZE100 regulator driver" | 410 | tristate "Freescale PFUZE100/PFUZE200 regulator driver" |
| 403 | depends on I2C | 411 | depends on I2C |
| 404 | select REGMAP_I2C | 412 | select REGMAP_I2C |
| 405 | help | 413 | help |
| 406 | Say y here to support the regulators found on the Freescale PFUZE100 | 414 | Say y here to support the regulators found on the Freescale |
| 407 | PMIC. | 415 | PFUZE100/PFUZE200 PMIC. |
| 408 | 416 | ||
| 409 | config REGULATOR_RC5T583 | 417 | config REGULATOR_RC5T583 |
| 410 | tristate "RICOH RC5T583 Power regulators" | 418 | tristate "RICOH RC5T583 Power regulators" |
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile index b3ece84289cf..c3416728c14d 100644 --- a/drivers/regulator/Makefile +++ b/drivers/regulator/Makefile | |||
| @@ -20,6 +20,7 @@ obj-$(CONFIG_REGULATOR_ANATOP) += anatop-regulator.o | |||
| 20 | obj-$(CONFIG_REGULATOR_ARIZONA) += arizona-micsupp.o arizona-ldo1.o | 20 | obj-$(CONFIG_REGULATOR_ARIZONA) += arizona-micsupp.o arizona-ldo1.o |
| 21 | obj-$(CONFIG_REGULATOR_AS3711) += as3711-regulator.o | 21 | obj-$(CONFIG_REGULATOR_AS3711) += as3711-regulator.o |
| 22 | obj-$(CONFIG_REGULATOR_AS3722) += as3722-regulator.o | 22 | obj-$(CONFIG_REGULATOR_AS3722) += as3722-regulator.o |
| 23 | obj-$(CONFIG_REGULATOR_BCM590XX) += bcm590xx-regulator.o | ||
| 23 | obj-$(CONFIG_REGULATOR_DA903X) += da903x.o | 24 | obj-$(CONFIG_REGULATOR_DA903X) += da903x.o |
| 24 | obj-$(CONFIG_REGULATOR_DA9052) += da9052-regulator.o | 25 | obj-$(CONFIG_REGULATOR_DA9052) += da9052-regulator.o |
| 25 | obj-$(CONFIG_REGULATOR_DA9055) += da9055-regulator.o | 26 | obj-$(CONFIG_REGULATOR_DA9055) += da9055-regulator.o |
diff --git a/drivers/regulator/aat2870-regulator.c b/drivers/regulator/aat2870-regulator.c index f70a9bfa5ff2..c873ee0082cf 100644 --- a/drivers/regulator/aat2870-regulator.c +++ b/drivers/regulator/aat2870-regulator.c | |||
| @@ -99,6 +99,7 @@ static int aat2870_ldo_is_enabled(struct regulator_dev *rdev) | |||
| 99 | 99 | ||
| 100 | static struct regulator_ops aat2870_ldo_ops = { | 100 | static struct regulator_ops aat2870_ldo_ops = { |
| 101 | .list_voltage = regulator_list_voltage_table, | 101 | .list_voltage = regulator_list_voltage_table, |
| 102 | .map_voltage = regulator_map_voltage_ascend, | ||
| 102 | .set_voltage_sel = aat2870_ldo_set_voltage_sel, | 103 | .set_voltage_sel = aat2870_ldo_set_voltage_sel, |
| 103 | .get_voltage_sel = aat2870_ldo_get_voltage_sel, | 104 | .get_voltage_sel = aat2870_ldo_get_voltage_sel, |
| 104 | .enable = aat2870_ldo_enable, | 105 | .enable = aat2870_ldo_enable, |
diff --git a/drivers/regulator/act8865-regulator.c b/drivers/regulator/act8865-regulator.c index 084cc0819a52..b92d7dd01a18 100644 --- a/drivers/regulator/act8865-regulator.c +++ b/drivers/regulator/act8865-regulator.c | |||
| @@ -62,7 +62,6 @@ | |||
| 62 | #define ACT8865_VOLTAGE_NUM 64 | 62 | #define ACT8865_VOLTAGE_NUM 64 |
| 63 | 63 | ||
| 64 | struct act8865 { | 64 | struct act8865 { |
| 65 | struct regulator_dev *rdev[ACT8865_REG_NUM]; | ||
| 66 | struct regmap *regmap; | 65 | struct regmap *regmap; |
| 67 | }; | 66 | }; |
| 68 | 67 | ||
| @@ -213,7 +212,7 @@ static int act8865_pdata_from_dt(struct device *dev, | |||
| 213 | struct device_node *np; | 212 | struct device_node *np; |
| 214 | struct act8865_regulator_data *regulator; | 213 | struct act8865_regulator_data *regulator; |
| 215 | 214 | ||
| 216 | np = of_find_node_by_name(dev->of_node, "regulators"); | 215 | np = of_get_child_by_name(dev->of_node, "regulators"); |
| 217 | if (!np) { | 216 | if (!np) { |
| 218 | dev_err(dev, "missing 'regulators' subnode in DT\n"); | 217 | dev_err(dev, "missing 'regulators' subnode in DT\n"); |
| 219 | return -EINVAL; | 218 | return -EINVAL; |
| @@ -221,17 +220,15 @@ static int act8865_pdata_from_dt(struct device *dev, | |||
| 221 | 220 | ||
| 222 | matched = of_regulator_match(dev, np, | 221 | matched = of_regulator_match(dev, np, |
| 223 | act8865_matches, ARRAY_SIZE(act8865_matches)); | 222 | act8865_matches, ARRAY_SIZE(act8865_matches)); |
| 223 | of_node_put(np); | ||
| 224 | if (matched <= 0) | 224 | if (matched <= 0) |
| 225 | return matched; | 225 | return matched; |
| 226 | 226 | ||
| 227 | pdata->regulators = devm_kzalloc(dev, | 227 | pdata->regulators = devm_kzalloc(dev, |
| 228 | sizeof(struct act8865_regulator_data) * | 228 | sizeof(struct act8865_regulator_data) * |
| 229 | ARRAY_SIZE(act8865_matches), GFP_KERNEL); | 229 | ARRAY_SIZE(act8865_matches), GFP_KERNEL); |
| 230 | if (!pdata->regulators) { | 230 | if (!pdata->regulators) |
| 231 | dev_err(dev, "%s: failed to allocate act8865 registor\n", | ||
| 232 | __func__); | ||
| 233 | return -ENOMEM; | 231 | return -ENOMEM; |
| 234 | } | ||
| 235 | 232 | ||
| 236 | pdata->num_regulators = matched; | 233 | pdata->num_regulators = matched; |
| 237 | regulator = pdata->regulators; | 234 | regulator = pdata->regulators; |
| @@ -258,7 +255,7 @@ static inline int act8865_pdata_from_dt(struct device *dev, | |||
| 258 | static int act8865_pmic_probe(struct i2c_client *client, | 255 | static int act8865_pmic_probe(struct i2c_client *client, |
| 259 | const struct i2c_device_id *i2c_id) | 256 | const struct i2c_device_id *i2c_id) |
| 260 | { | 257 | { |
| 261 | struct regulator_dev **rdev; | 258 | struct regulator_dev *rdev; |
| 262 | struct device *dev = &client->dev; | 259 | struct device *dev = &client->dev; |
| 263 | struct act8865_platform_data *pdata = dev_get_platdata(dev); | 260 | struct act8865_platform_data *pdata = dev_get_platdata(dev); |
| 264 | struct regulator_config config = { }; | 261 | struct regulator_config config = { }; |
| @@ -292,8 +289,6 @@ static int act8865_pmic_probe(struct i2c_client *client, | |||
| 292 | if (!act8865) | 289 | if (!act8865) |
| 293 | return -ENOMEM; | 290 | return -ENOMEM; |
| 294 | 291 | ||
| 295 | rdev = act8865->rdev; | ||
| 296 | |||
| 297 | act8865->regmap = devm_regmap_init_i2c(client, &act8865_regmap_config); | 292 | act8865->regmap = devm_regmap_init_i2c(client, &act8865_regmap_config); |
| 298 | if (IS_ERR(act8865->regmap)) { | 293 | if (IS_ERR(act8865->regmap)) { |
| 299 | error = PTR_ERR(act8865->regmap); | 294 | error = PTR_ERR(act8865->regmap); |
| @@ -313,12 +308,12 @@ static int act8865_pmic_probe(struct i2c_client *client, | |||
| 313 | config.driver_data = act8865; | 308 | config.driver_data = act8865; |
| 314 | config.regmap = act8865->regmap; | 309 | config.regmap = act8865->regmap; |
| 315 | 310 | ||
| 316 | rdev[i] = devm_regulator_register(&client->dev, | 311 | rdev = devm_regulator_register(&client->dev, &act8865_reg[i], |
| 317 | &act8865_reg[i], &config); | 312 | &config); |
| 318 | if (IS_ERR(rdev[i])) { | 313 | if (IS_ERR(rdev)) { |
| 319 | dev_err(dev, "failed to register %s\n", | 314 | dev_err(dev, "failed to register %s\n", |
| 320 | act8865_reg[id].name); | 315 | act8865_reg[id].name); |
| 321 | return PTR_ERR(rdev[i]); | 316 | return PTR_ERR(rdev); |
| 322 | } | 317 | } |
| 323 | } | 318 | } |
| 324 | 319 | ||
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c index 862e63e451d0..7c397bb81e01 100644 --- a/drivers/regulator/anatop-regulator.c +++ b/drivers/regulator/anatop-regulator.c | |||
| @@ -34,6 +34,9 @@ | |||
| 34 | #define LDO_RAMP_UP_UNIT_IN_CYCLES 64 /* 64 cycles per step */ | 34 | #define LDO_RAMP_UP_UNIT_IN_CYCLES 64 /* 64 cycles per step */ |
| 35 | #define LDO_RAMP_UP_FREQ_IN_MHZ 24 /* cycle based on 24M OSC */ | 35 | #define LDO_RAMP_UP_FREQ_IN_MHZ 24 /* cycle based on 24M OSC */ |
| 36 | 36 | ||
| 37 | #define LDO_POWER_GATE 0x00 | ||
| 38 | #define LDO_FET_FULL_ON 0x1f | ||
| 39 | |||
| 37 | struct anatop_regulator { | 40 | struct anatop_regulator { |
| 38 | const char *name; | 41 | const char *name; |
| 39 | u32 control_reg; | 42 | u32 control_reg; |
| @@ -48,19 +51,10 @@ struct anatop_regulator { | |||
| 48 | int max_voltage; | 51 | int max_voltage; |
| 49 | struct regulator_desc rdesc; | 52 | struct regulator_desc rdesc; |
| 50 | struct regulator_init_data *initdata; | 53 | struct regulator_init_data *initdata; |
| 54 | bool bypass; | ||
| 55 | int sel; | ||
| 51 | }; | 56 | }; |
| 52 | 57 | ||
| 53 | static int anatop_regmap_set_voltage_sel(struct regulator_dev *reg, | ||
| 54 | unsigned selector) | ||
| 55 | { | ||
| 56 | struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg); | ||
| 57 | |||
| 58 | if (!anatop_reg->control_reg) | ||
| 59 | return -ENOTSUPP; | ||
| 60 | |||
| 61 | return regulator_set_voltage_sel_regmap(reg, selector); | ||
| 62 | } | ||
| 63 | |||
| 64 | static int anatop_regmap_set_voltage_time_sel(struct regulator_dev *reg, | 58 | static int anatop_regmap_set_voltage_time_sel(struct regulator_dev *reg, |
| 65 | unsigned int old_sel, | 59 | unsigned int old_sel, |
| 66 | unsigned int new_sel) | 60 | unsigned int new_sel) |
| @@ -87,22 +81,99 @@ static int anatop_regmap_set_voltage_time_sel(struct regulator_dev *reg, | |||
| 87 | return ret; | 81 | return ret; |
| 88 | } | 82 | } |
| 89 | 83 | ||
| 90 | static int anatop_regmap_get_voltage_sel(struct regulator_dev *reg) | 84 | static int anatop_regmap_enable(struct regulator_dev *reg) |
| 91 | { | 85 | { |
| 92 | struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg); | 86 | struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg); |
| 87 | int sel; | ||
| 93 | 88 | ||
| 94 | if (!anatop_reg->control_reg) | 89 | sel = anatop_reg->bypass ? LDO_FET_FULL_ON : anatop_reg->sel; |
| 95 | return -ENOTSUPP; | 90 | return regulator_set_voltage_sel_regmap(reg, sel); |
| 91 | } | ||
| 92 | |||
| 93 | static int anatop_regmap_disable(struct regulator_dev *reg) | ||
| 94 | { | ||
| 95 | return regulator_set_voltage_sel_regmap(reg, LDO_POWER_GATE); | ||
| 96 | } | ||
| 97 | |||
| 98 | static int anatop_regmap_is_enabled(struct regulator_dev *reg) | ||
| 99 | { | ||
| 100 | return regulator_get_voltage_sel_regmap(reg) != LDO_POWER_GATE; | ||
| 101 | } | ||
| 102 | |||
| 103 | static int anatop_regmap_core_set_voltage_sel(struct regulator_dev *reg, | ||
| 104 | unsigned selector) | ||
| 105 | { | ||
| 106 | struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg); | ||
| 107 | int ret; | ||
| 108 | |||
| 109 | if (anatop_reg->bypass || !anatop_regmap_is_enabled(reg)) { | ||
| 110 | anatop_reg->sel = selector; | ||
| 111 | return 0; | ||
| 112 | } | ||
| 113 | |||
| 114 | ret = regulator_set_voltage_sel_regmap(reg, selector); | ||
| 115 | if (!ret) | ||
| 116 | anatop_reg->sel = selector; | ||
| 117 | return ret; | ||
| 118 | } | ||
| 119 | |||
| 120 | static int anatop_regmap_core_get_voltage_sel(struct regulator_dev *reg) | ||
| 121 | { | ||
| 122 | struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg); | ||
| 123 | |||
| 124 | if (anatop_reg->bypass || !anatop_regmap_is_enabled(reg)) | ||
| 125 | return anatop_reg->sel; | ||
| 96 | 126 | ||
| 97 | return regulator_get_voltage_sel_regmap(reg); | 127 | return regulator_get_voltage_sel_regmap(reg); |
| 98 | } | 128 | } |
| 99 | 129 | ||
| 130 | static int anatop_regmap_get_bypass(struct regulator_dev *reg, bool *enable) | ||
| 131 | { | ||
| 132 | struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg); | ||
| 133 | int sel; | ||
| 134 | |||
| 135 | sel = regulator_get_voltage_sel_regmap(reg); | ||
| 136 | if (sel == LDO_FET_FULL_ON) | ||
| 137 | WARN_ON(!anatop_reg->bypass); | ||
| 138 | else if (sel != LDO_POWER_GATE) | ||
| 139 | WARN_ON(anatop_reg->bypass); | ||
| 140 | |||
| 141 | *enable = anatop_reg->bypass; | ||
| 142 | return 0; | ||
| 143 | } | ||
| 144 | |||
| 145 | static int anatop_regmap_set_bypass(struct regulator_dev *reg, bool enable) | ||
| 146 | { | ||
| 147 | struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg); | ||
| 148 | int sel; | ||
| 149 | |||
| 150 | if (enable == anatop_reg->bypass) | ||
| 151 | return 0; | ||
| 152 | |||
| 153 | sel = enable ? LDO_FET_FULL_ON : anatop_reg->sel; | ||
| 154 | anatop_reg->bypass = enable; | ||
| 155 | |||
| 156 | return regulator_set_voltage_sel_regmap(reg, sel); | ||
| 157 | } | ||
| 158 | |||
| 100 | static struct regulator_ops anatop_rops = { | 159 | static struct regulator_ops anatop_rops = { |
| 101 | .set_voltage_sel = anatop_regmap_set_voltage_sel, | 160 | .set_voltage_sel = regulator_set_voltage_sel_regmap, |
| 161 | .get_voltage_sel = regulator_get_voltage_sel_regmap, | ||
| 162 | .list_voltage = regulator_list_voltage_linear, | ||
| 163 | .map_voltage = regulator_map_voltage_linear, | ||
| 164 | }; | ||
| 165 | |||
| 166 | static struct regulator_ops anatop_core_rops = { | ||
| 167 | .enable = anatop_regmap_enable, | ||
| 168 | .disable = anatop_regmap_disable, | ||
| 169 | .is_enabled = anatop_regmap_is_enabled, | ||
| 170 | .set_voltage_sel = anatop_regmap_core_set_voltage_sel, | ||
| 102 | .set_voltage_time_sel = anatop_regmap_set_voltage_time_sel, | 171 | .set_voltage_time_sel = anatop_regmap_set_voltage_time_sel, |
| 103 | .get_voltage_sel = anatop_regmap_get_voltage_sel, | 172 | .get_voltage_sel = anatop_regmap_core_get_voltage_sel, |
| 104 | .list_voltage = regulator_list_voltage_linear, | 173 | .list_voltage = regulator_list_voltage_linear, |
| 105 | .map_voltage = regulator_map_voltage_linear, | 174 | .map_voltage = regulator_map_voltage_linear, |
| 175 | .get_bypass = anatop_regmap_get_bypass, | ||
| 176 | .set_bypass = anatop_regmap_set_bypass, | ||
| 106 | }; | 177 | }; |
| 107 | 178 | ||
| 108 | static int anatop_regulator_probe(struct platform_device *pdev) | 179 | static int anatop_regulator_probe(struct platform_device *pdev) |
| @@ -116,6 +187,7 @@ static int anatop_regulator_probe(struct platform_device *pdev) | |||
| 116 | struct regulator_init_data *initdata; | 187 | struct regulator_init_data *initdata; |
| 117 | struct regulator_config config = { }; | 188 | struct regulator_config config = { }; |
| 118 | int ret = 0; | 189 | int ret = 0; |
| 190 | u32 val; | ||
| 119 | 191 | ||
| 120 | initdata = of_get_regulator_init_data(dev, np); | 192 | initdata = of_get_regulator_init_data(dev, np); |
| 121 | sreg = devm_kzalloc(dev, sizeof(*sreg), GFP_KERNEL); | 193 | sreg = devm_kzalloc(dev, sizeof(*sreg), GFP_KERNEL); |
| @@ -125,7 +197,6 @@ static int anatop_regulator_probe(struct platform_device *pdev) | |||
| 125 | sreg->name = of_get_property(np, "regulator-name", NULL); | 197 | sreg->name = of_get_property(np, "regulator-name", NULL); |
| 126 | rdesc = &sreg->rdesc; | 198 | rdesc = &sreg->rdesc; |
| 127 | rdesc->name = sreg->name; | 199 | rdesc->name = sreg->name; |
| 128 | rdesc->ops = &anatop_rops; | ||
| 129 | rdesc->type = REGULATOR_VOLTAGE; | 200 | rdesc->type = REGULATOR_VOLTAGE; |
| 130 | rdesc->owner = THIS_MODULE; | 201 | rdesc->owner = THIS_MODULE; |
| 131 | 202 | ||
| @@ -197,6 +268,25 @@ static int anatop_regulator_probe(struct platform_device *pdev) | |||
| 197 | config.of_node = pdev->dev.of_node; | 268 | config.of_node = pdev->dev.of_node; |
| 198 | config.regmap = sreg->anatop; | 269 | config.regmap = sreg->anatop; |
| 199 | 270 | ||
| 271 | /* Only core regulators have the ramp up delay configuration. */ | ||
| 272 | if (sreg->control_reg && sreg->delay_bit_width) { | ||
| 273 | rdesc->ops = &anatop_core_rops; | ||
| 274 | |||
| 275 | ret = regmap_read(config.regmap, rdesc->vsel_reg, &val); | ||
| 276 | if (ret) { | ||
| 277 | dev_err(dev, "failed to read initial state\n"); | ||
| 278 | return ret; | ||
| 279 | } | ||
| 280 | |||
| 281 | sreg->sel = (val & rdesc->vsel_mask) >> sreg->vol_bit_shift; | ||
| 282 | if (sreg->sel == LDO_FET_FULL_ON) { | ||
| 283 | sreg->sel = 0; | ||
| 284 | sreg->bypass = true; | ||
| 285 | } | ||
| 286 | } else { | ||
| 287 | rdesc->ops = &anatop_rops; | ||
| 288 | } | ||
| 289 | |||
| 200 | /* register regulator */ | 290 | /* register regulator */ |
| 201 | rdev = devm_regulator_register(dev, rdesc, &config); | 291 | rdev = devm_regulator_register(dev, rdesc, &config); |
| 202 | if (IS_ERR(rdev)) { | 292 | if (IS_ERR(rdev)) { |
diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c index 4f6c2055f6b2..b1033d30b504 100644 --- a/drivers/regulator/arizona-ldo1.c +++ b/drivers/regulator/arizona-ldo1.c | |||
| @@ -153,11 +153,9 @@ static const struct regulator_desc arizona_ldo1 = { | |||
| 153 | 153 | ||
| 154 | .vsel_reg = ARIZONA_LDO1_CONTROL_1, | 154 | .vsel_reg = ARIZONA_LDO1_CONTROL_1, |
| 155 | .vsel_mask = ARIZONA_LDO1_VSEL_MASK, | 155 | .vsel_mask = ARIZONA_LDO1_VSEL_MASK, |
| 156 | .bypass_reg = ARIZONA_LDO1_CONTROL_1, | ||
| 157 | .bypass_mask = ARIZONA_LDO1_BYPASS, | ||
| 158 | .min_uV = 900000, | 156 | .min_uV = 900000, |
| 159 | .uV_step = 50000, | 157 | .uV_step = 25000, |
| 160 | .n_voltages = 7, | 158 | .n_voltages = 13, |
| 161 | .enable_time = 500, | 159 | .enable_time = 500, |
| 162 | 160 | ||
| 163 | .owner = THIS_MODULE, | 161 | .owner = THIS_MODULE, |
| @@ -189,10 +187,8 @@ static int arizona_ldo1_probe(struct platform_device *pdev) | |||
| 189 | int ret; | 187 | int ret; |
| 190 | 188 | ||
| 191 | ldo1 = devm_kzalloc(&pdev->dev, sizeof(*ldo1), GFP_KERNEL); | 189 | ldo1 = devm_kzalloc(&pdev->dev, sizeof(*ldo1), GFP_KERNEL); |
| 192 | if (ldo1 == NULL) { | 190 | if (!ldo1) |
| 193 | dev_err(&pdev->dev, "Unable to allocate private data\n"); | ||
| 194 | return -ENOMEM; | 191 | return -ENOMEM; |
| 195 | } | ||
| 196 | 192 | ||
| 197 | ldo1->arizona = arizona; | 193 | ldo1->arizona = arizona; |
| 198 | 194 | ||
| @@ -203,6 +199,7 @@ static int arizona_ldo1_probe(struct platform_device *pdev) | |||
| 203 | */ | 199 | */ |
| 204 | switch (arizona->type) { | 200 | switch (arizona->type) { |
| 205 | case WM5102: | 201 | case WM5102: |
| 202 | case WM8997: | ||
| 206 | desc = &arizona_ldo1_hc; | 203 | desc = &arizona_ldo1_hc; |
| 207 | ldo1->init_data = arizona_ldo1_dvfs; | 204 | ldo1->init_data = arizona_ldo1_dvfs; |
| 208 | break; | 205 | break; |
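One way to sanity-check the arizona_ldo1 descriptor change above: the finer 25 mV step with 13 selectors still ends at the same ceiling as the old 50 mV / 7-selector table.

	/* Range check for the new LDO1 descriptor values shown above:
	 *   old top: 900000 + (7  - 1) * 50000 = 1200000 uV
	 *   new top: 900000 + (13 - 1) * 25000 = 1200000 uV
	 * Only the step granularity changes; the min and max voltages do not.
	 */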
diff --git a/drivers/regulator/arizona-micsupp.c b/drivers/regulator/arizona-micsupp.c index 034ece707083..6fdd9bf6927f 100644 --- a/drivers/regulator/arizona-micsupp.c +++ b/drivers/regulator/arizona-micsupp.c | |||
| @@ -204,10 +204,8 @@ static int arizona_micsupp_probe(struct platform_device *pdev) | |||
| 204 | int ret; | 204 | int ret; |
| 205 | 205 | ||
| 206 | micsupp = devm_kzalloc(&pdev->dev, sizeof(*micsupp), GFP_KERNEL); | 206 | micsupp = devm_kzalloc(&pdev->dev, sizeof(*micsupp), GFP_KERNEL); |
| 207 | if (micsupp == NULL) { | 207 | if (!micsupp) |
| 208 | dev_err(&pdev->dev, "Unable to allocate private data\n"); | ||
| 209 | return -ENOMEM; | 208 | return -ENOMEM; |
| 210 | } | ||
| 211 | 209 | ||
| 212 | micsupp->arizona = arizona; | 210 | micsupp->arizona = arizona; |
| 213 | INIT_WORK(&micsupp->check_cp_work, arizona_micsupp_check_cp); | 211 | INIT_WORK(&micsupp->check_cp_work, arizona_micsupp_check_cp); |
diff --git a/drivers/regulator/as3711-regulator.c b/drivers/regulator/as3711-regulator.c index c77a58478cca..b47283f91e2d 100644 --- a/drivers/regulator/as3711-regulator.c +++ b/drivers/regulator/as3711-regulator.c | |||
| @@ -191,7 +191,7 @@ static int as3711_regulator_parse_dt(struct device *dev, | |||
| 191 | { | 191 | { |
| 192 | struct as3711_regulator_pdata *pdata = dev_get_platdata(dev); | 192 | struct as3711_regulator_pdata *pdata = dev_get_platdata(dev); |
| 193 | struct device_node *regulators = | 193 | struct device_node *regulators = |
| 194 | of_find_node_by_name(dev->parent->of_node, "regulators"); | 194 | of_get_child_by_name(dev->parent->of_node, "regulators"); |
| 195 | struct of_regulator_match *match; | 195 | struct of_regulator_match *match; |
| 196 | int ret, i; | 196 | int ret, i; |
| 197 | 197 | ||
| @@ -221,7 +221,6 @@ static int as3711_regulator_probe(struct platform_device *pdev) | |||
| 221 | { | 221 | { |
| 222 | struct as3711_regulator_pdata *pdata = dev_get_platdata(&pdev->dev); | 222 | struct as3711_regulator_pdata *pdata = dev_get_platdata(&pdev->dev); |
| 223 | struct as3711 *as3711 = dev_get_drvdata(pdev->dev.parent); | 223 | struct as3711 *as3711 = dev_get_drvdata(pdev->dev.parent); |
| 224 | struct regulator_init_data *reg_data; | ||
| 225 | struct regulator_config config = {.dev = &pdev->dev,}; | 224 | struct regulator_config config = {.dev = &pdev->dev,}; |
| 226 | struct as3711_regulator *reg = NULL; | 225 | struct as3711_regulator *reg = NULL; |
| 227 | struct as3711_regulator *regs; | 226 | struct as3711_regulator *regs; |
| @@ -246,22 +245,14 @@ static int as3711_regulator_probe(struct platform_device *pdev) | |||
| 246 | 245 | ||
| 247 | regs = devm_kzalloc(&pdev->dev, AS3711_REGULATOR_NUM * | 246 | regs = devm_kzalloc(&pdev->dev, AS3711_REGULATOR_NUM * |
| 248 | sizeof(struct as3711_regulator), GFP_KERNEL); | 247 | sizeof(struct as3711_regulator), GFP_KERNEL); |
| 249 | if (!regs) { | 248 | if (!regs) |
| 250 | dev_err(&pdev->dev, "Memory allocation failed exiting..\n"); | ||
| 251 | return -ENOMEM; | 249 | return -ENOMEM; |
| 252 | } | ||
| 253 | 250 | ||
| 254 | for (id = 0, ri = as3711_reg_info; id < AS3711_REGULATOR_NUM; ++id, ri++) { | 251 | for (id = 0, ri = as3711_reg_info; id < AS3711_REGULATOR_NUM; ++id, ri++) { |
| 255 | reg_data = pdata->init_data[id]; | ||
| 256 | |||
| 257 | /* No need to register if there is no regulator data */ | ||
| 258 | if (!reg_data) | ||
| 259 | continue; | ||
| 260 | |||
| 261 | reg = ®s[id]; | 252 | reg = ®s[id]; |
| 262 | reg->reg_info = ri; | 253 | reg->reg_info = ri; |
| 263 | 254 | ||
| 264 | config.init_data = reg_data; | 255 | config.init_data = pdata->init_data[id]; |
| 265 | config.driver_data = reg; | 256 | config.driver_data = reg; |
| 266 | config.regmap = as3711->regmap; | 257 | config.regmap = as3711->regmap; |
| 267 | config.of_node = of_node[id]; | 258 | config.of_node = of_node[id]; |
diff --git a/drivers/regulator/as3722-regulator.c b/drivers/regulator/as3722-regulator.c index 8b17d786cb71..85585219ce82 100644 --- a/drivers/regulator/as3722-regulator.c +++ b/drivers/regulator/as3722-regulator.c | |||
| @@ -719,6 +719,7 @@ static int as3722_get_regulator_dt_data(struct platform_device *pdev, | |||
| 719 | 719 | ||
| 720 | ret = of_regulator_match(&pdev->dev, np, as3722_regulator_matches, | 720 | ret = of_regulator_match(&pdev->dev, np, as3722_regulator_matches, |
| 721 | ARRAY_SIZE(as3722_regulator_matches)); | 721 | ARRAY_SIZE(as3722_regulator_matches)); |
| 722 | of_node_put(np); | ||
| 722 | if (ret < 0) { | 723 | if (ret < 0) { |
| 723 | dev_err(&pdev->dev, "Parsing of regulator node failed: %d\n", | 724 | dev_err(&pdev->dev, "Parsing of regulator node failed: %d\n", |
| 724 | ret); | 725 | ret); |
diff --git a/drivers/regulator/bcm590xx-regulator.c b/drivers/regulator/bcm590xx-regulator.c new file mode 100644 index 000000000000..ab08ca7cfb08 --- /dev/null +++ b/drivers/regulator/bcm590xx-regulator.c | |||
| @@ -0,0 +1,403 @@ | |||
| 1 | /* | ||
| 2 | * Broadcom BCM590xx regulator driver | ||
| 3 | * | ||
| 4 | * Copyright 2014 Linaro Limited | ||
| 5 | * Author: Matt Porter <mporter@linaro.org> | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify it | ||
| 8 | * under the terms of the GNU General Public License as published by the | ||
| 9 | * Free Software Foundation; either version 2 of the License, or (at your | ||
| 10 | * option) any later version. | ||
| 11 | */ | ||
| 12 | |||
| 13 | #include <linux/err.h> | ||
| 14 | #include <linux/init.h> | ||
| 15 | #include <linux/kernel.h> | ||
| 16 | #include <linux/mfd/bcm590xx.h> | ||
| 17 | #include <linux/module.h> | ||
| 18 | #include <linux/of.h> | ||
| 19 | #include <linux/platform_device.h> | ||
| 20 | #include <linux/regulator/driver.h> | ||
| 21 | #include <linux/regulator/machine.h> | ||
| 22 | #include <linux/regulator/of_regulator.h> | ||
| 23 | #include <linux/slab.h> | ||
| 24 | |||
| 25 | /* Register defs */ | ||
| 26 | #define BCM590XX_RFLDOPMCTRL1 0x60 | ||
| 27 | #define BCM590XX_IOSR1PMCTRL1 0x7a | ||
| 28 | #define BCM590XX_IOSR2PMCTRL1 0x7c | ||
| 29 | #define BCM590XX_CSRPMCTRL1 0x7e | ||
| 30 | #define BCM590XX_SDSR1PMCTRL1 0x82 | ||
| 31 | #define BCM590XX_SDSR2PMCTRL1 0x86 | ||
| 32 | #define BCM590XX_MSRPMCTRL1 0x8a | ||
| 33 | #define BCM590XX_VSRPMCTRL1 0x8e | ||
| 34 | #define BCM590XX_REG_ENABLE BIT(7) | ||
| 35 | |||
| 36 | #define BCM590XX_RFLDOCTRL 0x96 | ||
| 37 | #define BCM590XX_CSRVOUT1 0xc0 | ||
| 38 | #define BCM590XX_LDO_VSEL_MASK GENMASK(5, 3) | ||
| 39 | #define BCM590XX_SR_VSEL_MASK GENMASK(5, 0) | ||
| 40 | |||
| 41 | /* LDO regulator IDs */ | ||
| 42 | #define BCM590XX_REG_RFLDO 0 | ||
| 43 | #define BCM590XX_REG_CAMLDO1 1 | ||
| 44 | #define BCM590XX_REG_CAMLDO2 2 | ||
| 45 | #define BCM590XX_REG_SIMLDO1 3 | ||
| 46 | #define BCM590XX_REG_SIMLDO2 4 | ||
| 47 | #define BCM590XX_REG_SDLDO 5 | ||
| 48 | #define BCM590XX_REG_SDXLDO 6 | ||
| 49 | #define BCM590XX_REG_MMCLDO1 7 | ||
| 50 | #define BCM590XX_REG_MMCLDO2 8 | ||
| 51 | #define BCM590XX_REG_AUDLDO 9 | ||
| 52 | #define BCM590XX_REG_MICLDO 10 | ||
| 53 | #define BCM590XX_REG_USBLDO 11 | ||
| 54 | #define BCM590XX_REG_VIBLDO 12 | ||
| 55 | |||
| 56 | /* DCDC regulator IDs */ | ||
| 57 | #define BCM590XX_REG_CSR 13 | ||
| 58 | #define BCM590XX_REG_IOSR1 14 | ||
| 59 | #define BCM590XX_REG_IOSR2 15 | ||
| 60 | #define BCM590XX_REG_MSR 16 | ||
| 61 | #define BCM590XX_REG_SDSR1 17 | ||
| 62 | #define BCM590XX_REG_SDSR2 18 | ||
| 63 | #define BCM590XX_REG_VSR 19 | ||
| 64 | |||
| 65 | #define BCM590XX_NUM_REGS 20 | ||
| 66 | |||
| 67 | #define BCM590XX_REG_IS_LDO(n) (n < BCM590XX_REG_CSR) | ||
| 68 | |||
| 69 | struct bcm590xx_board { | ||
| 70 | struct regulator_init_data *bcm590xx_pmu_init_data[BCM590XX_NUM_REGS]; | ||
| 71 | }; | ||
| 72 | |||
| 73 | /* LDO group A: supported voltages in microvolts */ | ||
| 74 | static const unsigned int ldo_a_table[] = { | ||
| 75 | 1200000, 1800000, 2500000, 2700000, 2800000, | ||
| 76 | 2900000, 3000000, 3300000, | ||
| 77 | }; | ||
| 78 | |||
| 79 | /* LDO group C: supported voltages in microvolts */ | ||
| 80 | static const unsigned int ldo_c_table[] = { | ||
| 81 | 3100000, 1800000, 2500000, 2700000, 2800000, | ||
| 82 | 2900000, 3000000, 3300000, | ||
| 83 | }; | ||
| 84 | |||
| 85 | /* DCDC group CSR: supported voltages in microvolts */ | ||
| 86 | static const struct regulator_linear_range dcdc_csr_ranges[] = { | ||
| 87 | REGULATOR_LINEAR_RANGE(860000, 2, 50, 10000), | ||
| 88 | REGULATOR_LINEAR_RANGE(1360000, 51, 55, 20000), | ||
| 89 | REGULATOR_LINEAR_RANGE(900000, 56, 63, 0), | ||
| 90 | }; | ||
| 91 | |||
| 92 | /* DCDC group IOSR1: supported voltages in microvolts */ | ||
| 93 | static const struct regulator_linear_range dcdc_iosr1_ranges[] = { | ||
| 94 | REGULATOR_LINEAR_RANGE(860000, 2, 51, 10000), | ||
| 95 | REGULATOR_LINEAR_RANGE(1500000, 52, 52, 0), | ||
| 96 | REGULATOR_LINEAR_RANGE(1800000, 53, 53, 0), | ||
| 97 | REGULATOR_LINEAR_RANGE(900000, 54, 63, 0), | ||
| 98 | }; | ||
| 99 | |||
| 100 | /* DCDC group SDSR1: supported voltages in microvolts */ | ||
| 101 | static const struct regulator_linear_range dcdc_sdsr1_ranges[] = { | ||
| 102 | REGULATOR_LINEAR_RANGE(860000, 2, 50, 10000), | ||
| 103 | REGULATOR_LINEAR_RANGE(1340000, 51, 51, 0), | ||
| 104 | REGULATOR_LINEAR_RANGE(900000, 52, 63, 0), | ||
| 105 | }; | ||
| 106 | |||
| 107 | struct bcm590xx_info { | ||
| 108 | const char *name; | ||
| 109 | const char *vin_name; | ||
| 110 | u8 n_voltages; | ||
| 111 | const unsigned int *volt_table; | ||
| 112 | u8 n_linear_ranges; | ||
| 113 | const struct regulator_linear_range *linear_ranges; | ||
| 114 | }; | ||
| 115 | |||
| 116 | #define BCM590XX_REG_TABLE(_name, _table) \ | ||
| 117 | { \ | ||
| 118 | .name = #_name, \ | ||
| 119 | .n_voltages = ARRAY_SIZE(_table), \ | ||
| 120 | .volt_table = _table, \ | ||
| 121 | } | ||
| 122 | |||
| 123 | #define BCM590XX_REG_RANGES(_name, _ranges) \ | ||
| 124 | { \ | ||
| 125 | .name = #_name, \ | ||
| 126 | .n_linear_ranges = ARRAY_SIZE(_ranges), \ | ||
| 127 | .linear_ranges = _ranges, \ | ||
| 128 | } | ||
| 129 | |||
| 130 | static struct bcm590xx_info bcm590xx_regs[] = { | ||
| 131 | BCM590XX_REG_TABLE(rfldo, ldo_a_table), | ||
| 132 | BCM590XX_REG_TABLE(camldo1, ldo_c_table), | ||
| 133 | BCM590XX_REG_TABLE(camldo2, ldo_c_table), | ||
| 134 | BCM590XX_REG_TABLE(simldo1, ldo_a_table), | ||
| 135 | BCM590XX_REG_TABLE(simldo2, ldo_a_table), | ||
| 136 | BCM590XX_REG_TABLE(sdldo, ldo_c_table), | ||
| 137 | BCM590XX_REG_TABLE(sdxldo, ldo_a_table), | ||
| 138 | BCM590XX_REG_TABLE(mmcldo1, ldo_a_table), | ||
| 139 | BCM590XX_REG_TABLE(mmcldo2, ldo_a_table), | ||
| 140 | BCM590XX_REG_TABLE(audldo, ldo_a_table), | ||
| 141 | BCM590XX_REG_TABLE(micldo, ldo_a_table), | ||
| 142 | BCM590XX_REG_TABLE(usbldo, ldo_a_table), | ||
| 143 | BCM590XX_REG_TABLE(vibldo, ldo_c_table), | ||
| 144 | BCM590XX_REG_RANGES(csr, dcdc_csr_ranges), | ||
| 145 | BCM590XX_REG_RANGES(iosr1, dcdc_iosr1_ranges), | ||
| 146 | BCM590XX_REG_RANGES(iosr2, dcdc_iosr1_ranges), | ||
| 147 | BCM590XX_REG_RANGES(msr, dcdc_iosr1_ranges), | ||
| 148 | BCM590XX_REG_RANGES(sdsr1, dcdc_sdsr1_ranges), | ||
| 149 | BCM590XX_REG_RANGES(sdsr2, dcdc_iosr1_ranges), | ||
| 150 | BCM590XX_REG_RANGES(vsr, dcdc_iosr1_ranges), | ||
| 151 | }; | ||
| 152 | |||
| 153 | struct bcm590xx_reg { | ||
| 154 | struct regulator_desc *desc; | ||
| 155 | struct bcm590xx *mfd; | ||
| 156 | struct bcm590xx_info **info; | ||
| 157 | }; | ||
| 158 | |||
| 159 | static int bcm590xx_get_vsel_register(int id) | ||
| 160 | { | ||
| 161 | if (BCM590XX_REG_IS_LDO(id)) | ||
| 162 | return BCM590XX_RFLDOCTRL + id; | ||
| 163 | else | ||
| 164 | return BCM590XX_CSRVOUT1 + (id - BCM590XX_REG_CSR) * 3; | ||
| 165 | } | ||
| 166 | |||
| 167 | static int bcm590xx_get_enable_register(int id) | ||
| 168 | { | ||
| 169 | int reg = 0; | ||
| 170 | |||
| 171 | if (BCM590XX_REG_IS_LDO(id)) | ||
| 172 | reg = BCM590XX_RFLDOPMCTRL1 + id * 2; | ||
| 173 | else | ||
| 174 | switch (id) { | ||
| 175 | case BCM590XX_REG_CSR: | ||
| 176 | reg = BCM590XX_CSRPMCTRL1; | ||
| 177 | break; | ||
| 178 | case BCM590XX_REG_IOSR1: | ||
| 179 | reg = BCM590XX_IOSR1PMCTRL1; | ||
| 180 | break; | ||
| 181 | case BCM590XX_REG_IOSR2: | ||
| 182 | reg = BCM590XX_IOSR2PMCTRL1; | ||
| 183 | break; | ||
| 184 | case BCM590XX_REG_MSR: | ||
| 185 | reg = BCM590XX_MSRPMCTRL1; | ||
| 186 | break; | ||
| 187 | case BCM590XX_REG_SDSR1: | ||
| 188 | reg = BCM590XX_SDSR1PMCTRL1; | ||
| 189 | break; | ||
| 190 | case BCM590XX_REG_SDSR2: | ||
| 191 | reg = BCM590XX_SDSR2PMCTRL1; | ||
| 192 | break; | ||
| 193 | } | ||
| 194 | |||
| 195 | return reg; | ||
| 196 | } | ||
| 197 | |||
| 198 | static struct regulator_ops bcm590xx_ops_ldo = { | ||
| 199 | .is_enabled = regulator_is_enabled_regmap, | ||
| 200 | .enable = regulator_enable_regmap, | ||
| 201 | .disable = regulator_disable_regmap, | ||
| 202 | .get_voltage_sel = regulator_get_voltage_sel_regmap, | ||
| 203 | .set_voltage_sel = regulator_set_voltage_sel_regmap, | ||
| 204 | .list_voltage = regulator_list_voltage_table, | ||
| 205 | .map_voltage = regulator_map_voltage_iterate, | ||
| 206 | }; | ||
| 207 | |||
| 208 | static struct regulator_ops bcm590xx_ops_dcdc = { | ||
| 209 | .is_enabled = regulator_is_enabled_regmap, | ||
| 210 | .enable = regulator_enable_regmap, | ||
| 211 | .disable = regulator_disable_regmap, | ||
| 212 | .get_voltage_sel = regulator_get_voltage_sel_regmap, | ||
| 213 | .set_voltage_sel = regulator_set_voltage_sel_regmap, | ||
| 214 | .list_voltage = regulator_list_voltage_linear_range, | ||
| 215 | .map_voltage = regulator_map_voltage_linear_range, | ||
| 216 | }; | ||
| 217 | |||
| 218 | #define BCM590XX_MATCH(_name, _id) \ | ||
| 219 | { \ | ||
| 220 | .name = #_name, \ | ||
| 221 | .driver_data = (void *)&bcm590xx_regs[BCM590XX_REG_##_id], \ | ||
| 222 | } | ||
| 223 | |||
| 224 | static struct of_regulator_match bcm590xx_matches[] = { | ||
| 225 | BCM590XX_MATCH(rfldo, RFLDO), | ||
| 226 | BCM590XX_MATCH(camldo1, CAMLDO1), | ||
| 227 | BCM590XX_MATCH(camldo2, CAMLDO2), | ||
| 228 | BCM590XX_MATCH(simldo1, SIMLDO1), | ||
| 229 | BCM590XX_MATCH(simldo2, SIMLDO2), | ||
| 230 | BCM590XX_MATCH(sdldo, SDLDO), | ||
| 231 | BCM590XX_MATCH(sdxldo, SDXLDO), | ||
| 232 | BCM590XX_MATCH(mmcldo1, MMCLDO1), | ||
| 233 | BCM590XX_MATCH(mmcldo2, MMCLDO2), | ||
| 234 | BCM590XX_MATCH(audldo, AUDLDO), | ||
| 235 | BCM590XX_MATCH(micldo, MICLDO), | ||
| 236 | BCM590XX_MATCH(usbldo, USBLDO), | ||
| 237 | BCM590XX_MATCH(vibldo, VIBLDO), | ||
| 238 | BCM590XX_MATCH(csr, CSR), | ||
| 239 | BCM590XX_MATCH(iosr1, IOSR1), | ||
| 240 | BCM590XX_MATCH(iosr2, IOSR2), | ||
| 241 | BCM590XX_MATCH(msr, MSR), | ||
| 242 | BCM590XX_MATCH(sdsr1, SDSR1), | ||
| 243 | BCM590XX_MATCH(sdsr2, SDSR2), | ||
| 244 | BCM590XX_MATCH(vsr, VSR), | ||
| 245 | }; | ||
| 246 | |||
| 247 | static struct bcm590xx_board *bcm590xx_parse_dt_reg_data( | ||
| 248 | struct platform_device *pdev, | ||
| 249 | struct of_regulator_match **bcm590xx_reg_matches) | ||
| 250 | { | ||
| 251 | struct bcm590xx_board *data; | ||
| 252 | struct device_node *np = pdev->dev.parent->of_node; | ||
| 253 | struct device_node *regulators; | ||
| 254 | struct of_regulator_match *matches = bcm590xx_matches; | ||
| 255 | int count = ARRAY_SIZE(bcm590xx_matches); | ||
| 256 | int idx = 0; | ||
| 257 | int ret; | ||
| 258 | |||
| 259 | if (!np) { | ||
| 260 | dev_err(&pdev->dev, "of node not found\n"); | ||
| 261 | return NULL; | ||
| 262 | } | ||
| 263 | |||
| 264 | data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); | ||
| 265 | if (!data) { | ||
| 266 | dev_err(&pdev->dev, "failed to allocate regulator board data\n"); | ||
| 267 | return NULL; | ||
| 268 | } | ||
| 269 | |||
| 270 | np = of_node_get(np); | ||
| 271 | regulators = of_get_child_by_name(np, "regulators"); | ||
| 272 | if (!regulators) { | ||
| 273 | dev_warn(&pdev->dev, "regulator node not found\n"); | ||
| 274 | return NULL; | ||
| 275 | } | ||
| 276 | |||
| 277 | ret = of_regulator_match(&pdev->dev, regulators, matches, count); | ||
| 278 | of_node_put(regulators); | ||
| 279 | if (ret < 0) { | ||
| 280 | dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", | ||
| 281 | ret); | ||
| 282 | return NULL; | ||
| 283 | } | ||
| 284 | |||
| 285 | *bcm590xx_reg_matches = matches; | ||
| 286 | |||
| 287 | for (idx = 0; idx < count; idx++) { | ||
| 288 | if (!matches[idx].init_data || !matches[idx].of_node) | ||
| 289 | continue; | ||
| 290 | |||
| 291 | data->bcm590xx_pmu_init_data[idx] = matches[idx].init_data; | ||
| 292 | } | ||
| 293 | |||
| 294 | return data; | ||
| 295 | } | ||
| 296 | |||
| 297 | static int bcm590xx_probe(struct platform_device *pdev) | ||
| 298 | { | ||
| 299 | struct bcm590xx *bcm590xx = dev_get_drvdata(pdev->dev.parent); | ||
| 300 | struct bcm590xx_board *pmu_data = NULL; | ||
| 301 | struct bcm590xx_reg *pmu; | ||
| 302 | struct regulator_config config = { }; | ||
| 303 | struct bcm590xx_info *info; | ||
| 304 | struct regulator_init_data *reg_data; | ||
| 305 | struct regulator_dev *rdev; | ||
| 306 | struct of_regulator_match *bcm590xx_reg_matches = NULL; | ||
| 307 | int i; | ||
| 308 | |||
| 309 | pmu_data = bcm590xx_parse_dt_reg_data(pdev, | ||
| 310 | &bcm590xx_reg_matches); | ||
| 311 | |||
| 312 | pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL); | ||
| 313 | if (!pmu) { | ||
| 314 | dev_err(&pdev->dev, "Memory allocation failed for pmu\n"); | ||
| 315 | return -ENOMEM; | ||
| 316 | } | ||
| 317 | |||
| 318 | pmu->mfd = bcm590xx; | ||
| 319 | |||
| 320 | platform_set_drvdata(pdev, pmu); | ||
| 321 | |||
| 322 | pmu->desc = devm_kzalloc(&pdev->dev, BCM590XX_NUM_REGS * | ||
| 323 | sizeof(struct regulator_desc), GFP_KERNEL); | ||
| 324 | if (!pmu->desc) { | ||
| 325 | dev_err(&pdev->dev, "Memory alloc fails for desc\n"); | ||
| 326 | return -ENOMEM; | ||
| 327 | } | ||
| 328 | |||
| 329 | pmu->info = devm_kzalloc(&pdev->dev, BCM590XX_NUM_REGS * | ||
| 330 | sizeof(struct bcm590xx_info *), GFP_KERNEL); | ||
| 331 | if (!pmu->info) { | ||
| 332 | dev_err(&pdev->dev, "Memory alloc fails for info\n"); | ||
| 333 | return -ENOMEM; | ||
| 334 | } | ||
| 335 | |||
| 336 | info = bcm590xx_regs; | ||
| 337 | |||
| 338 | for (i = 0; i < BCM590XX_NUM_REGS; i++, info++) { | ||
| 339 | if (pmu_data) | ||
| 340 | reg_data = pmu_data->bcm590xx_pmu_init_data[i]; | ||
| 341 | else | ||
| 342 | reg_data = NULL; | ||
| 343 | |||
| 344 | /* Register the regulators */ | ||
| 345 | pmu->info[i] = info; | ||
| 346 | |||
| 347 | pmu->desc[i].name = info->name; | ||
| 348 | pmu->desc[i].supply_name = info->vin_name; | ||
| 349 | pmu->desc[i].id = i; | ||
| 350 | pmu->desc[i].volt_table = info->volt_table; | ||
| 351 | pmu->desc[i].n_voltages = info->n_voltages; | ||
| 352 | pmu->desc[i].linear_ranges = info->linear_ranges; | ||
| 353 | pmu->desc[i].n_linear_ranges = info->n_linear_ranges; | ||
| 354 | |||
| 355 | if (BCM590XX_REG_IS_LDO(i)) { | ||
| 356 | pmu->desc[i].ops = &bcm590xx_ops_ldo; | ||
| 357 | pmu->desc[i].vsel_mask = BCM590XX_LDO_VSEL_MASK; | ||
| 358 | } else { | ||
| 359 | pmu->desc[i].ops = &bcm590xx_ops_dcdc; | ||
| 360 | pmu->desc[i].vsel_mask = BCM590XX_SR_VSEL_MASK; | ||
| 361 | } | ||
| 362 | |||
| 363 | pmu->desc[i].vsel_reg = bcm590xx_get_vsel_register(i); | ||
| 364 | pmu->desc[i].enable_is_inverted = true; | ||
| 365 | pmu->desc[i].enable_mask = BCM590XX_REG_ENABLE; | ||
| 366 | pmu->desc[i].enable_reg = bcm590xx_get_enable_register(i); | ||
| 367 | pmu->desc[i].type = REGULATOR_VOLTAGE; | ||
| 368 | pmu->desc[i].owner = THIS_MODULE; | ||
| 369 | |||
| 370 | config.dev = bcm590xx->dev; | ||
| 371 | config.init_data = reg_data; | ||
| 372 | config.driver_data = pmu; | ||
| 373 | config.regmap = bcm590xx->regmap; | ||
| 374 | |||
| 375 | if (bcm590xx_reg_matches) | ||
| 376 | config.of_node = bcm590xx_reg_matches[i].of_node; | ||
| 377 | |||
| 378 | rdev = devm_regulator_register(&pdev->dev, &pmu->desc[i], | ||
| 379 | &config); | ||
| 380 | if (IS_ERR(rdev)) { | ||
| 381 | dev_err(bcm590xx->dev, | ||
| 382 | "failed to register %s regulator\n", | ||
| 383 | pdev->name); | ||
| 384 | return PTR_ERR(rdev); | ||
| 385 | } | ||
| 386 | } | ||
| 387 | |||
| 388 | return 0; | ||
| 389 | } | ||
| 390 | |||
| 391 | static struct platform_driver bcm590xx_regulator_driver = { | ||
| 392 | .driver = { | ||
| 393 | .name = "bcm590xx-vregs", | ||
| 394 | .owner = THIS_MODULE, | ||
| 395 | }, | ||
| 396 | .probe = bcm590xx_probe, | ||
| 397 | }; | ||
| 398 | module_platform_driver(bcm590xx_regulator_driver); | ||
| 399 | |||
| 400 | MODULE_AUTHOR("Matt Porter <mporter@linaro.org>"); | ||
| 401 | MODULE_DESCRIPTION("BCM590xx voltage regulator driver"); | ||
| 402 | MODULE_LICENSE("GPL v2"); | ||
| 403 | MODULE_ALIAS("platform:bcm590xx-vregs"); | ||
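For the new bcm590xx driver above, the register lookup helpers encode a simple layout: LDO voltage-select registers sit at consecutive offsets from BCM590XX_RFLDOCTRL, while DCDC ones sit every third byte from BCM590XX_CSRVOUT1. Two worked values using the IDs and addresses defined in the file:

	/* Worked examples of bcm590xx_get_vsel_register(), using the #defines
	 * from the driver above (RFLDOCTRL = 0x96, CSRVOUT1 = 0xc0):
	 *   id = BCM590XX_REG_CAMLDO1 (1, an LDO):  0x96 + 1             = 0x97
	 *   id = BCM590XX_REG_IOSR1   (14, a DCDC): 0xc0 + (14 - 13) * 3 = 0xc3
	 */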
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index d1ac4caaf1b0..bac485acc7f3 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c | |||
| @@ -953,6 +953,8 @@ static int machine_constraints_current(struct regulator_dev *rdev, | |||
| 953 | return 0; | 953 | return 0; |
| 954 | } | 954 | } |
| 955 | 955 | ||
| 956 | static int _regulator_do_enable(struct regulator_dev *rdev); | ||
| 957 | |||
| 956 | /** | 958 | /** |
| 957 | * set_machine_constraints - sets regulator constraints | 959 | * set_machine_constraints - sets regulator constraints |
| 958 | * @rdev: regulator source | 960 | * @rdev: regulator source |
| @@ -1013,10 +1015,9 @@ static int set_machine_constraints(struct regulator_dev *rdev, | |||
| 1013 | /* If the constraints say the regulator should be on at this point | 1015 | /* If the constraints say the regulator should be on at this point |
| 1014 | * and we have control then make sure it is enabled. | 1016 | * and we have control then make sure it is enabled. |
| 1015 | */ | 1017 | */ |
| 1016 | if ((rdev->constraints->always_on || rdev->constraints->boot_on) && | 1018 | if (rdev->constraints->always_on || rdev->constraints->boot_on) { |
| 1017 | ops->enable) { | 1019 | ret = _regulator_do_enable(rdev); |
| 1018 | ret = ops->enable(rdev); | 1020 | if (ret < 0 && ret != -EINVAL) { |
| 1019 | if (ret < 0) { | ||
| 1020 | rdev_err(rdev, "failed to enable\n"); | 1021 | rdev_err(rdev, "failed to enable\n"); |
| 1021 | goto out; | 1022 | goto out; |
| 1022 | } | 1023 | } |
| @@ -1907,8 +1908,6 @@ static int _regulator_do_disable(struct regulator_dev *rdev) | |||
| 1907 | 1908 | ||
| 1908 | trace_regulator_disable_complete(rdev_get_name(rdev)); | 1909 | trace_regulator_disable_complete(rdev_get_name(rdev)); |
| 1909 | 1910 | ||
| 1910 | _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE, | ||
| 1911 | NULL); | ||
| 1912 | return 0; | 1911 | return 0; |
| 1913 | } | 1912 | } |
| 1914 | 1913 | ||
| @@ -1932,6 +1931,8 @@ static int _regulator_disable(struct regulator_dev *rdev) | |||
| 1932 | rdev_err(rdev, "failed to disable\n"); | 1931 | rdev_err(rdev, "failed to disable\n"); |
| 1933 | return ret; | 1932 | return ret; |
| 1934 | } | 1933 | } |
| 1934 | _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE, | ||
| 1935 | NULL); | ||
| 1935 | } | 1936 | } |
| 1936 | 1937 | ||
| 1937 | rdev->use_count = 0; | 1938 | rdev->use_count = 0; |
| @@ -1984,20 +1985,16 @@ static int _regulator_force_disable(struct regulator_dev *rdev) | |||
| 1984 | { | 1985 | { |
| 1985 | int ret = 0; | 1986 | int ret = 0; |
| 1986 | 1987 | ||
| 1987 | /* force disable */ | 1988 | ret = _regulator_do_disable(rdev); |
| 1988 | if (rdev->desc->ops->disable) { | 1989 | if (ret < 0) { |
| 1989 | /* ah well, who wants to live forever... */ | 1990 | rdev_err(rdev, "failed to force disable\n"); |
| 1990 | ret = rdev->desc->ops->disable(rdev); | 1991 | return ret; |
| 1991 | if (ret < 0) { | ||
| 1992 | rdev_err(rdev, "failed to force disable\n"); | ||
| 1993 | return ret; | ||
| 1994 | } | ||
| 1995 | /* notify other consumers that power has been forced off */ | ||
| 1996 | _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE | | ||
| 1997 | REGULATOR_EVENT_DISABLE, NULL); | ||
| 1998 | } | 1992 | } |
| 1999 | 1993 | ||
| 2000 | return ret; | 1994 | _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE | |
| 1995 | REGULATOR_EVENT_DISABLE, NULL); | ||
| 1996 | |||
| 1997 | return 0; | ||
| 2001 | } | 1998 | } |
| 2002 | 1999 | ||
| 2003 | /** | 2000 | /** |
| @@ -2402,6 +2399,7 @@ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV) | |||
| 2402 | struct regulator_dev *rdev = regulator->rdev; | 2399 | struct regulator_dev *rdev = regulator->rdev; |
| 2403 | int ret = 0; | 2400 | int ret = 0; |
| 2404 | int old_min_uV, old_max_uV; | 2401 | int old_min_uV, old_max_uV; |
| 2402 | int current_uV; | ||
| 2405 | 2403 | ||
| 2406 | mutex_lock(&rdev->mutex); | 2404 | mutex_lock(&rdev->mutex); |
| 2407 | 2405 | ||
| @@ -2412,6 +2410,19 @@ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV) | |||
| 2412 | if (regulator->min_uV == min_uV && regulator->max_uV == max_uV) | 2410 | if (regulator->min_uV == min_uV && regulator->max_uV == max_uV) |
| 2413 | goto out; | 2411 | goto out; |
| 2414 | 2412 | ||
| 2413 | /* If we're trying to set a range that overlaps the current voltage, | ||
| 2414 | * return successfully even though the regulator does not support | ||
| 2415 | * changing the voltage. | ||
| 2416 | */ | ||
| 2417 | if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) { | ||
| 2418 | current_uV = _regulator_get_voltage(rdev); | ||
| 2419 | if (min_uV <= current_uV && current_uV <= max_uV) { | ||
| 2420 | regulator->min_uV = min_uV; | ||
| 2421 | regulator->max_uV = max_uV; | ||
| 2422 | goto out; | ||
| 2423 | } | ||
| 2424 | } | ||
| 2425 | |||
| 2415 | /* sanity check */ | 2426 | /* sanity check */ |
| 2416 | if (!rdev->desc->ops->set_voltage && | 2427 | if (!rdev->desc->ops->set_voltage && |
| 2417 | !rdev->desc->ops->set_voltage_sel) { | 2428 | !rdev->desc->ops->set_voltage_sel) { |
| @@ -3630,23 +3641,18 @@ int regulator_suspend_finish(void) | |||
| 3630 | 3641 | ||
| 3631 | mutex_lock(®ulator_list_mutex); | 3642 | mutex_lock(®ulator_list_mutex); |
| 3632 | list_for_each_entry(rdev, ®ulator_list, list) { | 3643 | list_for_each_entry(rdev, ®ulator_list, list) { |
| 3633 | struct regulator_ops *ops = rdev->desc->ops; | ||
| 3634 | |||
| 3635 | mutex_lock(&rdev->mutex); | 3644 | mutex_lock(&rdev->mutex); |
| 3636 | if ((rdev->use_count > 0 || rdev->constraints->always_on) && | 3645 | if (rdev->use_count > 0 || rdev->constraints->always_on) { |
| 3637 | ops->enable) { | 3646 | error = _regulator_do_enable(rdev); |
| 3638 | error = ops->enable(rdev); | ||
| 3639 | if (error) | 3647 | if (error) |
| 3640 | ret = error; | 3648 | ret = error; |
| 3641 | } else { | 3649 | } else { |
| 3642 | if (!have_full_constraints()) | 3650 | if (!have_full_constraints()) |
| 3643 | goto unlock; | 3651 | goto unlock; |
| 3644 | if (!ops->disable) | ||
| 3645 | goto unlock; | ||
| 3646 | if (!_regulator_is_enabled(rdev)) | 3652 | if (!_regulator_is_enabled(rdev)) |
| 3647 | goto unlock; | 3653 | goto unlock; |
| 3648 | 3654 | ||
| 3649 | error = ops->disable(rdev); | 3655 | error = _regulator_do_disable(rdev); |
| 3650 | if (error) | 3656 | if (error) |
| 3651 | ret = error; | 3657 | ret = error; |
| 3652 | } | 3658 | } |
| @@ -3820,7 +3826,7 @@ static int __init regulator_init_complete(void) | |||
| 3820 | ops = rdev->desc->ops; | 3826 | ops = rdev->desc->ops; |
| 3821 | c = rdev->constraints; | 3827 | c = rdev->constraints; |
| 3822 | 3828 | ||
| 3823 | if (!ops->disable || (c && c->always_on)) | 3829 | if (c && c->always_on) |
| 3824 | continue; | 3830 | continue; |
| 3825 | 3831 | ||
| 3826 | mutex_lock(&rdev->mutex); | 3832 | mutex_lock(&rdev->mutex); |
| @@ -3841,7 +3847,7 @@ static int __init regulator_init_complete(void) | |||
| 3841 | /* We log since this may kill the system if it | 3847 | /* We log since this may kill the system if it |
| 3842 | * goes wrong. */ | 3848 | * goes wrong. */ |
| 3843 | rdev_info(rdev, "disabling\n"); | 3849 | rdev_info(rdev, "disabling\n"); |
| 3844 | ret = ops->disable(rdev); | 3850 | ret = _regulator_do_disable(rdev); |
| 3845 | if (ret != 0) | 3851 | if (ret != 0) |
| 3846 | rdev_err(rdev, "couldn't disable: %d\n", ret); | 3852 | rdev_err(rdev, "couldn't disable: %d\n", ret); |
| 3847 | } else { | 3853 | } else { |
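The regulator_set_voltage() hunk above changes consumer-visible behaviour: if the requested window already contains the current output, the call succeeds even when the constraints do not permit voltage changes. A hedged consumer-side sketch ("vcc" is a hypothetical supply name):

	/* Sketch of the consumer-side effect of the core.c change above.
	 * Assume "vcc" maps to a fixed 1.8 V regulator whose constraints do
	 * not include REGULATOR_CHANGE_VOLTAGE. */
	struct regulator *reg = devm_regulator_get(dev, "vcc");
	int ret;

	if (IS_ERR(reg))
		return PTR_ERR(reg);

	/* 1.8 V lies inside [1.7 V, 1.9 V], so this now returns 0 instead of
	 * failing even though the regulator cannot actually change voltage. */
	ret = regulator_set_voltage(reg, 1700000, 1900000);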
diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c index 3adeaeffc485..fdb6ea8ae7e6 100644 --- a/drivers/regulator/da9052-regulator.c +++ b/drivers/regulator/da9052-regulator.c | |||
| @@ -240,6 +240,31 @@ static int da9052_regulator_set_voltage_sel(struct regulator_dev *rdev, | |||
| 240 | return ret; | 240 | return ret; |
| 241 | } | 241 | } |
| 242 | 242 | ||
| 243 | static int da9052_regulator_set_voltage_time_sel(struct regulator_dev *rdev, | ||
| 244 | unsigned int old_sel, | ||
| 245 | unsigned int new_sel) | ||
| 246 | { | ||
| 247 | struct da9052_regulator *regulator = rdev_get_drvdata(rdev); | ||
| 248 | struct da9052_regulator_info *info = regulator->info; | ||
| 249 | int id = rdev_get_id(rdev); | ||
| 250 | int ret = 0; | ||
| 251 | |||
| 252 | /* The DVC controlled LDOs and DCDCs ramp with 6.25mV/µs after enabling | ||
| 253 | * the activate bit. | ||
| 254 | */ | ||
| 255 | switch (id) { | ||
| 256 | case DA9052_ID_BUCK1: | ||
| 257 | case DA9052_ID_BUCK2: | ||
| 258 | case DA9052_ID_BUCK3: | ||
| 259 | case DA9052_ID_LDO2: | ||
| 260 | case DA9052_ID_LDO3: | ||
| 261 | ret = (new_sel - old_sel) * info->step_uV / 6250; | ||
| 262 | break; | ||
| 263 | } | ||
| 264 | |||
| 265 | return ret; | ||
| 266 | } | ||
| 267 | |||
| 243 | static struct regulator_ops da9052_dcdc_ops = { | 268 | static struct regulator_ops da9052_dcdc_ops = { |
| 244 | .get_current_limit = da9052_dcdc_get_current_limit, | 269 | .get_current_limit = da9052_dcdc_get_current_limit, |
| 245 | .set_current_limit = da9052_dcdc_set_current_limit, | 270 | .set_current_limit = da9052_dcdc_set_current_limit, |
| @@ -248,6 +273,7 @@ static struct regulator_ops da9052_dcdc_ops = { | |||
| 248 | .map_voltage = da9052_map_voltage, | 273 | .map_voltage = da9052_map_voltage, |
| 249 | .get_voltage_sel = regulator_get_voltage_sel_regmap, | 274 | .get_voltage_sel = regulator_get_voltage_sel_regmap, |
| 250 | .set_voltage_sel = da9052_regulator_set_voltage_sel, | 275 | .set_voltage_sel = da9052_regulator_set_voltage_sel, |
| 276 | .set_voltage_time_sel = da9052_regulator_set_voltage_time_sel, | ||
| 251 | .is_enabled = regulator_is_enabled_regmap, | 277 | .is_enabled = regulator_is_enabled_regmap, |
| 252 | .enable = regulator_enable_regmap, | 278 | .enable = regulator_enable_regmap, |
| 253 | .disable = regulator_disable_regmap, | 279 | .disable = regulator_disable_regmap, |
| @@ -258,6 +284,7 @@ static struct regulator_ops da9052_ldo_ops = { | |||
| 258 | .map_voltage = da9052_map_voltage, | 284 | .map_voltage = da9052_map_voltage, |
| 259 | .get_voltage_sel = regulator_get_voltage_sel_regmap, | 285 | .get_voltage_sel = regulator_get_voltage_sel_regmap, |
| 260 | .set_voltage_sel = da9052_regulator_set_voltage_sel, | 286 | .set_voltage_sel = da9052_regulator_set_voltage_sel, |
| 287 | .set_voltage_time_sel = da9052_regulator_set_voltage_time_sel, | ||
| 261 | .is_enabled = regulator_is_enabled_regmap, | 288 | .is_enabled = regulator_is_enabled_regmap, |
| 262 | .enable = regulator_enable_regmap, | 289 | .enable = regulator_enable_regmap, |
| 263 | .disable = regulator_disable_regmap, | 290 | .disable = regulator_disable_regmap, |
| @@ -401,7 +428,7 @@ static int da9052_regulator_probe(struct platform_device *pdev) | |||
| 401 | if (!nproot) | 428 | if (!nproot) |
| 402 | return -ENODEV; | 429 | return -ENODEV; |
| 403 | 430 | ||
| 404 | nproot = of_find_node_by_name(nproot, "regulators"); | 431 | nproot = of_get_child_by_name(nproot, "regulators"); |
| 405 | if (!nproot) | 432 | if (!nproot) |
| 406 | return -ENODEV; | 433 | return -ENODEV; |
| 407 | 434 | ||
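The new da9052 set_voltage_time_sel callback above converts a selector delta into a settle time using the 6.25 mV/µs DVC ramp rate. A worked number, assuming for illustration a rail with a 25 mV step (the step value is illustrative, not from the datasheet):

	/* (new_sel - old_sel) * step_uV / 6250, e.g. raising the selector by 4
	 * on a 25 mV/step rail:
	 *   4 * 25000 / 6250 = 16 us of ramp time reported to the core.
	 */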
diff --git a/drivers/regulator/da9055-regulator.c b/drivers/regulator/da9055-regulator.c index b14ebdad5dd2..9516317e1a9f 100644 --- a/drivers/regulator/da9055-regulator.c +++ b/drivers/regulator/da9055-regulator.c | |||
| @@ -19,6 +19,8 @@ | |||
| 19 | #include <linux/platform_device.h> | 19 | #include <linux/platform_device.h> |
| 20 | #include <linux/regulator/driver.h> | 20 | #include <linux/regulator/driver.h> |
| 21 | #include <linux/regulator/machine.h> | 21 | #include <linux/regulator/machine.h> |
| 22 | #include <linux/of.h> | ||
| 23 | #include <linux/regulator/of_regulator.h> | ||
| 22 | 24 | ||
| 23 | #include <linux/mfd/da9055/core.h> | 25 | #include <linux/mfd/da9055/core.h> |
| 24 | #include <linux/mfd/da9055/reg.h> | 26 | #include <linux/mfd/da9055/reg.h> |
| @@ -446,6 +448,9 @@ static int da9055_gpio_init(struct da9055_regulator *regulator, | |||
| 446 | struct da9055_regulator_info *info = regulator->info; | 448 | struct da9055_regulator_info *info = regulator->info; |
| 447 | int ret = 0; | 449 | int ret = 0; |
| 448 | 450 | ||
| 451 | if (!pdata) | ||
| 452 | return 0; | ||
| 453 | |||
| 449 | if (pdata->gpio_ren && pdata->gpio_ren[id]) { | 454 | if (pdata->gpio_ren && pdata->gpio_ren[id]) { |
| 450 | char name[18]; | 455 | char name[18]; |
| 451 | int gpio_mux = pdata->gpio_ren[id]; | 456 | int gpio_mux = pdata->gpio_ren[id]; |
| @@ -530,6 +535,59 @@ static inline struct da9055_regulator_info *find_regulator_info(int id) | |||
| 530 | return NULL; | 535 | return NULL; |
| 531 | } | 536 | } |
| 532 | 537 | ||
| 538 | #ifdef CONFIG_OF | ||
| 539 | static struct of_regulator_match da9055_reg_matches[] = { | ||
| 540 | { .name = "BUCK1", }, | ||
| 541 | { .name = "BUCK2", }, | ||
| 542 | { .name = "LDO1", }, | ||
| 543 | { .name = "LDO2", }, | ||
| 544 | { .name = "LDO3", }, | ||
| 545 | { .name = "LDO4", }, | ||
| 546 | { .name = "LDO5", }, | ||
| 547 | { .name = "LDO6", }, | ||
| 548 | }; | ||
| 549 | |||
| 550 | static int da9055_regulator_dt_init(struct platform_device *pdev, | ||
| 551 | struct da9055_regulator *regulator, | ||
| 552 | struct regulator_config *config, | ||
| 553 | int regid) | ||
| 554 | { | ||
| 555 | struct device_node *nproot, *np; | ||
| 556 | int ret; | ||
| 557 | |||
| 558 | nproot = of_node_get(pdev->dev.parent->of_node); | ||
| 559 | if (!nproot) | ||
| 560 | return -ENODEV; | ||
| 561 | |||
| 562 | np = of_get_child_by_name(nproot, "regulators"); | ||
| 563 | if (!np) | ||
| 564 | return -ENODEV; | ||
| 565 | |||
| 566 | ret = of_regulator_match(&pdev->dev, np, &da9055_reg_matches[regid], 1); | ||
| 567 | of_node_put(nproot); | ||
| 568 | if (ret < 0) { | ||
| 569 | dev_err(&pdev->dev, "Error matching regulator: %d\n", ret); | ||
| 570 | return ret; | ||
| 571 | } | ||
| 572 | |||
| 573 | config->init_data = da9055_reg_matches[regid].init_data; | ||
| 574 | config->of_node = da9055_reg_matches[regid].of_node; | ||
| 575 | |||
| 576 | if (!config->of_node) | ||
| 577 | return -ENODEV; | ||
| 578 | |||
| 579 | return 0; | ||
| 580 | } | ||
| 581 | #else | ||
| 582 | static inline int da9055_regulator_dt_init(struct platform_device *pdev, | ||
| 583 | struct da9055_regulator *regulator, | ||
| 584 | struct regulator_config *config, | ||
| 585 | int regid) | ||
| 586 | { | ||
| 587 | return -ENODEV; | ||
| 588 | } | ||
| 589 | #endif /* CONFIG_OF */ | ||
| 590 | |||
| 533 | static int da9055_regulator_probe(struct platform_device *pdev) | 591 | static int da9055_regulator_probe(struct platform_device *pdev) |
| 534 | { | 592 | { |
| 535 | struct regulator_config config = { }; | 593 | struct regulator_config config = { }; |
| @@ -538,9 +596,6 @@ static int da9055_regulator_probe(struct platform_device *pdev) | |||
| 538 | struct da9055_pdata *pdata = dev_get_platdata(da9055->dev); | 596 | struct da9055_pdata *pdata = dev_get_platdata(da9055->dev); |
| 539 | int ret, irq; | 597 | int ret, irq; |
| 540 | 598 | ||
| 541 | if (pdata == NULL || pdata->regulators[pdev->id] == NULL) | ||
| 542 | return -ENODEV; | ||
| 543 | |||
| 544 | regulator = devm_kzalloc(&pdev->dev, sizeof(struct da9055_regulator), | 599 | regulator = devm_kzalloc(&pdev->dev, sizeof(struct da9055_regulator), |
| 545 | GFP_KERNEL); | 600 | GFP_KERNEL); |
| 546 | if (!regulator) | 601 | if (!regulator) |
| @@ -557,8 +612,14 @@ static int da9055_regulator_probe(struct platform_device *pdev) | |||
| 557 | config.driver_data = regulator; | 612 | config.driver_data = regulator; |
| 558 | config.regmap = da9055->regmap; | 613 | config.regmap = da9055->regmap; |
| 559 | 614 | ||
| 560 | if (pdata && pdata->regulators) | 615 | if (pdata && pdata->regulators) { |
| 561 | config.init_data = pdata->regulators[pdev->id]; | 616 | config.init_data = pdata->regulators[pdev->id]; |
| 617 | } else { | ||
| 618 | ret = da9055_regulator_dt_init(pdev, regulator, &config, | ||
| 619 | pdev->id); | ||
| 620 | if (ret < 0) | ||
| 621 | return ret; | ||
| 622 | } | ||
| 562 | 623 | ||
| 563 | ret = da9055_gpio_init(regulator, &config, pdata, pdev->id); | 624 | ret = da9055_gpio_init(regulator, &config, pdata, pdev->id); |
| 564 | if (ret < 0) | 625 | if (ret < 0) |
diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c index 91e99a2c8dc1..7c9461d13313 100644 --- a/drivers/regulator/da9063-regulator.c +++ b/drivers/regulator/da9063-regulator.c | |||
| @@ -365,7 +365,7 @@ static int da9063_set_suspend_voltage(struct regulator_dev *rdev, int uV) | |||
| 365 | 365 | ||
| 366 | sel = regulator_map_voltage_linear(rdev, uV, uV); | 366 | sel = regulator_map_voltage_linear(rdev, uV, uV); |
| 367 | if (sel < 0) | 367 | if (sel < 0) |
| 368 | return -EINVAL; | 368 | return sel; |
| 369 | 369 | ||
| 370 | sel <<= ffs(rdev->desc->vsel_mask) - 1; | 370 | sel <<= ffs(rdev->desc->vsel_mask) - 1; |
| 371 | 371 | ||
| @@ -666,7 +666,7 @@ static struct da9063_regulators_pdata *da9063_parse_regulators_dt( | |||
| 666 | struct device_node *node; | 666 | struct device_node *node; |
| 667 | int i, n, num; | 667 | int i, n, num; |
| 668 | 668 | ||
| 669 | node = of_find_node_by_name(pdev->dev.parent->of_node, "regulators"); | 669 | node = of_get_child_by_name(pdev->dev.parent->of_node, "regulators"); |
| 670 | if (!node) { | 670 | if (!node) { |
| 671 | dev_err(&pdev->dev, "Regulators device node not found\n"); | 671 | dev_err(&pdev->dev, "Regulators device node not found\n"); |
| 672 | return ERR_PTR(-ENODEV); | 672 | return ERR_PTR(-ENODEV); |
| @@ -674,6 +674,7 @@ static struct da9063_regulators_pdata *da9063_parse_regulators_dt( | |||
| 674 | 674 | ||
| 675 | num = of_regulator_match(&pdev->dev, node, da9063_matches, | 675 | num = of_regulator_match(&pdev->dev, node, da9063_matches, |
| 676 | ARRAY_SIZE(da9063_matches)); | 676 | ARRAY_SIZE(da9063_matches)); |
| 677 | of_node_put(node); | ||
| 677 | if (num < 0) { | 678 | if (num < 0) { |
| 678 | dev_err(&pdev->dev, "Failed to match regulators\n"); | 679 | dev_err(&pdev->dev, "Failed to match regulators\n"); |
| 679 | return ERR_PTR(-EINVAL); | 680 | return ERR_PTR(-EINVAL); |
| @@ -710,7 +711,7 @@ static struct da9063_regulators_pdata *da9063_parse_regulators_dt( | |||
| 710 | struct platform_device *pdev, | 711 | struct platform_device *pdev, |
| 711 | struct of_regulator_match **da9063_reg_matches) | 712 | struct of_regulator_match **da9063_reg_matches) |
| 712 | { | 713 | { |
| 713 | da9063_reg_matches = NULL; | 714 | *da9063_reg_matches = NULL; |
| 714 | return ERR_PTR(-ENODEV); | 715 | return ERR_PTR(-ENODEV); |
| 715 | } | 716 | } |
| 716 | #endif | 717 | #endif |
| @@ -756,7 +757,7 @@ static int da9063_regulator_probe(struct platform_device *pdev) | |||
| 756 | if (ret < 0) { | 757 | if (ret < 0) { |
| 757 | dev_err(&pdev->dev, | 758 | dev_err(&pdev->dev, |
| 758 | "Error while reading BUCKs configuration\n"); | 759 | "Error while reading BUCKs configuration\n"); |
| 759 | return -EIO; | 760 | return ret; |
| 760 | } | 761 | } |
| 761 | bcores_merged = val & DA9063_BCORE_MERGE; | 762 | bcores_merged = val & DA9063_BCORE_MERGE; |
| 762 | bmem_bio_merged = val & DA9063_BUCK_MERGE; | 763 | bmem_bio_merged = val & DA9063_BUCK_MERGE; |
| @@ -775,10 +776,8 @@ static int da9063_regulator_probe(struct platform_device *pdev) | |||
| 775 | size = sizeof(struct da9063_regulators) + | 776 | size = sizeof(struct da9063_regulators) + |
| 776 | n_regulators * sizeof(struct da9063_regulator); | 777 | n_regulators * sizeof(struct da9063_regulator); |
| 777 | regulators = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); | 778 | regulators = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); |
| 778 | if (!regulators) { | 779 | if (!regulators) |
| 779 | dev_err(&pdev->dev, "No memory for regulators\n"); | ||
| 780 | return -ENOMEM; | 780 | return -ENOMEM; |
| 781 | } | ||
| 782 | 781 | ||
| 783 | regulators->n_regulators = n_regulators; | 782 | regulators->n_regulators = n_regulators; |
| 784 | platform_set_drvdata(pdev, regulators); | 783 | platform_set_drvdata(pdev, regulators); |
diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c index 6f5ecbe1132e..7a320dd11c46 100644 --- a/drivers/regulator/da9210-regulator.c +++ b/drivers/regulator/da9210-regulator.c | |||
| @@ -134,11 +134,8 @@ static int da9210_i2c_probe(struct i2c_client *i2c, | |||
| 134 | int error; | 134 | int error; |
| 135 | 135 | ||
| 136 | chip = devm_kzalloc(&i2c->dev, sizeof(struct da9210), GFP_KERNEL); | 136 | chip = devm_kzalloc(&i2c->dev, sizeof(struct da9210), GFP_KERNEL); |
| 137 | if (NULL == chip) { | 137 | if (!chip) |
| 138 | dev_err(&i2c->dev, | ||
| 139 | "Cannot kzalloc memory for regulator structure\n"); | ||
| 140 | return -ENOMEM; | 138 | return -ENOMEM; |
| 141 | } | ||
| 142 | 139 | ||
| 143 | chip->regmap = devm_regmap_init_i2c(i2c, &da9210_regmap_config); | 140 | chip->regmap = devm_regmap_init_i2c(i2c, &da9210_regmap_config); |
| 144 | if (IS_ERR(chip->regmap)) { | 141 | if (IS_ERR(chip->regmap)) { |
diff --git a/drivers/regulator/db8500-prcmu.c b/drivers/regulator/db8500-prcmu.c index 846acf240e48..617c1adca816 100644 --- a/drivers/regulator/db8500-prcmu.c +++ b/drivers/regulator/db8500-prcmu.c | |||
| @@ -263,6 +263,8 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = { | |||
| 263 | .ops = &db8500_regulator_ops, | 263 | .ops = &db8500_regulator_ops, |
| 264 | .type = REGULATOR_VOLTAGE, | 264 | .type = REGULATOR_VOLTAGE, |
| 265 | .owner = THIS_MODULE, | 265 | .owner = THIS_MODULE, |
| 266 | .fixed_uV = 1800000, | ||
| 267 | .n_voltages = 1, | ||
| 266 | }, | 268 | }, |
| 267 | .exclude_from_power_state = true, | 269 | .exclude_from_power_state = true, |
| 268 | }, | 270 | }, |
diff --git a/drivers/regulator/dbx500-prcmu.c b/drivers/regulator/dbx500-prcmu.c index ce89f7848a57..2d16b9f16de7 100644 --- a/drivers/regulator/dbx500-prcmu.c +++ b/drivers/regulator/dbx500-prcmu.c | |||
| @@ -78,6 +78,7 @@ static struct ux500_regulator_debug { | |||
| 78 | void ux500_regulator_suspend_debug(void) | 78 | void ux500_regulator_suspend_debug(void) |
| 79 | { | 79 | { |
| 80 | int i; | 80 | int i; |
| 81 | |||
| 81 | for (i = 0; i < rdebug.num_regulators; i++) | 82 | for (i = 0; i < rdebug.num_regulators; i++) |
| 82 | rdebug.state_before_suspend[i] = | 83 | rdebug.state_before_suspend[i] = |
| 83 | rdebug.regulator_array[i].is_enabled; | 84 | rdebug.regulator_array[i].is_enabled; |
| @@ -86,6 +87,7 @@ void ux500_regulator_suspend_debug(void) | |||
| 86 | void ux500_regulator_resume_debug(void) | 87 | void ux500_regulator_resume_debug(void) |
| 87 | { | 88 | { |
| 88 | int i; | 89 | int i; |
| 90 | |||
| 89 | for (i = 0; i < rdebug.num_regulators; i++) | 91 | for (i = 0; i < rdebug.num_regulators; i++) |
| 90 | rdebug.state_after_suspend[i] = | 92 | rdebug.state_after_suspend[i] = |
| 91 | rdebug.regulator_array[i].is_enabled; | 93 | rdebug.regulator_array[i].is_enabled; |
| @@ -127,9 +129,9 @@ static int ux500_regulator_status_print(struct seq_file *s, void *p) | |||
| 127 | int i; | 129 | int i; |
| 128 | 130 | ||
| 129 | /* print dump header */ | 131 | /* print dump header */ |
| 130 | err = seq_printf(s, "ux500-regulator status:\n"); | 132 | err = seq_puts(s, "ux500-regulator status:\n"); |
| 131 | if (err < 0) | 133 | if (err < 0) |
| 132 | dev_err(dev, "seq_printf overflow\n"); | 134 | dev_err(dev, "seq_puts overflow\n"); |
| 133 | 135 | ||
| 134 | err = seq_printf(s, "%31s : %8s : %8s\n", "current", | 136 | err = seq_printf(s, "%31s : %8s : %8s\n", "current", |
| 135 | "before", "after"); | 137 | "before", "after"); |
| @@ -202,18 +204,12 @@ ux500_regulator_debug_init(struct platform_device *pdev, | |||
| 202 | rdebug.num_regulators = num_regulators; | 204 | rdebug.num_regulators = num_regulators; |
| 203 | 205 | ||
| 204 | rdebug.state_before_suspend = kzalloc(num_regulators, GFP_KERNEL); | 206 | rdebug.state_before_suspend = kzalloc(num_regulators, GFP_KERNEL); |
| 205 | if (!rdebug.state_before_suspend) { | 207 | if (!rdebug.state_before_suspend) |
| 206 | dev_err(&pdev->dev, | ||
| 207 | "could not allocate memory for saving state\n"); | ||
| 208 | goto exit_destroy_power_state; | 208 | goto exit_destroy_power_state; |
| 209 | } | ||
| 210 | 209 | ||
| 211 | rdebug.state_after_suspend = kzalloc(num_regulators, GFP_KERNEL); | 210 | rdebug.state_after_suspend = kzalloc(num_regulators, GFP_KERNEL); |
| 212 | if (!rdebug.state_after_suspend) { | 211 | if (!rdebug.state_after_suspend) |
| 213 | dev_err(&pdev->dev, | ||
| 214 | "could not allocate memory for saving state\n"); | ||
| 215 | goto exit_free; | 212 | goto exit_free; |
| 216 | } | ||
| 217 | 213 | ||
| 218 | dbx500_regulator_testcase(regulator_info, num_regulators); | 214 | dbx500_regulator_testcase(regulator_info, num_regulators); |
| 219 | return 0; | 215 | return 0; |
diff --git a/drivers/regulator/dummy.c b/drivers/regulator/dummy.c index df9f42524abb..2436db9e2ca3 100644 --- a/drivers/regulator/dummy.c +++ b/drivers/regulator/dummy.c | |||
| @@ -25,7 +25,11 @@ | |||
| 25 | 25 | ||
| 26 | struct regulator_dev *dummy_regulator_rdev; | 26 | struct regulator_dev *dummy_regulator_rdev; |
| 27 | 27 | ||
| 28 | static struct regulator_init_data dummy_initdata; | 28 | static struct regulator_init_data dummy_initdata = { |
| 29 | .constraints = { | ||
| 30 | .always_on = 1, | ||
| 31 | }, | ||
| 32 | }; | ||
| 29 | 33 | ||
| 30 | static struct regulator_ops dummy_ops; | 34 | static struct regulator_ops dummy_ops; |
| 31 | 35 | ||
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c index 7ca3d9e3b0fe..714fd9a89aa1 100644 --- a/drivers/regulator/fan53555.c +++ b/drivers/regulator/fan53555.c | |||
| @@ -90,11 +90,11 @@ static int fan53555_set_suspend_voltage(struct regulator_dev *rdev, int uV) | |||
| 90 | return 0; | 90 | return 0; |
| 91 | ret = regulator_map_voltage_linear(rdev, uV, uV); | 91 | ret = regulator_map_voltage_linear(rdev, uV, uV); |
| 92 | if (ret < 0) | 92 | if (ret < 0) |
| 93 | return -EINVAL; | 93 | return ret; |
| 94 | ret = regmap_update_bits(di->regmap, di->sleep_reg, | 94 | ret = regmap_update_bits(di->regmap, di->sleep_reg, |
| 95 | VSEL_NSEL_MASK, ret); | 95 | VSEL_NSEL_MASK, ret); |
| 96 | if (ret < 0) | 96 | if (ret < 0) |
| 97 | return -EINVAL; | 97 | return ret; |
| 98 | /* Cache the sleep voltage setting. | 98 | /* Cache the sleep voltage setting. |
| 99 | * Might not be the real voltage which is rounded */ | 99 | * Might not be the real voltage which is rounded */ |
| 100 | di->sleep_vol_cache = uV; | 100 | di->sleep_vol_cache = uV; |
| @@ -244,10 +244,9 @@ static int fan53555_regulator_probe(struct i2c_client *client, | |||
| 244 | 244 | ||
| 245 | di = devm_kzalloc(&client->dev, sizeof(struct fan53555_device_info), | 245 | di = devm_kzalloc(&client->dev, sizeof(struct fan53555_device_info), |
| 246 | GFP_KERNEL); | 246 | GFP_KERNEL); |
| 247 | if (!di) { | 247 | if (!di) |
| 248 | dev_err(&client->dev, "Failed to allocate device info data!\n"); | ||
| 249 | return -ENOMEM; | 248 | return -ENOMEM; |
| 250 | } | 249 | |
| 251 | di->regmap = devm_regmap_init_i2c(client, &fan53555_regmap_config); | 250 | di->regmap = devm_regmap_init_i2c(client, &fan53555_regmap_config); |
| 252 | if (IS_ERR(di->regmap)) { | 251 | if (IS_ERR(di->regmap)) { |
| 253 | dev_err(&client->dev, "Failed to allocate regmap!\n"); | 252 | dev_err(&client->dev, "Failed to allocate regmap!\n"); |
| @@ -260,14 +259,14 @@ static int fan53555_regulator_probe(struct i2c_client *client, | |||
| 260 | ret = regmap_read(di->regmap, FAN53555_ID1, &val); | 259 | ret = regmap_read(di->regmap, FAN53555_ID1, &val); |
| 261 | if (ret < 0) { | 260 | if (ret < 0) { |
| 262 | dev_err(&client->dev, "Failed to get chip ID!\n"); | 261 | dev_err(&client->dev, "Failed to get chip ID!\n"); |
| 263 | return -ENODEV; | 262 | return ret; |
| 264 | } | 263 | } |
| 265 | di->chip_id = val & DIE_ID; | 264 | di->chip_id = val & DIE_ID; |
| 266 | /* Get chip revision */ | 265 | /* Get chip revision */ |
| 267 | ret = regmap_read(di->regmap, FAN53555_ID2, &val); | 266 | ret = regmap_read(di->regmap, FAN53555_ID2, &val); |
| 268 | if (ret < 0) { | 267 | if (ret < 0) { |
| 269 | dev_err(&client->dev, "Failed to get chip Rev!\n"); | 268 | dev_err(&client->dev, "Failed to get chip Rev!\n"); |
| 270 | return -ENODEV; | 269 | return ret; |
| 271 | } | 270 | } |
| 272 | di->chip_rev = val & DIE_REV; | 271 | di->chip_rev = val & DIE_REV; |
| 273 | dev_info(&client->dev, "FAN53555 Option[%d] Rev[%d] Detected!\n", | 272 | dev_info(&client->dev, "FAN53555 Option[%d] Rev[%d] Detected!\n", |
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c index 5ea64b94341c..c61f7e97e4f8 100644 --- a/drivers/regulator/fixed.c +++ b/drivers/regulator/fixed.c | |||
| @@ -130,17 +130,15 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev) | |||
| 130 | 130 | ||
| 131 | drvdata = devm_kzalloc(&pdev->dev, sizeof(struct fixed_voltage_data), | 131 | drvdata = devm_kzalloc(&pdev->dev, sizeof(struct fixed_voltage_data), |
| 132 | GFP_KERNEL); | 132 | GFP_KERNEL); |
| 133 | if (drvdata == NULL) { | 133 | if (!drvdata) |
| 134 | dev_err(&pdev->dev, "Failed to allocate device data\n"); | 134 | return -ENOMEM; |
| 135 | ret = -ENOMEM; | ||
| 136 | goto err; | ||
| 137 | } | ||
| 138 | 135 | ||
| 139 | drvdata->desc.name = kstrdup(config->supply_name, GFP_KERNEL); | 136 | drvdata->desc.name = devm_kstrdup(&pdev->dev, |
| 137 | config->supply_name, | ||
| 138 | GFP_KERNEL); | ||
| 140 | if (drvdata->desc.name == NULL) { | 139 | if (drvdata->desc.name == NULL) { |
| 141 | dev_err(&pdev->dev, "Failed to allocate supply name\n"); | 140 | dev_err(&pdev->dev, "Failed to allocate supply name\n"); |
| 142 | ret = -ENOMEM; | 141 | return -ENOMEM; |
| 143 | goto err; | ||
| 144 | } | 142 | } |
| 145 | drvdata->desc.type = REGULATOR_VOLTAGE; | 143 | drvdata->desc.type = REGULATOR_VOLTAGE; |
| 146 | drvdata->desc.owner = THIS_MODULE; | 144 | drvdata->desc.owner = THIS_MODULE; |
| @@ -149,13 +147,13 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev) | |||
| 149 | drvdata->desc.enable_time = config->startup_delay; | 147 | drvdata->desc.enable_time = config->startup_delay; |
| 150 | 148 | ||
| 151 | if (config->input_supply) { | 149 | if (config->input_supply) { |
| 152 | drvdata->desc.supply_name = kstrdup(config->input_supply, | 150 | drvdata->desc.supply_name = devm_kstrdup(&pdev->dev, |
| 153 | GFP_KERNEL); | 151 | config->input_supply, |
| 152 | GFP_KERNEL); | ||
| 154 | if (!drvdata->desc.supply_name) { | 153 | if (!drvdata->desc.supply_name) { |
| 155 | dev_err(&pdev->dev, | 154 | dev_err(&pdev->dev, |
| 156 | "Failed to allocate input supply\n"); | 155 | "Failed to allocate input supply\n"); |
| 157 | ret = -ENOMEM; | 156 | return -ENOMEM; |
| 158 | goto err_name; | ||
| 159 | } | 157 | } |
| 160 | } | 158 | } |
| 161 | 159 | ||
| @@ -186,11 +184,12 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev) | |||
| 186 | cfg.driver_data = drvdata; | 184 | cfg.driver_data = drvdata; |
| 187 | cfg.of_node = pdev->dev.of_node; | 185 | cfg.of_node = pdev->dev.of_node; |
| 188 | 186 | ||
| 189 | drvdata->dev = regulator_register(&drvdata->desc, &cfg); | 187 | drvdata->dev = devm_regulator_register(&pdev->dev, &drvdata->desc, |
| 188 | &cfg); | ||
| 190 | if (IS_ERR(drvdata->dev)) { | 189 | if (IS_ERR(drvdata->dev)) { |
| 191 | ret = PTR_ERR(drvdata->dev); | 190 | ret = PTR_ERR(drvdata->dev); |
| 192 | dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret); | 191 | dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret); |
| 193 | goto err_input; | 192 | return ret; |
| 194 | } | 193 | } |
| 195 | 194 | ||
| 196 | platform_set_drvdata(pdev, drvdata); | 195 | platform_set_drvdata(pdev, drvdata); |
| @@ -199,24 +198,6 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev) | |||
| 199 | drvdata->desc.fixed_uV); | 198 | drvdata->desc.fixed_uV); |
| 200 | 199 | ||
| 201 | return 0; | 200 | return 0; |
| 202 | |||
| 203 | err_input: | ||
| 204 | kfree(drvdata->desc.supply_name); | ||
| 205 | err_name: | ||
| 206 | kfree(drvdata->desc.name); | ||
| 207 | err: | ||
| 208 | return ret; | ||
| 209 | } | ||
| 210 | |||
| 211 | static int reg_fixed_voltage_remove(struct platform_device *pdev) | ||
| 212 | { | ||
| 213 | struct fixed_voltage_data *drvdata = platform_get_drvdata(pdev); | ||
| 214 | |||
| 215 | regulator_unregister(drvdata->dev); | ||
| 216 | kfree(drvdata->desc.supply_name); | ||
| 217 | kfree(drvdata->desc.name); | ||
| 218 | |||
| 219 | return 0; | ||
| 220 | } | 201 | } |
| 221 | 202 | ||
| 222 | #if defined(CONFIG_OF) | 203 | #if defined(CONFIG_OF) |
| @@ -229,7 +210,6 @@ MODULE_DEVICE_TABLE(of, fixed_of_match); | |||
| 229 | 210 | ||
| 230 | static struct platform_driver regulator_fixed_voltage_driver = { | 211 | static struct platform_driver regulator_fixed_voltage_driver = { |
| 231 | .probe = reg_fixed_voltage_probe, | 212 | .probe = reg_fixed_voltage_probe, |
| 232 | .remove = reg_fixed_voltage_remove, | ||
| 233 | .driver = { | 213 | .driver = { |
| 234 | .name = "reg-fixed-voltage", | 214 | .name = "reg-fixed-voltage", |
| 235 | .owner = THIS_MODULE, | 215 | .owner = THIS_MODULE, |
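The fixed.c conversion above is a straight move to device-managed allocations, which is why the whole error-unwind ladder and the .remove callback disappear. The general shape of the pattern, using the same calls as the hunk:

	/* devm_* resources are released automatically when the device is
	 * unbound, so probe can simply return on error and no remove()
	 * callback is needed to undo the registration. */
	drvdata->desc.name = devm_kstrdup(&pdev->dev, config->supply_name,
					  GFP_KERNEL);
	if (!drvdata->desc.name)
		return -ENOMEM;

	drvdata->dev = devm_regulator_register(&pdev->dev, &drvdata->desc, &cfg);
	if (IS_ERR(drvdata->dev))
		return PTR_ERR(drvdata->dev);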
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c index c0a1d00b78c9..989b23b377c0 100644 --- a/drivers/regulator/gpio-regulator.c +++ b/drivers/regulator/gpio-regulator.c | |||
| @@ -136,7 +136,6 @@ static struct gpio_regulator_config * | |||
| 136 | of_get_gpio_regulator_config(struct device *dev, struct device_node *np) | 136 | of_get_gpio_regulator_config(struct device *dev, struct device_node *np) |
| 137 | { | 137 | { |
| 138 | struct gpio_regulator_config *config; | 138 | struct gpio_regulator_config *config; |
| 139 | struct property *prop; | ||
| 140 | const char *regtype; | 139 | const char *regtype; |
| 141 | int proplen, gpio, i; | 140 | int proplen, gpio, i; |
| 142 | int ret; | 141 | int ret; |
| @@ -172,22 +171,35 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np) | |||
| 172 | if (!config->gpios) | 171 | if (!config->gpios) |
| 173 | return ERR_PTR(-ENOMEM); | 172 | return ERR_PTR(-ENOMEM); |
| 174 | 173 | ||
| 174 | proplen = of_property_count_u32_elems(np, "gpios-states"); | ||
| 175 | /* optional property */ | ||
| 176 | if (proplen < 0) | ||
| 177 | proplen = 0; | ||
| 178 | |||
| 179 | if (proplen > 0 && proplen != config->nr_gpios) { | ||
| 180 | dev_warn(dev, "gpios <-> gpios-states mismatch\n"); | ||
| 181 | proplen = 0; | ||
| 182 | } | ||
| 183 | |||
| 175 | for (i = 0; i < config->nr_gpios; i++) { | 184 | for (i = 0; i < config->nr_gpios; i++) { |
| 176 | gpio = of_get_named_gpio(np, "gpios", i); | 185 | gpio = of_get_named_gpio(np, "gpios", i); |
| 177 | if (gpio < 0) | 186 | if (gpio < 0) |
| 178 | break; | 187 | break; |
| 179 | config->gpios[i].gpio = gpio; | 188 | config->gpios[i].gpio = gpio; |
| 189 | if (proplen > 0) { | ||
| 190 | of_property_read_u32_index(np, "gpios-states", i, &ret); | ||
| 191 | if (ret) | ||
| 192 | config->gpios[i].flags = GPIOF_OUT_INIT_HIGH; | ||
| 193 | } | ||
| 180 | } | 194 | } |
| 181 | 195 | ||
| 182 | /* Fetch states. */ | 196 | /* Fetch states. */ |
| 183 | prop = of_find_property(np, "states", NULL); | 197 | proplen = of_property_count_u32_elems(np, "states"); |
| 184 | if (!prop) { | 198 | if (proplen < 0) { |
| 185 | dev_err(dev, "No 'states' property found\n"); | 199 | dev_err(dev, "No 'states' property found\n"); |
| 186 | return ERR_PTR(-EINVAL); | 200 | return ERR_PTR(-EINVAL); |
| 187 | } | 201 | } |
| 188 | 202 | ||
| 189 | proplen = prop->length / sizeof(int); | ||
| 190 | |||
| 191 | config->states = devm_kzalloc(dev, | 203 | config->states = devm_kzalloc(dev, |
| 192 | sizeof(struct gpio_regulator_state) | 204 | sizeof(struct gpio_regulator_state) |
| 193 | * (proplen / 2), | 205 | * (proplen / 2), |
| @@ -196,10 +208,10 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np) | |||
| 196 | return ERR_PTR(-ENOMEM); | 208 | return ERR_PTR(-ENOMEM); |
| 197 | 209 | ||
| 198 | for (i = 0; i < proplen / 2; i++) { | 210 | for (i = 0; i < proplen / 2; i++) { |
| 199 | config->states[i].value = | 211 | of_property_read_u32_index(np, "states", i * 2, |
| 200 | be32_to_cpup((int *)prop->value + (i * 2)); | 212 | &config->states[i].value); |
| 201 | config->states[i].gpios = | 213 | of_property_read_u32_index(np, "states", i * 2 + 1, |
| 202 | be32_to_cpup((int *)prop->value + (i * 2 + 1)); | 214 | &config->states[i].gpios); |
| 203 | } | 215 | } |
| 204 | config->nr_states = i; | 216 | config->nr_states = i; |
| 205 | 217 | ||
| @@ -239,10 +251,8 @@ static int gpio_regulator_probe(struct platform_device *pdev) | |||
| 239 | 251 | ||
| 240 | drvdata = devm_kzalloc(&pdev->dev, sizeof(struct gpio_regulator_data), | 252 | drvdata = devm_kzalloc(&pdev->dev, sizeof(struct gpio_regulator_data), |
| 241 | GFP_KERNEL); | 253 | GFP_KERNEL); |
| 242 | if (drvdata == NULL) { | 254 | if (drvdata == NULL) |
| 243 | dev_err(&pdev->dev, "Failed to allocate device data\n"); | ||
| 244 | return -ENOMEM; | 255 | return -ENOMEM; |
| 245 | } | ||
| 246 | 256 | ||
| 247 | drvdata->desc.name = kstrdup(config->supply_name, GFP_KERNEL); | 257 | drvdata->desc.name = kstrdup(config->supply_name, GFP_KERNEL); |
| 248 | if (drvdata->desc.name == NULL) { | 258 | if (drvdata->desc.name == NULL) { |
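The hunk above replaces direct access to the raw property value (of_find_property() plus be32_to_cpup()) with the counted accessors of_property_count_u32_elems() and of_property_read_u32_index(). A minimal sketch of that pattern, reduced to just the "states" pairs; the function and variable names are illustrative:

#include <linux/of.h>

static int example_read_states(struct device_node *np)
{
    int proplen, i;
    u32 value, gpios;

    /* number of u32 cells, or a negative errno if absent/malformed */
    proplen = of_property_count_u32_elems(np, "states");
    if (proplen < 0)
        return proplen;

    for (i = 0; i < proplen / 2; i++) {
        /* each state is a <value gpios> pair of cells */
        of_property_read_u32_index(np, "states", i * 2, &value);
        of_property_read_u32_index(np, "states", i * 2 + 1, &gpios);
        /* the pair would be stored, e.g. in config->states[i], here */
    }

    return 0;
}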
diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c index e221a271ba56..cbc39096c78d 100644 --- a/drivers/regulator/helpers.c +++ b/drivers/regulator/helpers.c | |||
| @@ -37,10 +37,17 @@ int regulator_is_enabled_regmap(struct regulator_dev *rdev) | |||
| 37 | if (ret != 0) | 37 | if (ret != 0) |
| 38 | return ret; | 38 | return ret; |
| 39 | 39 | ||
| 40 | if (rdev->desc->enable_is_inverted) | 40 | val &= rdev->desc->enable_mask; |
| 41 | return (val & rdev->desc->enable_mask) == 0; | 41 | |
| 42 | else | 42 | if (rdev->desc->enable_is_inverted) { |
| 43 | return (val & rdev->desc->enable_mask) != 0; | 43 | if (rdev->desc->enable_val) |
| 44 | return val != rdev->desc->enable_val; | ||
| 45 | return val == 0; | ||
| 46 | } else { | ||
| 47 | if (rdev->desc->enable_val) | ||
| 48 | return val == rdev->desc->enable_val; | ||
| 49 | return val != 0; | ||
| 50 | } | ||
| 44 | } | 51 | } |
| 45 | EXPORT_SYMBOL_GPL(regulator_is_enabled_regmap); | 52 | EXPORT_SYMBOL_GPL(regulator_is_enabled_regmap); |
| 46 | 53 | ||
| @@ -57,10 +64,13 @@ int regulator_enable_regmap(struct regulator_dev *rdev) | |||
| 57 | { | 64 | { |
| 58 | unsigned int val; | 65 | unsigned int val; |
| 59 | 66 | ||
| 60 | if (rdev->desc->enable_is_inverted) | 67 | if (rdev->desc->enable_is_inverted) { |
| 61 | val = 0; | 68 | val = rdev->desc->disable_val; |
| 62 | else | 69 | } else { |
| 63 | val = rdev->desc->enable_mask; | 70 | val = rdev->desc->enable_val; |
| 71 | if (!val) | ||
| 72 | val = rdev->desc->enable_mask; | ||
| 73 | } | ||
| 64 | 74 | ||
| 65 | return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, | 75 | return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, |
| 66 | rdev->desc->enable_mask, val); | 76 | rdev->desc->enable_mask, val); |
| @@ -80,10 +90,13 @@ int regulator_disable_regmap(struct regulator_dev *rdev) | |||
| 80 | { | 90 | { |
| 81 | unsigned int val; | 91 | unsigned int val; |
| 82 | 92 | ||
| 83 | if (rdev->desc->enable_is_inverted) | 93 | if (rdev->desc->enable_is_inverted) { |
| 84 | val = rdev->desc->enable_mask; | 94 | val = rdev->desc->enable_val; |
| 85 | else | 95 | if (!val) |
| 86 | val = 0; | 96 | val = rdev->desc->enable_mask; |
| 97 | } else { | ||
| 98 | val = rdev->desc->disable_val; | ||
| 99 | } | ||
| 87 | 100 | ||
| 88 | return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, | 101 | return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, |
| 89 | rdev->desc->enable_mask, val); | 102 | rdev->desc->enable_mask, val); |
| @@ -419,10 +432,13 @@ int regulator_set_bypass_regmap(struct regulator_dev *rdev, bool enable) | |||
| 419 | { | 432 | { |
| 420 | unsigned int val; | 433 | unsigned int val; |
| 421 | 434 | ||
| 422 | if (enable) | 435 | if (enable) { |
| 423 | val = rdev->desc->bypass_mask; | 436 | val = rdev->desc->bypass_val_on; |
| 424 | else | 437 | if (!val) |
| 425 | val = 0; | 438 | val = rdev->desc->bypass_mask; |
| 439 | } else { | ||
| 440 | val = rdev->desc->bypass_val_off; | ||
| 441 | } | ||
| 426 | 442 | ||
| 427 | return regmap_update_bits(rdev->regmap, rdev->desc->bypass_reg, | 443 | return regmap_update_bits(rdev->regmap, rdev->desc->bypass_reg, |
| 428 | rdev->desc->bypass_mask, val); | 444 | rdev->desc->bypass_mask, val); |
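The helpers.c changes teach the regmap helpers about descriptors whose enable field is wider than a single bit: regulator_enable_regmap() now writes enable_val (falling back to enable_mask when it is zero), regulator_disable_regmap() writes disable_val, regulator_is_enabled_regmap() compares the masked register value against enable_val, and regulator_set_bypass_regmap() gains the analogous bypass_val_on/bypass_val_off. A hypothetical descriptor using the new fields might look like this; the register offset and field layout are made up for illustration:

#include <linux/regulator/driver.h>

static const struct regulator_desc example_ldo_desc = {
    .name        = "example-ldo",
    .type        = REGULATOR_VOLTAGE,
    .owner       = THIS_MODULE,
    /* .ops and the voltage range are omitted from this sketch */
    .enable_reg  = 0x12,    /* hypothetical control register */
    .enable_mask = 0x30,    /* two-bit operating-mode field */
    .enable_val  = 0x30,    /* field value meaning "enabled" */
    .disable_val = 0x00,    /* field value meaning "disabled" */
};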
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c index 3b1102b75071..66fd2330dca0 100644 --- a/drivers/regulator/lp3971.c +++ b/drivers/regulator/lp3971.c | |||
| @@ -327,7 +327,7 @@ static int lp3971_i2c_read(struct i2c_client *i2c, char reg, int count, | |||
| 327 | return -EIO; | 327 | return -EIO; |
| 328 | ret = i2c_smbus_read_byte_data(i2c, reg); | 328 | ret = i2c_smbus_read_byte_data(i2c, reg); |
| 329 | if (ret < 0) | 329 | if (ret < 0) |
| 330 | return -EIO; | 330 | return ret; |
| 331 | 331 | ||
| 332 | *dest = ret; | 332 | *dest = ret; |
| 333 | return 0; | 333 | return 0; |
diff --git a/drivers/regulator/lp872x.c b/drivers/regulator/lp872x.c index 2e4734ff79fc..2e022aabd951 100644 --- a/drivers/regulator/lp872x.c +++ b/drivers/regulator/lp872x.c | |||
| @@ -211,7 +211,7 @@ static int lp872x_get_timestep_usec(struct lp872x *lp) | |||
| 211 | 211 | ||
| 212 | ret = lp872x_read_byte(lp, LP872X_GENERAL_CFG, &val); | 212 | ret = lp872x_read_byte(lp, LP872X_GENERAL_CFG, &val); |
| 213 | if (ret) | 213 | if (ret) |
| 214 | return -EINVAL; | 214 | return ret; |
| 215 | 215 | ||
| 216 | val = (val & mask) >> shift; | 216 | val = (val & mask) >> shift; |
| 217 | if (val >= size) | 217 | if (val >= size) |
| @@ -229,7 +229,7 @@ static int lp872x_regulator_enable_time(struct regulator_dev *rdev) | |||
| 229 | u8 addr, val; | 229 | u8 addr, val; |
| 230 | 230 | ||
| 231 | if (time_step_us < 0) | 231 | if (time_step_us < 0) |
| 232 | return -EINVAL; | 232 | return time_step_us; |
| 233 | 233 | ||
| 234 | switch (rid) { | 234 | switch (rid) { |
| 235 | case LP8720_ID_LDO1 ... LP8720_ID_BUCK: | 235 | case LP8720_ID_LDO1 ... LP8720_ID_BUCK: |
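Both the lp3971 and lp872x hunks above stop replacing the error returned by the lower layer with a generic code (-EIO or -EINVAL) and simply propagate it. The common shape of that pattern, with hypothetical names:

#include <linux/i2c.h>

static int example_read_byte(struct i2c_client *i2c, u8 reg, u8 *dest)
{
    int ret = i2c_smbus_read_byte_data(i2c, reg);

    if (ret < 0)
        return ret;    /* was "return -EIO;": keep the real errno */

    *dest = ret;
    return 0;
}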
diff --git a/drivers/regulator/max14577.c b/drivers/regulator/max14577.c index e0619526708c..ed60baaeceec 100644 --- a/drivers/regulator/max14577.c +++ b/drivers/regulator/max14577.c | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * max14577.c - Regulator driver for the Maxim 14577 | 2 | * max14577.c - Regulator driver for the Maxim 14577 |
| 3 | * | 3 | * |
| 4 | * Copyright (C) 2013 Samsung Electronics | 4 | * Copyright (C) 2013,2014 Samsung Electronics |
| 5 | * Krzysztof Kozlowski <k.kozlowski@samsung.com> | 5 | * Krzysztof Kozlowski <k.kozlowski@samsung.com> |
| 6 | * | 6 | * |
| 7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
| @@ -22,12 +22,6 @@ | |||
| 22 | #include <linux/mfd/max14577-private.h> | 22 | #include <linux/mfd/max14577-private.h> |
| 23 | #include <linux/regulator/of_regulator.h> | 23 | #include <linux/regulator/of_regulator.h> |
| 24 | 24 | ||
| 25 | struct max14577_regulator { | ||
| 26 | struct device *dev; | ||
| 27 | struct max14577 *max14577; | ||
| 28 | struct regulator_dev **regulators; | ||
| 29 | }; | ||
| 30 | |||
| 31 | static int max14577_reg_is_enabled(struct regulator_dev *rdev) | 25 | static int max14577_reg_is_enabled(struct regulator_dev *rdev) |
| 32 | { | 26 | { |
| 33 | int rid = rdev_get_id(rdev); | 27 | int rid = rdev_get_id(rdev); |
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c index e242dd316d36..d23d0577754b 100644 --- a/drivers/regulator/max1586.c +++ b/drivers/regulator/max1586.c | |||
| @@ -46,8 +46,6 @@ struct max1586_data { | |||
| 46 | 46 | ||
| 47 | unsigned int v3_curr_sel; | 47 | unsigned int v3_curr_sel; |
| 48 | unsigned int v6_curr_sel; | 48 | unsigned int v6_curr_sel; |
| 49 | |||
| 50 | struct regulator_dev *rdev[0]; | ||
| 51 | }; | 49 | }; |
| 52 | 50 | ||
| 53 | /* | 51 | /* |
| @@ -162,14 +160,12 @@ static struct regulator_desc max1586_reg[] = { | |||
| 162 | static int max1586_pmic_probe(struct i2c_client *client, | 160 | static int max1586_pmic_probe(struct i2c_client *client, |
| 163 | const struct i2c_device_id *i2c_id) | 161 | const struct i2c_device_id *i2c_id) |
| 164 | { | 162 | { |
| 165 | struct regulator_dev **rdev; | ||
| 166 | struct max1586_platform_data *pdata = dev_get_platdata(&client->dev); | 163 | struct max1586_platform_data *pdata = dev_get_platdata(&client->dev); |
| 167 | struct regulator_config config = { }; | 164 | struct regulator_config config = { }; |
| 168 | struct max1586_data *max1586; | 165 | struct max1586_data *max1586; |
| 169 | int i, id; | 166 | int i, id; |
| 170 | 167 | ||
| 171 | max1586 = devm_kzalloc(&client->dev, sizeof(struct max1586_data) + | 168 | max1586 = devm_kzalloc(&client->dev, sizeof(struct max1586_data), |
| 172 | sizeof(struct regulator_dev *) * (MAX1586_V6 + 1), | ||
| 173 | GFP_KERNEL); | 169 | GFP_KERNEL); |
| 174 | if (!max1586) | 170 | if (!max1586) |
| 175 | return -ENOMEM; | 171 | return -ENOMEM; |
| @@ -186,8 +182,9 @@ static int max1586_pmic_probe(struct i2c_client *client, | |||
| 186 | max1586->v3_curr_sel = 24; /* 1.3V */ | 182 | max1586->v3_curr_sel = 24; /* 1.3V */ |
| 187 | max1586->v6_curr_sel = 0; | 183 | max1586->v6_curr_sel = 0; |
| 188 | 184 | ||
| 189 | rdev = max1586->rdev; | ||
| 190 | for (i = 0; i < pdata->num_subdevs && i <= MAX1586_V6; i++) { | 185 | for (i = 0; i < pdata->num_subdevs && i <= MAX1586_V6; i++) { |
| 186 | struct regulator_dev *rdev; | ||
| 187 | |||
| 191 | id = pdata->subdevs[i].id; | 188 | id = pdata->subdevs[i].id; |
| 192 | if (!pdata->subdevs[i].platform_data) | 189 | if (!pdata->subdevs[i].platform_data) |
| 193 | continue; | 190 | continue; |
| @@ -207,12 +204,12 @@ static int max1586_pmic_probe(struct i2c_client *client, | |||
| 207 | config.init_data = pdata->subdevs[i].platform_data; | 204 | config.init_data = pdata->subdevs[i].platform_data; |
| 208 | config.driver_data = max1586; | 205 | config.driver_data = max1586; |
| 209 | 206 | ||
| 210 | rdev[i] = devm_regulator_register(&client->dev, | 207 | rdev = devm_regulator_register(&client->dev, |
| 211 | &max1586_reg[id], &config); | 208 | &max1586_reg[id], &config); |
| 212 | if (IS_ERR(rdev[i])) { | 209 | if (IS_ERR(rdev)) { |
| 213 | dev_err(&client->dev, "failed to register %s\n", | 210 | dev_err(&client->dev, "failed to register %s\n", |
| 214 | max1586_reg[id].name); | 211 | max1586_reg[id].name); |
| 215 | return PTR_ERR(rdev[i]); | 212 | return PTR_ERR(rdev); |
| 216 | } | 213 | } |
| 217 | } | 214 | } |
| 218 | 215 | ||
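The max1586 conversion is the template for most of the drivers that follow: the array of regulator_dev pointers kept in driver data is dropped, and the handle returned by devm_regulator_register() lives only long enough to be checked for an error. A condensed sketch of the resulting registration loop (descriptor and config names are illustrative):

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>

static int example_register_all(struct platform_device *pdev,
                                struct regulator_config *config,
                                const struct regulator_desc *descs,
                                int num)
{
    int i;

    for (i = 0; i < num; i++) {
        /* scoped to the loop body; nothing stores it afterwards */
        struct regulator_dev *rdev;

        rdev = devm_regulator_register(&pdev->dev, &descs[i], config);
        if (IS_ERR(rdev)) {
            dev_err(&pdev->dev, "failed to register %s\n", descs[i].name);
            return PTR_ERR(rdev);
        }
    }

    return 0;
}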
diff --git a/drivers/regulator/max77686.c b/drivers/regulator/max77686.c index ae001ccf26f4..ef1af2debbd2 100644 --- a/drivers/regulator/max77686.c +++ b/drivers/regulator/max77686.c | |||
| @@ -65,7 +65,6 @@ enum max77686_ramp_rate { | |||
| 65 | }; | 65 | }; |
| 66 | 66 | ||
| 67 | struct max77686_data { | 67 | struct max77686_data { |
| 68 | struct regulator_dev *rdev[MAX77686_REGULATORS]; | ||
| 69 | unsigned int opmode[MAX77686_REGULATORS]; | 68 | unsigned int opmode[MAX77686_REGULATORS]; |
| 70 | }; | 69 | }; |
| 71 | 70 | ||
| @@ -400,7 +399,7 @@ static int max77686_pmic_dt_parse_pdata(struct platform_device *pdev, | |||
| 400 | unsigned int i; | 399 | unsigned int i; |
| 401 | 400 | ||
| 402 | pmic_np = iodev->dev->of_node; | 401 | pmic_np = iodev->dev->of_node; |
| 403 | regulators_np = of_find_node_by_name(pmic_np, "voltage-regulators"); | 402 | regulators_np = of_get_child_by_name(pmic_np, "voltage-regulators"); |
| 404 | if (!regulators_np) { | 403 | if (!regulators_np) { |
| 405 | dev_err(&pdev->dev, "could not find regulators sub-node\n"); | 404 | dev_err(&pdev->dev, "could not find regulators sub-node\n"); |
| 406 | return -EINVAL; | 405 | return -EINVAL; |
| @@ -410,8 +409,7 @@ static int max77686_pmic_dt_parse_pdata(struct platform_device *pdev, | |||
| 410 | rdata = devm_kzalloc(&pdev->dev, sizeof(*rdata) * | 409 | rdata = devm_kzalloc(&pdev->dev, sizeof(*rdata) * |
| 411 | pdata->num_regulators, GFP_KERNEL); | 410 | pdata->num_regulators, GFP_KERNEL); |
| 412 | if (!rdata) { | 411 | if (!rdata) { |
| 413 | dev_err(&pdev->dev, | 412 | of_node_put(regulators_np); |
| 414 | "could not allocate memory for regulator data\n"); | ||
| 415 | return -ENOMEM; | 413 | return -ENOMEM; |
| 416 | } | 414 | } |
| 417 | 415 | ||
| @@ -425,6 +423,7 @@ static int max77686_pmic_dt_parse_pdata(struct platform_device *pdev, | |||
| 425 | } | 423 | } |
| 426 | 424 | ||
| 427 | pdata->regulators = rdata; | 425 | pdata->regulators = rdata; |
| 426 | of_node_put(regulators_np); | ||
| 428 | 427 | ||
| 429 | return 0; | 428 | return 0; |
| 430 | } | 429 | } |
| @@ -474,16 +473,18 @@ static int max77686_pmic_probe(struct platform_device *pdev) | |||
| 474 | platform_set_drvdata(pdev, max77686); | 473 | platform_set_drvdata(pdev, max77686); |
| 475 | 474 | ||
| 476 | for (i = 0; i < MAX77686_REGULATORS; i++) { | 475 | for (i = 0; i < MAX77686_REGULATORS; i++) { |
| 476 | struct regulator_dev *rdev; | ||
| 477 | |||
| 477 | config.init_data = pdata->regulators[i].initdata; | 478 | config.init_data = pdata->regulators[i].initdata; |
| 478 | config.of_node = pdata->regulators[i].of_node; | 479 | config.of_node = pdata->regulators[i].of_node; |
| 479 | 480 | ||
| 480 | max77686->opmode[i] = regulators[i].enable_mask; | 481 | max77686->opmode[i] = regulators[i].enable_mask; |
| 481 | max77686->rdev[i] = devm_regulator_register(&pdev->dev, | 482 | rdev = devm_regulator_register(&pdev->dev, |

| 482 | &regulators[i], &config); | 483 | &regulators[i], &config); |
| 483 | if (IS_ERR(max77686->rdev[i])) { | 484 | if (IS_ERR(rdev)) { |
| 484 | dev_err(&pdev->dev, | 485 | dev_err(&pdev->dev, |
| 485 | "regulator init failed for %d\n", i); | 486 | "regulator init failed for %d\n", i); |
| 486 | return PTR_ERR(max77686->rdev[i]); | 487 | return PTR_ERR(rdev); |
| 487 | } | 488 | } |
| 488 | } | 489 | } |
| 489 | 490 | ||
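Several hunks in this series replace of_find_node_by_name() with of_get_child_by_name(). The former walks the whole tree from the given starting point and drops the reference on that starting node, so it can match an unrelated "regulators" node and unbalance refcounts; the latter only looks at direct children and leaves the parent alone. The child reference still has to be released, which is what the added of_node_put() calls do, including on the error paths. A minimal sketch:

#include <linux/device.h>
#include <linux/of.h>
#include <linux/regulator/of_regulator.h>

static int example_parse_regulators(struct device *dev,
                                    struct of_regulator_match *rmatch,
                                    unsigned int num)
{
    struct device_node *np;
    int matched;

    np = of_get_child_by_name(dev->of_node, "regulators");
    if (!np)
        return -EINVAL;

    matched = of_regulator_match(dev, np, rmatch, num);
    of_node_put(np);    /* balance the reference taken above */

    return matched;
}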
diff --git a/drivers/regulator/max77693.c b/drivers/regulator/max77693.c index 5fb899f461d0..653a58b49cdf 100644 --- a/drivers/regulator/max77693.c +++ b/drivers/regulator/max77693.c | |||
| @@ -34,13 +34,6 @@ | |||
| 34 | 34 | ||
| 35 | #define CHGIN_ILIM_STEP_20mA 20000 | 35 | #define CHGIN_ILIM_STEP_20mA 20000 |
| 36 | 36 | ||
| 37 | struct max77693_pmic_dev { | ||
| 38 | struct device *dev; | ||
| 39 | struct max77693_dev *iodev; | ||
| 40 | int num_regulators; | ||
| 41 | struct regulator_dev **rdev; | ||
| 42 | }; | ||
| 43 | |||
| 44 | /* CHARGER regulator ops */ | 37 | /* CHARGER regulator ops */ |
| 45 | /* CHARGER regulator uses two bits for enabling */ | 38 | /* CHARGER regulator uses two bits for enabling */ |
| 46 | static int max77693_chg_is_enabled(struct regulator_dev *rdev) | 39 | static int max77693_chg_is_enabled(struct regulator_dev *rdev) |
| @@ -170,19 +163,22 @@ static int max77693_pmic_dt_parse_rdata(struct device *dev, | |||
| 170 | struct max77693_regulator_data *tmp; | 163 | struct max77693_regulator_data *tmp; |
| 171 | int i, matched = 0; | 164 | int i, matched = 0; |
| 172 | 165 | ||
| 173 | np = of_find_node_by_name(dev->parent->of_node, "regulators"); | 166 | np = of_get_child_by_name(dev->parent->of_node, "regulators"); |
| 174 | if (!np) | 167 | if (!np) |
| 175 | return -EINVAL; | 168 | return -EINVAL; |
| 176 | 169 | ||
| 177 | rmatch = devm_kzalloc(dev, | 170 | rmatch = devm_kzalloc(dev, |
| 178 | sizeof(*rmatch) * ARRAY_SIZE(regulators), GFP_KERNEL); | 171 | sizeof(*rmatch) * ARRAY_SIZE(regulators), GFP_KERNEL); |
| 179 | if (!rmatch) | 172 | if (!rmatch) { |
| 173 | of_node_put(np); | ||
| 180 | return -ENOMEM; | 174 | return -ENOMEM; |
| 175 | } | ||
| 181 | 176 | ||
| 182 | for (i = 0; i < ARRAY_SIZE(regulators); i++) | 177 | for (i = 0; i < ARRAY_SIZE(regulators); i++) |
| 183 | rmatch[i].name = regulators[i].name; | 178 | rmatch[i].name = regulators[i].name; |
| 184 | 179 | ||
| 185 | matched = of_regulator_match(dev, np, rmatch, ARRAY_SIZE(regulators)); | 180 | matched = of_regulator_match(dev, np, rmatch, ARRAY_SIZE(regulators)); |
| 181 | of_node_put(np); | ||
| 186 | if (matched <= 0) | 182 | if (matched <= 0) |
| 187 | return matched; | 183 | return matched; |
| 188 | *rdata = devm_kzalloc(dev, sizeof(**rdata) * matched, GFP_KERNEL); | 184 | *rdata = devm_kzalloc(dev, sizeof(**rdata) * matched, GFP_KERNEL); |
| @@ -229,7 +225,6 @@ static int max77693_pmic_init_rdata(struct device *dev, | |||
| 229 | static int max77693_pmic_probe(struct platform_device *pdev) | 225 | static int max77693_pmic_probe(struct platform_device *pdev) |
| 230 | { | 226 | { |
| 231 | struct max77693_dev *iodev = dev_get_drvdata(pdev->dev.parent); | 227 | struct max77693_dev *iodev = dev_get_drvdata(pdev->dev.parent); |
| 232 | struct max77693_pmic_dev *max77693_pmic; | ||
| 233 | struct max77693_regulator_data *rdata = NULL; | 228 | struct max77693_regulator_data *rdata = NULL; |
| 234 | int num_rdata, i; | 229 | int num_rdata, i; |
| 235 | struct regulator_config config; | 230 | struct regulator_config config; |
| @@ -240,39 +235,22 @@ static int max77693_pmic_probe(struct platform_device *pdev) | |||
| 240 | return -ENODEV; | 235 | return -ENODEV; |
| 241 | } | 236 | } |
| 242 | 237 | ||
| 243 | max77693_pmic = devm_kzalloc(&pdev->dev, | ||
| 244 | sizeof(struct max77693_pmic_dev), | ||
| 245 | GFP_KERNEL); | ||
| 246 | if (!max77693_pmic) | ||
| 247 | return -ENOMEM; | ||
| 248 | |||
| 249 | max77693_pmic->rdev = devm_kzalloc(&pdev->dev, | ||
| 250 | sizeof(struct regulator_dev *) * num_rdata, | ||
| 251 | GFP_KERNEL); | ||
| 252 | if (!max77693_pmic->rdev) | ||
| 253 | return -ENOMEM; | ||
| 254 | |||
| 255 | max77693_pmic->dev = &pdev->dev; | ||
| 256 | max77693_pmic->iodev = iodev; | ||
| 257 | max77693_pmic->num_regulators = num_rdata; | ||
| 258 | |||
| 259 | config.dev = &pdev->dev; | 238 | config.dev = &pdev->dev; |
| 260 | config.regmap = iodev->regmap; | 239 | config.regmap = iodev->regmap; |
| 261 | config.driver_data = max77693_pmic; | ||
| 262 | platform_set_drvdata(pdev, max77693_pmic); | ||
| 263 | 240 | ||
| 264 | for (i = 0; i < max77693_pmic->num_regulators; i++) { | 241 | for (i = 0; i < num_rdata; i++) { |
| 265 | int id = rdata[i].id; | 242 | int id = rdata[i].id; |
| 243 | struct regulator_dev *rdev; | ||
| 266 | 244 | ||
| 267 | config.init_data = rdata[i].initdata; | 245 | config.init_data = rdata[i].initdata; |
| 268 | config.of_node = rdata[i].of_node; | 246 | config.of_node = rdata[i].of_node; |
| 269 | 247 | ||
| 270 | max77693_pmic->rdev[i] = devm_regulator_register(&pdev->dev, | 248 | rdev = devm_regulator_register(&pdev->dev, |
| 271 | &regulators[id], &config); | 249 | &regulators[id], &config); |
| 272 | if (IS_ERR(max77693_pmic->rdev[i])) { | 250 | if (IS_ERR(rdev)) { |
| 273 | dev_err(max77693_pmic->dev, | 251 | dev_err(&pdev->dev, |
| 274 | "Failed to initialize regulator-%d\n", id); | 252 | "Failed to initialize regulator-%d\n", id); |
| 275 | return PTR_ERR(max77693_pmic->rdev[i]); | 253 | return PTR_ERR(rdev); |
| 276 | } | 254 | } |
| 277 | } | 255 | } |
| 278 | 256 | ||
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c index 7f049c92ee52..3172da847d24 100644 --- a/drivers/regulator/max8649.c +++ b/drivers/regulator/max8649.c | |||
| @@ -49,7 +49,6 @@ | |||
| 49 | #define MAX8649_RAMP_DOWN (1 << 1) | 49 | #define MAX8649_RAMP_DOWN (1 << 1) |
| 50 | 50 | ||
| 51 | struct max8649_regulator_info { | 51 | struct max8649_regulator_info { |
| 52 | struct regulator_dev *regulator; | ||
| 53 | struct device *dev; | 52 | struct device *dev; |
| 54 | struct regmap *regmap; | 53 | struct regmap *regmap; |
| 55 | 54 | ||
| @@ -154,6 +153,7 @@ static int max8649_regulator_probe(struct i2c_client *client, | |||
| 154 | { | 153 | { |
| 155 | struct max8649_platform_data *pdata = dev_get_platdata(&client->dev); | 154 | struct max8649_platform_data *pdata = dev_get_platdata(&client->dev); |
| 156 | struct max8649_regulator_info *info = NULL; | 155 | struct max8649_regulator_info *info = NULL; |
| 156 | struct regulator_dev *regulator; | ||
| 157 | struct regulator_config config = { }; | 157 | struct regulator_config config = { }; |
| 158 | unsigned int val; | 158 | unsigned int val; |
| 159 | unsigned char data; | 159 | unsigned char data; |
| @@ -234,12 +234,12 @@ static int max8649_regulator_probe(struct i2c_client *client, | |||
| 234 | config.driver_data = info; | 234 | config.driver_data = info; |
| 235 | config.regmap = info->regmap; | 235 | config.regmap = info->regmap; |
| 236 | 236 | ||
| 237 | info->regulator = devm_regulator_register(&client->dev, &dcdc_desc, | 237 | regulator = devm_regulator_register(&client->dev, &dcdc_desc, |
| 238 | &config); | 238 | &config); |
| 239 | if (IS_ERR(info->regulator)) { | 239 | if (IS_ERR(regulator)) { |
| 240 | dev_err(info->dev, "failed to register regulator %s\n", | 240 | dev_err(info->dev, "failed to register regulator %s\n", |
| 241 | dcdc_desc.name); | 241 | dcdc_desc.name); |
| 242 | return PTR_ERR(info->regulator); | 242 | return PTR_ERR(regulator); |
| 243 | } | 243 | } |
| 244 | 244 | ||
| 245 | return 0; | 245 | return 0; |
diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c index 8d94d3d7f97f..2fc411188794 100644 --- a/drivers/regulator/max8660.c +++ b/drivers/regulator/max8660.c | |||
| @@ -81,16 +81,17 @@ enum { | |||
| 81 | struct max8660 { | 81 | struct max8660 { |
| 82 | struct i2c_client *client; | 82 | struct i2c_client *client; |
| 83 | u8 shadow_regs[MAX8660_N_REGS]; /* as chip is write only */ | 83 | u8 shadow_regs[MAX8660_N_REGS]; /* as chip is write only */ |
| 84 | struct regulator_dev *rdev[]; | ||
| 85 | }; | 84 | }; |
| 86 | 85 | ||
| 87 | static int max8660_write(struct max8660 *max8660, u8 reg, u8 mask, u8 val) | 86 | static int max8660_write(struct max8660 *max8660, u8 reg, u8 mask, u8 val) |
| 88 | { | 87 | { |
| 89 | static const u8 max8660_addresses[MAX8660_N_REGS] = | 88 | static const u8 max8660_addresses[MAX8660_N_REGS] = { |
| 90 | { 0x10, 0x12, 0x20, 0x23, 0x24, 0x29, 0x2a, 0x32, 0x33, 0x39, 0x80 }; | 89 | 0x10, 0x12, 0x20, 0x23, 0x24, 0x29, 0x2a, 0x32, 0x33, 0x39, 0x80 |
| 90 | }; | ||
| 91 | 91 | ||
| 92 | int ret; | 92 | int ret; |
| 93 | u8 reg_val = (max8660->shadow_regs[reg] & mask) | val; | 93 | u8 reg_val = (max8660->shadow_regs[reg] & mask) | val; |
| 94 | |||
| 94 | dev_vdbg(&max8660->client->dev, "Writing reg %02x with %02x\n", | 95 | dev_vdbg(&max8660->client->dev, "Writing reg %02x with %02x\n", |
| 95 | max8660_addresses[reg], reg_val); | 96 | max8660_addresses[reg], reg_val); |
| 96 | 97 | ||
| @@ -112,6 +113,7 @@ static int max8660_dcdc_is_enabled(struct regulator_dev *rdev) | |||
| 112 | struct max8660 *max8660 = rdev_get_drvdata(rdev); | 113 | struct max8660 *max8660 = rdev_get_drvdata(rdev); |
| 113 | u8 val = max8660->shadow_regs[MAX8660_OVER1]; | 114 | u8 val = max8660->shadow_regs[MAX8660_OVER1]; |
| 114 | u8 mask = (rdev_get_id(rdev) == MAX8660_V3) ? 1 : 4; | 115 | u8 mask = (rdev_get_id(rdev) == MAX8660_V3) ? 1 : 4; |
| 116 | |||
| 115 | return !!(val & mask); | 117 | return !!(val & mask); |
| 116 | } | 118 | } |
| 117 | 119 | ||
| @@ -119,6 +121,7 @@ static int max8660_dcdc_enable(struct regulator_dev *rdev) | |||
| 119 | { | 121 | { |
| 120 | struct max8660 *max8660 = rdev_get_drvdata(rdev); | 122 | struct max8660 *max8660 = rdev_get_drvdata(rdev); |
| 121 | u8 bit = (rdev_get_id(rdev) == MAX8660_V3) ? 1 : 4; | 123 | u8 bit = (rdev_get_id(rdev) == MAX8660_V3) ? 1 : 4; |
| 124 | |||
| 122 | return max8660_write(max8660, MAX8660_OVER1, 0xff, bit); | 125 | return max8660_write(max8660, MAX8660_OVER1, 0xff, bit); |
| 123 | } | 126 | } |
| 124 | 127 | ||
| @@ -126,15 +129,16 @@ static int max8660_dcdc_disable(struct regulator_dev *rdev) | |||
| 126 | { | 129 | { |
| 127 | struct max8660 *max8660 = rdev_get_drvdata(rdev); | 130 | struct max8660 *max8660 = rdev_get_drvdata(rdev); |
| 128 | u8 mask = (rdev_get_id(rdev) == MAX8660_V3) ? ~1 : ~4; | 131 | u8 mask = (rdev_get_id(rdev) == MAX8660_V3) ? ~1 : ~4; |
| 132 | |||
| 129 | return max8660_write(max8660, MAX8660_OVER1, mask, 0); | 133 | return max8660_write(max8660, MAX8660_OVER1, mask, 0); |
| 130 | } | 134 | } |
| 131 | 135 | ||
| 132 | static int max8660_dcdc_get_voltage_sel(struct regulator_dev *rdev) | 136 | static int max8660_dcdc_get_voltage_sel(struct regulator_dev *rdev) |
| 133 | { | 137 | { |
| 134 | struct max8660 *max8660 = rdev_get_drvdata(rdev); | 138 | struct max8660 *max8660 = rdev_get_drvdata(rdev); |
| 135 | |||
| 136 | u8 reg = (rdev_get_id(rdev) == MAX8660_V3) ? MAX8660_ADTV2 : MAX8660_SDTV2; | 139 | u8 reg = (rdev_get_id(rdev) == MAX8660_V3) ? MAX8660_ADTV2 : MAX8660_SDTV2; |
| 137 | u8 selector = max8660->shadow_regs[reg]; | 140 | u8 selector = max8660->shadow_regs[reg]; |
| 141 | |||
| 138 | return selector; | 142 | return selector; |
| 139 | } | 143 | } |
| 140 | 144 | ||
| @@ -207,6 +211,7 @@ static int max8660_ldo67_is_enabled(struct regulator_dev *rdev) | |||
| 207 | struct max8660 *max8660 = rdev_get_drvdata(rdev); | 211 | struct max8660 *max8660 = rdev_get_drvdata(rdev); |
| 208 | u8 val = max8660->shadow_regs[MAX8660_OVER2]; | 212 | u8 val = max8660->shadow_regs[MAX8660_OVER2]; |
| 209 | u8 mask = (rdev_get_id(rdev) == MAX8660_V6) ? 2 : 4; | 213 | u8 mask = (rdev_get_id(rdev) == MAX8660_V6) ? 2 : 4; |
| 214 | |||
| 210 | return !!(val & mask); | 215 | return !!(val & mask); |
| 211 | } | 216 | } |
| 212 | 217 | ||
| @@ -214,6 +219,7 @@ static int max8660_ldo67_enable(struct regulator_dev *rdev) | |||
| 214 | { | 219 | { |
| 215 | struct max8660 *max8660 = rdev_get_drvdata(rdev); | 220 | struct max8660 *max8660 = rdev_get_drvdata(rdev); |
| 216 | u8 bit = (rdev_get_id(rdev) == MAX8660_V6) ? 2 : 4; | 221 | u8 bit = (rdev_get_id(rdev) == MAX8660_V6) ? 2 : 4; |
| 222 | |||
| 217 | return max8660_write(max8660, MAX8660_OVER2, 0xff, bit); | 223 | return max8660_write(max8660, MAX8660_OVER2, 0xff, bit); |
| 218 | } | 224 | } |
| 219 | 225 | ||
| @@ -221,15 +227,16 @@ static int max8660_ldo67_disable(struct regulator_dev *rdev) | |||
| 221 | { | 227 | { |
| 222 | struct max8660 *max8660 = rdev_get_drvdata(rdev); | 228 | struct max8660 *max8660 = rdev_get_drvdata(rdev); |
| 223 | u8 mask = (rdev_get_id(rdev) == MAX8660_V6) ? ~2 : ~4; | 229 | u8 mask = (rdev_get_id(rdev) == MAX8660_V6) ? ~2 : ~4; |
| 230 | |||
| 224 | return max8660_write(max8660, MAX8660_OVER2, mask, 0); | 231 | return max8660_write(max8660, MAX8660_OVER2, mask, 0); |
| 225 | } | 232 | } |
| 226 | 233 | ||
| 227 | static int max8660_ldo67_get_voltage_sel(struct regulator_dev *rdev) | 234 | static int max8660_ldo67_get_voltage_sel(struct regulator_dev *rdev) |
| 228 | { | 235 | { |
| 229 | struct max8660 *max8660 = rdev_get_drvdata(rdev); | 236 | struct max8660 *max8660 = rdev_get_drvdata(rdev); |
| 230 | |||
| 231 | u8 shift = (rdev_get_id(rdev) == MAX8660_V6) ? 0 : 4; | 237 | u8 shift = (rdev_get_id(rdev) == MAX8660_V6) ? 0 : 4; |
| 232 | u8 selector = (max8660->shadow_regs[MAX8660_L12VCR] >> shift) & 0xf; | 238 | u8 selector = (max8660->shadow_regs[MAX8660_L12VCR] >> shift) & 0xf; |
| 239 | |||
| 233 | return selector; | 240 | return selector; |
| 234 | } | 241 | } |
| 235 | 242 | ||
| @@ -330,7 +337,7 @@ static int max8660_pdata_from_dt(struct device *dev, | |||
| 330 | struct max8660_subdev_data *sub; | 337 | struct max8660_subdev_data *sub; |
| 331 | struct of_regulator_match rmatch[ARRAY_SIZE(max8660_reg)]; | 338 | struct of_regulator_match rmatch[ARRAY_SIZE(max8660_reg)]; |
| 332 | 339 | ||
| 333 | np = of_find_node_by_name(dev->of_node, "regulators"); | 340 | np = of_get_child_by_name(dev->of_node, "regulators"); |
| 334 | if (!np) { | 341 | if (!np) { |
| 335 | dev_err(dev, "missing 'regulators' subnode in DT\n"); | 342 | dev_err(dev, "missing 'regulators' subnode in DT\n"); |
| 336 | return -EINVAL; | 343 | return -EINVAL; |
| @@ -340,6 +347,7 @@ static int max8660_pdata_from_dt(struct device *dev, | |||
| 340 | rmatch[i].name = max8660_reg[i].name; | 347 | rmatch[i].name = max8660_reg[i].name; |
| 341 | 348 | ||
| 342 | matched = of_regulator_match(dev, np, rmatch, ARRAY_SIZE(rmatch)); | 349 | matched = of_regulator_match(dev, np, rmatch, ARRAY_SIZE(rmatch)); |
| 350 | of_node_put(np); | ||
| 343 | if (matched <= 0) | 351 | if (matched <= 0) |
| 344 | return matched; | 352 | return matched; |
| 345 | 353 | ||
| @@ -373,7 +381,6 @@ static inline int max8660_pdata_from_dt(struct device *dev, | |||
| 373 | static int max8660_probe(struct i2c_client *client, | 381 | static int max8660_probe(struct i2c_client *client, |
| 374 | const struct i2c_device_id *i2c_id) | 382 | const struct i2c_device_id *i2c_id) |
| 375 | { | 383 | { |
| 376 | struct regulator_dev **rdev; | ||
| 377 | struct device *dev = &client->dev; | 384 | struct device *dev = &client->dev; |
| 378 | struct max8660_platform_data *pdata = dev_get_platdata(dev); | 385 | struct max8660_platform_data *pdata = dev_get_platdata(dev); |
| 379 | struct regulator_config config = { }; | 386 | struct regulator_config config = { }; |
| @@ -406,14 +413,11 @@ static int max8660_probe(struct i2c_client *client, | |||
| 406 | return -EINVAL; | 413 | return -EINVAL; |
| 407 | } | 414 | } |
| 408 | 415 | ||
| 409 | max8660 = devm_kzalloc(dev, sizeof(struct max8660) + | 416 | max8660 = devm_kzalloc(dev, sizeof(struct max8660), GFP_KERNEL); |
| 410 | sizeof(struct regulator_dev *) * MAX8660_V_END, | ||
| 411 | GFP_KERNEL); | ||
| 412 | if (!max8660) | 417 | if (!max8660) |
| 413 | return -ENOMEM; | 418 | return -ENOMEM; |
| 414 | 419 | ||
| 415 | max8660->client = client; | 420 | max8660->client = client; |
| 416 | rdev = max8660->rdev; | ||
| 417 | 421 | ||
| 418 | if (pdata->en34_is_high) { | 422 | if (pdata->en34_is_high) { |
| 419 | /* Simulate always on */ | 423 | /* Simulate always on */ |
| @@ -481,6 +485,7 @@ static int max8660_probe(struct i2c_client *client, | |||
| 481 | 485 | ||
| 482 | /* Finally register devices */ | 486 | /* Finally register devices */ |
| 483 | for (i = 0; i < pdata->num_subdevs; i++) { | 487 | for (i = 0; i < pdata->num_subdevs; i++) { |
| 488 | struct regulator_dev *rdev; | ||
| 484 | 489 | ||
| 485 | id = pdata->subdevs[i].id; | 490 | id = pdata->subdevs[i].id; |
| 486 | 491 | ||
| @@ -489,13 +494,13 @@ static int max8660_probe(struct i2c_client *client, | |||
| 489 | config.of_node = of_node[i]; | 494 | config.of_node = of_node[i]; |
| 490 | config.driver_data = max8660; | 495 | config.driver_data = max8660; |
| 491 | 496 | ||
| 492 | rdev[i] = devm_regulator_register(&client->dev, | 497 | rdev = devm_regulator_register(&client->dev, |
| 493 | &max8660_reg[id], &config); | 498 | &max8660_reg[id], &config); |
| 494 | if (IS_ERR(rdev[i])) { | 499 | if (IS_ERR(rdev)) { |
| 495 | ret = PTR_ERR(rdev[i]); | 500 | ret = PTR_ERR(rdev); |
| 496 | dev_err(&client->dev, "failed to register %s\n", | 501 | dev_err(&client->dev, "failed to register %s\n", |
| 497 | max8660_reg[id].name); | 502 | max8660_reg[id].name); |
| 498 | return PTR_ERR(rdev[i]); | 503 | return PTR_ERR(rdev); |
| 499 | } | 504 | } |
| 500 | } | 505 | } |
| 501 | 506 | ||
diff --git a/drivers/regulator/max8907-regulator.c b/drivers/regulator/max8907-regulator.c index 0c5fe6c6ac26..9623e9e290bf 100644 --- a/drivers/regulator/max8907-regulator.c +++ b/drivers/regulator/max8907-regulator.c | |||
| @@ -34,7 +34,6 @@ | |||
| 34 | 34 | ||
| 35 | struct max8907_regulator { | 35 | struct max8907_regulator { |
| 36 | struct regulator_desc desc[MAX8907_NUM_REGULATORS]; | 36 | struct regulator_desc desc[MAX8907_NUM_REGULATORS]; |
| 37 | struct regulator_dev *rdev[MAX8907_NUM_REGULATORS]; | ||
| 38 | }; | 37 | }; |
| 39 | 38 | ||
| 40 | #define REG_MBATT() \ | 39 | #define REG_MBATT() \ |
| @@ -231,7 +230,7 @@ static int max8907_regulator_parse_dt(struct platform_device *pdev) | |||
| 231 | if (!np) | 230 | if (!np) |
| 232 | return 0; | 231 | return 0; |
| 233 | 232 | ||
| 234 | regulators = of_find_node_by_name(np, "regulators"); | 233 | regulators = of_get_child_by_name(np, "regulators"); |
| 235 | if (!regulators) { | 234 | if (!regulators) { |
| 236 | dev_err(&pdev->dev, "regulators node not found\n"); | 235 | dev_err(&pdev->dev, "regulators node not found\n"); |
| 237 | return -EINVAL; | 236 | return -EINVAL; |
| @@ -292,10 +291,9 @@ static int max8907_regulator_probe(struct platform_device *pdev) | |||
| 292 | return ret; | 291 | return ret; |
| 293 | 292 | ||
| 294 | pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL); | 293 | pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL); |
| 295 | if (!pmic) { | 294 | if (!pmic) |
| 296 | dev_err(&pdev->dev, "Failed to alloc pmic\n"); | ||
| 297 | return -ENOMEM; | 295 | return -ENOMEM; |
| 298 | } | 296 | |
| 299 | platform_set_drvdata(pdev, pmic); | 297 | platform_set_drvdata(pdev, pmic); |
| 300 | 298 | ||
| 301 | memcpy(pmic->desc, max8907_regulators, sizeof(pmic->desc)); | 299 | memcpy(pmic->desc, max8907_regulators, sizeof(pmic->desc)); |
| @@ -311,6 +309,8 @@ static int max8907_regulator_probe(struct platform_device *pdev) | |||
| 311 | } | 309 | } |
| 312 | 310 | ||
| 313 | for (i = 0; i < MAX8907_NUM_REGULATORS; i++) { | 311 | for (i = 0; i < MAX8907_NUM_REGULATORS; i++) { |
| 312 | struct regulator_dev *rdev; | ||
| 313 | |||
| 314 | config.dev = pdev->dev.parent; | 314 | config.dev = pdev->dev.parent; |
| 315 | if (pdata) | 315 | if (pdata) |
| 316 | idata = pdata->init_data[i]; | 316 | idata = pdata->init_data[i]; |
| @@ -350,13 +350,13 @@ static int max8907_regulator_probe(struct platform_device *pdev) | |||
| 350 | pmic->desc[i].ops = &max8907_out5v_hwctl_ops; | 350 | pmic->desc[i].ops = &max8907_out5v_hwctl_ops; |
| 351 | } | 351 | } |
| 352 | 352 | ||
| 353 | pmic->rdev[i] = devm_regulator_register(&pdev->dev, | 353 | rdev = devm_regulator_register(&pdev->dev, |
| 354 | &pmic->desc[i], &config); | 354 | &pmic->desc[i], &config); |
| 355 | if (IS_ERR(pmic->rdev[i])) { | 355 | if (IS_ERR(rdev)) { |
| 356 | dev_err(&pdev->dev, | 356 | dev_err(&pdev->dev, |
| 357 | "failed to register %s regulator\n", | 357 | "failed to register %s regulator\n", |
| 358 | pmic->desc[i].name); | 358 | pmic->desc[i].name); |
| 359 | return PTR_ERR(pmic->rdev[i]); | 359 | return PTR_ERR(rdev); |
| 360 | } | 360 | } |
| 361 | } | 361 | } |
| 362 | 362 | ||
diff --git a/drivers/regulator/max8925-regulator.c b/drivers/regulator/max8925-regulator.c index 759510789e71..dad2bcd14e96 100644 --- a/drivers/regulator/max8925-regulator.c +++ b/drivers/regulator/max8925-regulator.c | |||
| @@ -36,9 +36,7 @@ | |||
| 36 | 36 | ||
| 37 | struct max8925_regulator_info { | 37 | struct max8925_regulator_info { |
| 38 | struct regulator_desc desc; | 38 | struct regulator_desc desc; |
| 39 | struct regulator_dev *regulator; | ||
| 40 | struct i2c_client *i2c; | 39 | struct i2c_client *i2c; |
| 41 | struct max8925_chip *chip; | ||
| 42 | 40 | ||
| 43 | int vol_reg; | 41 | int vol_reg; |
| 44 | int enable_reg; | 42 | int enable_reg; |
| @@ -251,10 +249,11 @@ static int max8925_regulator_dt_init(struct platform_device *pdev, | |||
| 251 | { | 249 | { |
| 252 | struct device_node *nproot, *np; | 250 | struct device_node *nproot, *np; |
| 253 | int rcount; | 251 | int rcount; |
| 252 | |||
| 254 | nproot = of_node_get(pdev->dev.parent->of_node); | 253 | nproot = of_node_get(pdev->dev.parent->of_node); |
| 255 | if (!nproot) | 254 | if (!nproot) |
| 256 | return -ENODEV; | 255 | return -ENODEV; |
| 257 | np = of_find_node_by_name(nproot, "regulators"); | 256 | np = of_get_child_by_name(nproot, "regulators"); |
| 258 | if (!np) { | 257 | if (!np) { |
| 259 | dev_err(&pdev->dev, "failed to find regulators node\n"); | 258 | dev_err(&pdev->dev, "failed to find regulators node\n"); |
| 260 | return -ENODEV; | 259 | return -ENODEV; |
| @@ -264,7 +263,7 @@ static int max8925_regulator_dt_init(struct platform_device *pdev, | |||
| 264 | &max8925_regulator_matches[ridx], 1); | 263 | &max8925_regulator_matches[ridx], 1); |
| 265 | of_node_put(np); | 264 | of_node_put(np); |
| 266 | if (rcount < 0) | 265 | if (rcount < 0) |
| 267 | return -ENODEV; | 266 | return rcount; |
| 268 | config->init_data = max8925_regulator_matches[ridx].init_data; | 267 | config->init_data = max8925_regulator_matches[ridx].init_data; |
| 269 | config->of_node = max8925_regulator_matches[ridx].of_node; | 268 | config->of_node = max8925_regulator_matches[ridx].of_node; |
| 270 | 269 | ||
| @@ -303,7 +302,6 @@ static int max8925_regulator_probe(struct platform_device *pdev) | |||
| 303 | return -EINVAL; | 302 | return -EINVAL; |
| 304 | } | 303 | } |
| 305 | ri->i2c = chip->i2c; | 304 | ri->i2c = chip->i2c; |
| 306 | ri->chip = chip; | ||
| 307 | 305 | ||
| 308 | config.dev = &pdev->dev; | 306 | config.dev = &pdev->dev; |
| 309 | config.driver_data = ri; | 307 | config.driver_data = ri; |
diff --git a/drivers/regulator/max8952.c b/drivers/regulator/max8952.c index 788e5ae2af1b..d920f5a32ec8 100644 --- a/drivers/regulator/max8952.c +++ b/drivers/regulator/max8952.c | |||
| @@ -48,9 +48,7 @@ enum { | |||
| 48 | 48 | ||
| 49 | struct max8952_data { | 49 | struct max8952_data { |
| 50 | struct i2c_client *client; | 50 | struct i2c_client *client; |
| 51 | struct device *dev; | ||
| 52 | struct max8952_platform_data *pdata; | 51 | struct max8952_platform_data *pdata; |
| 53 | struct regulator_dev *rdev; | ||
| 54 | 52 | ||
| 55 | bool vid0; | 53 | bool vid0; |
| 56 | bool vid1; | 54 | bool vid1; |
| @@ -59,6 +57,7 @@ struct max8952_data { | |||
| 59 | static int max8952_read_reg(struct max8952_data *max8952, u8 reg) | 57 | static int max8952_read_reg(struct max8952_data *max8952, u8 reg) |
| 60 | { | 58 | { |
| 61 | int ret = i2c_smbus_read_byte_data(max8952->client, reg); | 59 | int ret = i2c_smbus_read_byte_data(max8952->client, reg); |
| 60 | |||
| 62 | if (ret > 0) | 61 | if (ret > 0) |
| 63 | ret &= 0xff; | 62 | ret &= 0xff; |
| 64 | 63 | ||
| @@ -144,10 +143,8 @@ static struct max8952_platform_data *max8952_parse_dt(struct device *dev) | |||
| 144 | int i; | 143 | int i; |
| 145 | 144 | ||
| 146 | pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); | 145 | pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); |
| 147 | if (!pd) { | 146 | if (!pd) |
| 148 | dev_err(dev, "Failed to allocate platform data\n"); | ||
| 149 | return NULL; | 147 | return NULL; |
| 150 | } | ||
| 151 | 148 | ||
| 152 | pd->gpio_vid0 = of_get_named_gpio(np, "max8952,vid-gpios", 0); | 149 | pd->gpio_vid0 = of_get_named_gpio(np, "max8952,vid-gpios", 0); |
| 153 | pd->gpio_vid1 = of_get_named_gpio(np, "max8952,vid-gpios", 1); | 150 | pd->gpio_vid1 = of_get_named_gpio(np, "max8952,vid-gpios", 1); |
| @@ -199,6 +196,7 @@ static int max8952_pmic_probe(struct i2c_client *client, | |||
| 199 | struct max8952_platform_data *pdata = dev_get_platdata(&client->dev); | 196 | struct max8952_platform_data *pdata = dev_get_platdata(&client->dev); |
| 200 | struct regulator_config config = { }; | 197 | struct regulator_config config = { }; |
| 201 | struct max8952_data *max8952; | 198 | struct max8952_data *max8952; |
| 199 | struct regulator_dev *rdev; | ||
| 202 | 200 | ||
| 203 | int ret = 0, err = 0; | 201 | int ret = 0, err = 0; |
| 204 | 202 | ||
| @@ -219,10 +217,9 @@ static int max8952_pmic_probe(struct i2c_client *client, | |||
| 219 | return -ENOMEM; | 217 | return -ENOMEM; |
| 220 | 218 | ||
| 221 | max8952->client = client; | 219 | max8952->client = client; |
| 222 | max8952->dev = &client->dev; | ||
| 223 | max8952->pdata = pdata; | 220 | max8952->pdata = pdata; |
| 224 | 221 | ||
| 225 | config.dev = max8952->dev; | 222 | config.dev = &client->dev; |
| 226 | config.init_data = pdata->reg_data; | 223 | config.init_data = pdata->reg_data; |
| 227 | config.driver_data = max8952; | 224 | config.driver_data = max8952; |
| 228 | config.of_node = client->dev.of_node; | 225 | config.of_node = client->dev.of_node; |
| @@ -231,11 +228,11 @@ static int max8952_pmic_probe(struct i2c_client *client, | |||
| 231 | if (pdata->reg_data->constraints.boot_on) | 228 | if (pdata->reg_data->constraints.boot_on) |
| 232 | config.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH; | 229 | config.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH; |
| 233 | 230 | ||
| 234 | max8952->rdev = regulator_register(&regulator, &config); | 231 | rdev = devm_regulator_register(&client->dev, &regulator, &config); |
| 235 | 232 | ||
| 236 | if (IS_ERR(max8952->rdev)) { | 233 | if (IS_ERR(rdev)) { |
| 237 | ret = PTR_ERR(max8952->rdev); | 234 | ret = PTR_ERR(rdev); |
| 238 | dev_err(max8952->dev, "regulator init failed (%d)\n", ret); | 235 | dev_err(&client->dev, "regulator init failed (%d)\n", ret); |
| 239 | return ret; | 236 | return ret; |
| 240 | } | 237 | } |
| 241 | 238 | ||
| @@ -263,7 +260,7 @@ static int max8952_pmic_probe(struct i2c_client *client, | |||
| 263 | err = 3; | 260 | err = 3; |
| 264 | 261 | ||
| 265 | if (err) { | 262 | if (err) { |
| 266 | dev_warn(max8952->dev, "VID0/1 gpio invalid: " | 263 | dev_warn(&client->dev, "VID0/1 gpio invalid: " |
| 267 | "DVS not available.\n"); | 264 | "DVS not available.\n"); |
| 268 | max8952->vid0 = 0; | 265 | max8952->vid0 = 0; |
| 269 | max8952->vid1 = 0; | 266 | max8952->vid1 = 0; |
| @@ -274,7 +271,7 @@ static int max8952_pmic_probe(struct i2c_client *client, | |||
| 274 | /* Disable Pulldown of EN only */ | 271 | /* Disable Pulldown of EN only */ |
| 275 | max8952_write_reg(max8952, MAX8952_REG_CONTROL, 0x60); | 272 | max8952_write_reg(max8952, MAX8952_REG_CONTROL, 0x60); |
| 276 | 273 | ||
| 277 | dev_err(max8952->dev, "DVS modes disabled because VID0 and VID1" | 274 | dev_err(&client->dev, "DVS modes disabled because VID0 and VID1" |
| 278 | " do not have proper controls.\n"); | 275 | " do not have proper controls.\n"); |
| 279 | } else { | 276 | } else { |
| 280 | /* | 277 | /* |
| @@ -321,9 +318,6 @@ static int max8952_pmic_remove(struct i2c_client *client) | |||
| 321 | { | 318 | { |
| 322 | struct max8952_data *max8952 = i2c_get_clientdata(client); | 319 | struct max8952_data *max8952 = i2c_get_clientdata(client); |
| 323 | struct max8952_platform_data *pdata = max8952->pdata; | 320 | struct max8952_platform_data *pdata = max8952->pdata; |
| 324 | struct regulator_dev *rdev = max8952->rdev; | ||
| 325 | |||
| 326 | regulator_unregister(rdev); | ||
| 327 | 321 | ||
| 328 | gpio_free(pdata->gpio_vid0); | 322 | gpio_free(pdata->gpio_vid0); |
| 329 | gpio_free(pdata->gpio_vid1); | 323 | gpio_free(pdata->gpio_vid1); |
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c index 892aa1e5b96c..dbedf1768db0 100644 --- a/drivers/regulator/max8973-regulator.c +++ b/drivers/regulator/max8973-regulator.c | |||
| @@ -93,7 +93,6 @@ | |||
| 93 | struct max8973_chip { | 93 | struct max8973_chip { |
| 94 | struct device *dev; | 94 | struct device *dev; |
| 95 | struct regulator_desc desc; | 95 | struct regulator_desc desc; |
| 96 | struct regulator_dev *rdev; | ||
| 97 | struct regmap *regmap; | 96 | struct regmap *regmap; |
| 98 | bool enable_external_control; | 97 | bool enable_external_control; |
| 99 | int dvs_gpio; | 98 | int dvs_gpio; |
| @@ -379,10 +378,8 @@ static int max8973_probe(struct i2c_client *client, | |||
| 379 | } | 378 | } |
| 380 | 379 | ||
| 381 | max = devm_kzalloc(&client->dev, sizeof(*max), GFP_KERNEL); | 380 | max = devm_kzalloc(&client->dev, sizeof(*max), GFP_KERNEL); |
| 382 | if (!max) { | 381 | if (!max) |
| 383 | dev_err(&client->dev, "Memory allocation for max failed\n"); | ||
| 384 | return -ENOMEM; | 382 | return -ENOMEM; |
| 385 | } | ||
| 386 | 383 | ||
| 387 | max->regmap = devm_regmap_init_i2c(client, &max8973_regmap_config); | 384 | max->regmap = devm_regmap_init_i2c(client, &max8973_regmap_config); |
| 388 | if (IS_ERR(max->regmap)) { | 385 | if (IS_ERR(max->regmap)) { |
| @@ -474,7 +471,6 @@ static int max8973_probe(struct i2c_client *client, | |||
| 474 | return ret; | 471 | return ret; |
| 475 | } | 472 | } |
| 476 | 473 | ||
| 477 | max->rdev = rdev; | ||
| 478 | return 0; | 474 | return 0; |
| 479 | } | 475 | } |
| 480 | 476 | ||
diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c index 2d618fc9c1af..90b4c530dee5 100644 --- a/drivers/regulator/max8997.c +++ b/drivers/regulator/max8997.c | |||
| @@ -38,7 +38,6 @@ struct max8997_data { | |||
| 38 | struct device *dev; | 38 | struct device *dev; |
| 39 | struct max8997_dev *iodev; | 39 | struct max8997_dev *iodev; |
| 40 | int num_regulators; | 40 | int num_regulators; |
| 41 | struct regulator_dev **rdev; | ||
| 42 | int ramp_delay; /* in mV/us */ | 41 | int ramp_delay; /* in mV/us */ |
| 43 | 42 | ||
| 44 | bool buck1_gpiodvs; | 43 | bool buck1_gpiodvs; |
| @@ -924,7 +923,7 @@ static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev, | |||
| 924 | return -ENODEV; | 923 | return -ENODEV; |
| 925 | } | 924 | } |
| 926 | 925 | ||
| 927 | regulators_np = of_find_node_by_name(pmic_np, "regulators"); | 926 | regulators_np = of_get_child_by_name(pmic_np, "regulators"); |
| 928 | if (!regulators_np) { | 927 | if (!regulators_np) { |
| 929 | dev_err(&pdev->dev, "could not find regulators sub-node\n"); | 928 | dev_err(&pdev->dev, "could not find regulators sub-node\n"); |
| 930 | return -EINVAL; | 929 | return -EINVAL; |
| @@ -937,7 +936,6 @@ static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev, | |||
| 937 | pdata->num_regulators, GFP_KERNEL); | 936 | pdata->num_regulators, GFP_KERNEL); |
| 938 | if (!rdata) { | 937 | if (!rdata) { |
| 939 | of_node_put(regulators_np); | 938 | of_node_put(regulators_np); |
| 940 | dev_err(&pdev->dev, "could not allocate memory for regulator data\n"); | ||
| 941 | return -ENOMEM; | 939 | return -ENOMEM; |
| 942 | } | 940 | } |
| 943 | 941 | ||
| @@ -1030,10 +1028,10 @@ static int max8997_pmic_probe(struct platform_device *pdev) | |||
| 1030 | struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent); | 1028 | struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent); |
| 1031 | struct max8997_platform_data *pdata = iodev->pdata; | 1029 | struct max8997_platform_data *pdata = iodev->pdata; |
| 1032 | struct regulator_config config = { }; | 1030 | struct regulator_config config = { }; |
| 1033 | struct regulator_dev **rdev; | 1031 | struct regulator_dev *rdev; |
| 1034 | struct max8997_data *max8997; | 1032 | struct max8997_data *max8997; |
| 1035 | struct i2c_client *i2c; | 1033 | struct i2c_client *i2c; |
| 1036 | int i, ret, size, nr_dvs; | 1034 | int i, ret, nr_dvs; |
| 1037 | u8 max_buck1 = 0, max_buck2 = 0, max_buck5 = 0; | 1035 | u8 max_buck1 = 0, max_buck2 = 0, max_buck5 = 0; |
| 1038 | 1036 | ||
| 1039 | if (!pdata) { | 1037 | if (!pdata) { |
| @@ -1052,12 +1050,6 @@ static int max8997_pmic_probe(struct platform_device *pdev) | |||
| 1052 | if (!max8997) | 1050 | if (!max8997) |
| 1053 | return -ENOMEM; | 1051 | return -ENOMEM; |
| 1054 | 1052 | ||
| 1055 | size = sizeof(struct regulator_dev *) * pdata->num_regulators; | ||
| 1056 | max8997->rdev = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); | ||
| 1057 | if (!max8997->rdev) | ||
| 1058 | return -ENOMEM; | ||
| 1059 | |||
| 1060 | rdev = max8997->rdev; | ||
| 1061 | max8997->dev = &pdev->dev; | 1053 | max8997->dev = &pdev->dev; |
| 1062 | max8997->iodev = iodev; | 1054 | max8997->iodev = iodev; |
| 1063 | max8997->num_regulators = pdata->num_regulators; | 1055 | max8997->num_regulators = pdata->num_regulators; |
| @@ -1205,12 +1197,12 @@ static int max8997_pmic_probe(struct platform_device *pdev) | |||
| 1205 | config.driver_data = max8997; | 1197 | config.driver_data = max8997; |
| 1206 | config.of_node = pdata->regulators[i].reg_node; | 1198 | config.of_node = pdata->regulators[i].reg_node; |
| 1207 | 1199 | ||
| 1208 | rdev[i] = devm_regulator_register(&pdev->dev, &regulators[id], | 1200 | rdev = devm_regulator_register(&pdev->dev, &regulators[id], |
| 1209 | &config); | 1201 | &config); |
| 1210 | if (IS_ERR(rdev[i])) { | 1202 | if (IS_ERR(rdev)) { |
| 1211 | dev_err(max8997->dev, "regulator init failed for %d\n", | 1203 | dev_err(max8997->dev, "regulator init failed for %d\n", |
| 1212 | id); | 1204 | id); |
| 1213 | return PTR_ERR(rdev[i]); | 1205 | return PTR_ERR(rdev); |
| 1214 | } | 1206 | } |
| 1215 | } | 1207 | } |
| 1216 | 1208 | ||
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c index ae3f0656feb0..961091b46557 100644 --- a/drivers/regulator/max8998.c +++ b/drivers/regulator/max8998.c | |||
| @@ -40,7 +40,6 @@ struct max8998_data { | |||
| 40 | struct device *dev; | 40 | struct device *dev; |
| 41 | struct max8998_dev *iodev; | 41 | struct max8998_dev *iodev; |
| 42 | int num_regulators; | 42 | int num_regulators; |
| 43 | struct regulator_dev **rdev; | ||
| 44 | u8 buck1_vol[4]; /* voltages for selection */ | 43 | u8 buck1_vol[4]; /* voltages for selection */ |
| 45 | u8 buck2_vol[2]; | 44 | u8 buck2_vol[2]; |
| 46 | unsigned int buck1_idx; /* index to last changed voltage */ | 45 | unsigned int buck1_idx; /* index to last changed voltage */ |
| @@ -674,8 +673,10 @@ static int max8998_pmic_dt_parse_pdata(struct max8998_dev *iodev, | |||
| 674 | 673 | ||
| 675 | rdata = devm_kzalloc(iodev->dev, sizeof(*rdata) * | 674 | rdata = devm_kzalloc(iodev->dev, sizeof(*rdata) * |
| 676 | pdata->num_regulators, GFP_KERNEL); | 675 | pdata->num_regulators, GFP_KERNEL); |
| 677 | if (!rdata) | 676 | if (!rdata) { |
| 677 | of_node_put(regulators_np); | ||
| 678 | return -ENOMEM; | 678 | return -ENOMEM; |
| 679 | } | ||
| 679 | 680 | ||
| 680 | pdata->regulators = rdata; | 681 | pdata->regulators = rdata; |
| 681 | for (i = 0; i < ARRAY_SIZE(regulators); ++i) { | 682 | for (i = 0; i < ARRAY_SIZE(regulators); ++i) { |
| @@ -692,6 +693,9 @@ static int max8998_pmic_dt_parse_pdata(struct max8998_dev *iodev, | |||
| 692 | } | 693 | } |
| 693 | pdata->num_regulators = rdata - pdata->regulators; | 694 | pdata->num_regulators = rdata - pdata->regulators; |
| 694 | 695 | ||
| 696 | of_node_put(reg_np); | ||
| 697 | of_node_put(regulators_np); | ||
| 698 | |||
| 695 | ret = max8998_pmic_dt_parse_dvs_gpio(iodev, pdata, pmic_np); | 699 | ret = max8998_pmic_dt_parse_dvs_gpio(iodev, pdata, pmic_np); |
| 696 | if (ret) | 700 | if (ret) |
| 697 | return -EINVAL; | 701 | return -EINVAL; |
| @@ -741,10 +745,10 @@ static int max8998_pmic_probe(struct platform_device *pdev) | |||
| 741 | struct max8998_dev *iodev = dev_get_drvdata(pdev->dev.parent); | 745 | struct max8998_dev *iodev = dev_get_drvdata(pdev->dev.parent); |
| 742 | struct max8998_platform_data *pdata = iodev->pdata; | 746 | struct max8998_platform_data *pdata = iodev->pdata; |
| 743 | struct regulator_config config = { }; | 747 | struct regulator_config config = { }; |
| 744 | struct regulator_dev **rdev; | 748 | struct regulator_dev *rdev; |
| 745 | struct max8998_data *max8998; | 749 | struct max8998_data *max8998; |
| 746 | struct i2c_client *i2c; | 750 | struct i2c_client *i2c; |
| 747 | int i, ret, size; | 751 | int i, ret; |
| 748 | unsigned int v; | 752 | unsigned int v; |
| 749 | 753 | ||
| 750 | if (!pdata) { | 754 | if (!pdata) { |
| @@ -763,12 +767,6 @@ static int max8998_pmic_probe(struct platform_device *pdev) | |||
| 763 | if (!max8998) | 767 | if (!max8998) |
| 764 | return -ENOMEM; | 768 | return -ENOMEM; |
| 765 | 769 | ||
| 766 | size = sizeof(struct regulator_dev *) * pdata->num_regulators; | ||
| 767 | max8998->rdev = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); | ||
| 768 | if (!max8998->rdev) | ||
| 769 | return -ENOMEM; | ||
| 770 | |||
| 771 | rdev = max8998->rdev; | ||
| 772 | max8998->dev = &pdev->dev; | 770 | max8998->dev = &pdev->dev; |
| 773 | max8998->iodev = iodev; | 771 | max8998->iodev = iodev; |
| 774 | max8998->num_regulators = pdata->num_regulators; | 772 | max8998->num_regulators = pdata->num_regulators; |
| @@ -872,13 +870,12 @@ static int max8998_pmic_probe(struct platform_device *pdev) | |||
| 872 | config.init_data = pdata->regulators[i].initdata; | 870 | config.init_data = pdata->regulators[i].initdata; |
| 873 | config.driver_data = max8998; | 871 | config.driver_data = max8998; |
| 874 | 872 | ||
| 875 | rdev[i] = devm_regulator_register(&pdev->dev, | 873 | rdev = devm_regulator_register(&pdev->dev, &regulators[index], |
| 876 | &regulators[index], &config); | 874 | &config); |
| 877 | if (IS_ERR(rdev[i])) { | 875 | if (IS_ERR(rdev)) { |
| 878 | ret = PTR_ERR(rdev[i]); | 876 | ret = PTR_ERR(rdev); |
| 879 | dev_err(max8998->dev, "regulator %s init failed (%d)\n", | 877 | dev_err(max8998->dev, "regulator %s init failed (%d)\n", |
| 880 | regulators[index].name, ret); | 878 | regulators[index].name, ret); |
| 881 | rdev[i] = NULL; | ||
| 882 | return ret; | 879 | return ret; |
| 883 | } | 880 | } |
| 884 | } | 881 | } |
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c index da4859282302..05b971726ffa 100644 --- a/drivers/regulator/mc13xxx-regulator-core.c +++ b/drivers/regulator/mc13xxx-regulator-core.c | |||
| @@ -167,8 +167,10 @@ int mc13xxx_get_num_regulators_dt(struct platform_device *pdev) | |||
| 167 | struct device_node *parent; | 167 | struct device_node *parent; |
| 168 | int num; | 168 | int num; |
| 169 | 169 | ||
| 170 | of_node_get(pdev->dev.parent->of_node); | 170 | if (!pdev->dev.parent->of_node) |
| 171 | parent = of_find_node_by_name(pdev->dev.parent->of_node, "regulators"); | 171 | return -ENODEV; |
| 172 | |||
| 173 | parent = of_get_child_by_name(pdev->dev.parent->of_node, "regulators"); | ||
| 172 | if (!parent) | 174 | if (!parent) |
| 173 | return -ENODEV; | 175 | return -ENODEV; |
| 174 | 176 | ||
| @@ -187,8 +189,10 @@ struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt( | |||
| 187 | struct device_node *parent, *child; | 189 | struct device_node *parent, *child; |
| 188 | int i, parsed = 0; | 190 | int i, parsed = 0; |
| 189 | 191 | ||
| 190 | of_node_get(pdev->dev.parent->of_node); | 192 | if (!pdev->dev.parent->of_node) |
| 191 | parent = of_find_node_by_name(pdev->dev.parent->of_node, "regulators"); | 193 | return NULL; |
| 194 | |||
| 195 | parent = of_get_child_by_name(pdev->dev.parent->of_node, "regulators"); | ||
| 192 | if (!parent) | 196 | if (!parent) |
| 193 | return NULL; | 197 | return NULL; |
| 194 | 198 | ||
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c index ab174f20ca11..67e678c4301c 100644 --- a/drivers/regulator/pfuze100-regulator.c +++ b/drivers/regulator/pfuze100-regulator.c | |||
| @@ -56,6 +56,8 @@ | |||
| 56 | #define PFUZE100_VGEN5VOL 0x70 | 56 | #define PFUZE100_VGEN5VOL 0x70 |
| 57 | #define PFUZE100_VGEN6VOL 0x71 | 57 | #define PFUZE100_VGEN6VOL 0x71 |
| 58 | 58 | ||
| 59 | enum chips { PFUZE100, PFUZE200 }; | ||
| 60 | |||
| 59 | struct pfuze_regulator { | 61 | struct pfuze_regulator { |
| 60 | struct regulator_desc desc; | 62 | struct regulator_desc desc; |
| 61 | unsigned char stby_reg; | 63 | unsigned char stby_reg; |
| @@ -63,6 +65,7 @@ struct pfuze_regulator { | |||
| 63 | }; | 65 | }; |
| 64 | 66 | ||
| 65 | struct pfuze_chip { | 67 | struct pfuze_chip { |
| 68 | int chip_id; | ||
| 66 | struct regmap *regmap; | 69 | struct regmap *regmap; |
| 67 | struct device *dev; | 70 | struct device *dev; |
| 68 | struct pfuze_regulator regulator_descs[PFUZE100_MAX_REGULATOR]; | 71 | struct pfuze_regulator regulator_descs[PFUZE100_MAX_REGULATOR]; |
| @@ -78,21 +81,23 @@ static const int pfuze100_vsnvs[] = { | |||
| 78 | }; | 81 | }; |
| 79 | 82 | ||
| 80 | static const struct i2c_device_id pfuze_device_id[] = { | 83 | static const struct i2c_device_id pfuze_device_id[] = { |
| 81 | {.name = "pfuze100"}, | 84 | {.name = "pfuze100", .driver_data = PFUZE100}, |
| 82 | {}, | 85 | {.name = "pfuze200", .driver_data = PFUZE200}, |
| 86 | { } | ||
| 83 | }; | 87 | }; |
| 84 | MODULE_DEVICE_TABLE(i2c, pfuze_device_id); | 88 | MODULE_DEVICE_TABLE(i2c, pfuze_device_id); |
| 85 | 89 | ||
| 86 | static const struct of_device_id pfuze_dt_ids[] = { | 90 | static const struct of_device_id pfuze_dt_ids[] = { |
| 87 | { .compatible = "fsl,pfuze100" }, | 91 | { .compatible = "fsl,pfuze100", .data = (void *)PFUZE100}, |
| 88 | {}, | 92 | { .compatible = "fsl,pfuze200", .data = (void *)PFUZE200}, |
| 93 | { } | ||
| 89 | }; | 94 | }; |
| 90 | MODULE_DEVICE_TABLE(of, pfuze_dt_ids); | 95 | MODULE_DEVICE_TABLE(of, pfuze_dt_ids); |
| 91 | 96 | ||
| 92 | static int pfuze100_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay) | 97 | static int pfuze100_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay) |
| 93 | { | 98 | { |
| 94 | struct pfuze_chip *pfuze100 = rdev_get_drvdata(rdev); | 99 | struct pfuze_chip *pfuze100 = rdev_get_drvdata(rdev); |
| 95 | int id = rdev->desc->id; | 100 | int id = rdev_get_id(rdev); |
| 96 | unsigned int ramp_bits; | 101 | unsigned int ramp_bits; |
| 97 | int ret; | 102 | int ret; |
| 98 | 103 | ||
| @@ -139,14 +144,14 @@ static struct regulator_ops pfuze100_swb_regulator_ops = { | |||
| 139 | 144 | ||
| 140 | }; | 145 | }; |
| 141 | 146 | ||
| 142 | #define PFUZE100_FIXED_REG(_name, base, voltage) \ | 147 | #define PFUZE100_FIXED_REG(_chip, _name, base, voltage) \ |
| 143 | [PFUZE100_ ## _name] = { \ | 148 | [_chip ## _ ## _name] = { \ |
| 144 | .desc = { \ | 149 | .desc = { \ |
| 145 | .name = #_name, \ | 150 | .name = #_name, \ |
| 146 | .n_voltages = 1, \ | 151 | .n_voltages = 1, \ |
| 147 | .ops = &pfuze100_fixed_regulator_ops, \ | 152 | .ops = &pfuze100_fixed_regulator_ops, \ |
| 148 | .type = REGULATOR_VOLTAGE, \ | 153 | .type = REGULATOR_VOLTAGE, \ |
| 149 | .id = PFUZE100_ ## _name, \ | 154 | .id = _chip ## _ ## _name, \ |
| 150 | .owner = THIS_MODULE, \ | 155 | .owner = THIS_MODULE, \ |
| 151 | .min_uV = (voltage), \ | 156 | .min_uV = (voltage), \ |
| 152 | .enable_reg = (base), \ | 157 | .enable_reg = (base), \ |
| @@ -154,14 +159,14 @@ static struct regulator_ops pfuze100_swb_regulator_ops = { | |||
| 154 | }, \ | 159 | }, \ |
| 155 | } | 160 | } |
| 156 | 161 | ||
| 157 | #define PFUZE100_SW_REG(_name, base, min, max, step) \ | 162 | #define PFUZE100_SW_REG(_chip, _name, base, min, max, step) \ |
| 158 | [PFUZE100_ ## _name] = { \ | 163 | [_chip ## _ ## _name] = { \ |
| 159 | .desc = { \ | 164 | .desc = { \ |
| 160 | .name = #_name,\ | 165 | .name = #_name,\ |
| 161 | .n_voltages = ((max) - (min)) / (step) + 1, \ | 166 | .n_voltages = ((max) - (min)) / (step) + 1, \ |
| 162 | .ops = &pfuze100_sw_regulator_ops, \ | 167 | .ops = &pfuze100_sw_regulator_ops, \ |
| 163 | .type = REGULATOR_VOLTAGE, \ | 168 | .type = REGULATOR_VOLTAGE, \ |
| 164 | .id = PFUZE100_ ## _name, \ | 169 | .id = _chip ## _ ## _name, \ |
| 165 | .owner = THIS_MODULE, \ | 170 | .owner = THIS_MODULE, \ |
| 166 | .min_uV = (min), \ | 171 | .min_uV = (min), \ |
| 167 | .uV_step = (step), \ | 172 | .uV_step = (step), \ |
| @@ -172,14 +177,14 @@ static struct regulator_ops pfuze100_swb_regulator_ops = { | |||
| 172 | .stby_mask = 0x3f, \ | 177 | .stby_mask = 0x3f, \ |
| 173 | } | 178 | } |
| 174 | 179 | ||
| 175 | #define PFUZE100_SWB_REG(_name, base, mask, voltages) \ | 180 | #define PFUZE100_SWB_REG(_chip, _name, base, mask, voltages) \ |
| 176 | [PFUZE100_ ## _name] = { \ | 181 | [_chip ## _ ## _name] = { \ |
| 177 | .desc = { \ | 182 | .desc = { \ |
| 178 | .name = #_name, \ | 183 | .name = #_name, \ |
| 179 | .n_voltages = ARRAY_SIZE(voltages), \ | 184 | .n_voltages = ARRAY_SIZE(voltages), \ |
| 180 | .ops = &pfuze100_swb_regulator_ops, \ | 185 | .ops = &pfuze100_swb_regulator_ops, \ |
| 181 | .type = REGULATOR_VOLTAGE, \ | 186 | .type = REGULATOR_VOLTAGE, \ |
| 182 | .id = PFUZE100_ ## _name, \ | 187 | .id = _chip ## _ ## _name, \ |
| 183 | .owner = THIS_MODULE, \ | 188 | .owner = THIS_MODULE, \ |
| 184 | .volt_table = voltages, \ | 189 | .volt_table = voltages, \ |
| 185 | .vsel_reg = (base), \ | 190 | .vsel_reg = (base), \ |
| @@ -187,14 +192,14 @@ static struct regulator_ops pfuze100_swb_regulator_ops = { | |||
| 187 | }, \ | 192 | }, \ |
| 188 | } | 193 | } |
| 189 | 194 | ||
| 190 | #define PFUZE100_VGEN_REG(_name, base, min, max, step) \ | 195 | #define PFUZE100_VGEN_REG(_chip, _name, base, min, max, step) \ |
| 191 | [PFUZE100_ ## _name] = { \ | 196 | [_chip ## _ ## _name] = { \ |
| 192 | .desc = { \ | 197 | .desc = { \ |
| 193 | .name = #_name, \ | 198 | .name = #_name, \ |
| 194 | .n_voltages = ((max) - (min)) / (step) + 1, \ | 199 | .n_voltages = ((max) - (min)) / (step) + 1, \ |
| 195 | .ops = &pfuze100_ldo_regulator_ops, \ | 200 | .ops = &pfuze100_ldo_regulator_ops, \ |
| 196 | .type = REGULATOR_VOLTAGE, \ | 201 | .type = REGULATOR_VOLTAGE, \ |
| 197 | .id = PFUZE100_ ## _name, \ | 202 | .id = _chip ## _ ## _name, \ |
| 198 | .owner = THIS_MODULE, \ | 203 | .owner = THIS_MODULE, \ |
| 199 | .min_uV = (min), \ | 204 | .min_uV = (min), \ |
| 200 | .uV_step = (step), \ | 205 | .uV_step = (step), \ |
| @@ -207,25 +212,45 @@ static struct regulator_ops pfuze100_swb_regulator_ops = { | |||
| 207 | .stby_mask = 0x20, \ | 212 | .stby_mask = 0x20, \ |
| 208 | } | 213 | } |
| 209 | 214 | ||
| 215 | /* PFUZE100 */ | ||
| 210 | static struct pfuze_regulator pfuze100_regulators[] = { | 216 | static struct pfuze_regulator pfuze100_regulators[] = { |
| 211 | PFUZE100_SW_REG(SW1AB, PFUZE100_SW1ABVOL, 300000, 1875000, 25000), | 217 | PFUZE100_SW_REG(PFUZE100, SW1AB, PFUZE100_SW1ABVOL, 300000, 1875000, 25000), |
| 212 | PFUZE100_SW_REG(SW1C, PFUZE100_SW1CVOL, 300000, 1875000, 25000), | 218 | PFUZE100_SW_REG(PFUZE100, SW1C, PFUZE100_SW1CVOL, 300000, 1875000, 25000), |
| 213 | PFUZE100_SW_REG(SW2, PFUZE100_SW2VOL, 400000, 1975000, 25000), | 219 | PFUZE100_SW_REG(PFUZE100, SW2, PFUZE100_SW2VOL, 400000, 1975000, 25000), |
| 214 | PFUZE100_SW_REG(SW3A, PFUZE100_SW3AVOL, 400000, 1975000, 25000), | 220 | PFUZE100_SW_REG(PFUZE100, SW3A, PFUZE100_SW3AVOL, 400000, 1975000, 25000), |
| 215 | PFUZE100_SW_REG(SW3B, PFUZE100_SW3BVOL, 400000, 1975000, 25000), | 221 | PFUZE100_SW_REG(PFUZE100, SW3B, PFUZE100_SW3BVOL, 400000, 1975000, 25000), |
| 216 | PFUZE100_SW_REG(SW4, PFUZE100_SW4VOL, 400000, 1975000, 25000), | 222 | PFUZE100_SW_REG(PFUZE100, SW4, PFUZE100_SW4VOL, 400000, 1975000, 25000), |
| 217 | PFUZE100_SWB_REG(SWBST, PFUZE100_SWBSTCON1, 0x3 , pfuze100_swbst), | 223 | PFUZE100_SWB_REG(PFUZE100, SWBST, PFUZE100_SWBSTCON1, 0x3 , pfuze100_swbst), |
| 218 | PFUZE100_SWB_REG(VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs), | 224 | PFUZE100_SWB_REG(PFUZE100, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs), |
| 219 | PFUZE100_FIXED_REG(VREFDDR, PFUZE100_VREFDDRCON, 750000), | 225 | PFUZE100_FIXED_REG(PFUZE100, VREFDDR, PFUZE100_VREFDDRCON, 750000), |
| 220 | PFUZE100_VGEN_REG(VGEN1, PFUZE100_VGEN1VOL, 800000, 1550000, 50000), | 226 | PFUZE100_VGEN_REG(PFUZE100, VGEN1, PFUZE100_VGEN1VOL, 800000, 1550000, 50000), |
| 221 | PFUZE100_VGEN_REG(VGEN2, PFUZE100_VGEN2VOL, 800000, 1550000, 50000), | 227 | PFUZE100_VGEN_REG(PFUZE100, VGEN2, PFUZE100_VGEN2VOL, 800000, 1550000, 50000), |
| 222 | PFUZE100_VGEN_REG(VGEN3, PFUZE100_VGEN3VOL, 1800000, 3300000, 100000), | 228 | PFUZE100_VGEN_REG(PFUZE100, VGEN3, PFUZE100_VGEN3VOL, 1800000, 3300000, 100000), |
| 223 | PFUZE100_VGEN_REG(VGEN4, PFUZE100_VGEN4VOL, 1800000, 3300000, 100000), | 229 | PFUZE100_VGEN_REG(PFUZE100, VGEN4, PFUZE100_VGEN4VOL, 1800000, 3300000, 100000), |
| 224 | PFUZE100_VGEN_REG(VGEN5, PFUZE100_VGEN5VOL, 1800000, 3300000, 100000), | 230 | PFUZE100_VGEN_REG(PFUZE100, VGEN5, PFUZE100_VGEN5VOL, 1800000, 3300000, 100000), |
| 225 | PFUZE100_VGEN_REG(VGEN6, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000), | 231 | PFUZE100_VGEN_REG(PFUZE100, VGEN6, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000), |
| 232 | }; | ||
| 233 | |||
| 234 | static struct pfuze_regulator pfuze200_regulators[] = { | ||
| 235 | PFUZE100_SW_REG(PFUZE200, SW1AB, PFUZE100_SW1ABVOL, 300000, 1875000, 25000), | ||
| 236 | PFUZE100_SW_REG(PFUZE200, SW2, PFUZE100_SW2VOL, 400000, 1975000, 25000), | ||
| 237 | PFUZE100_SW_REG(PFUZE200, SW3A, PFUZE100_SW3AVOL, 400000, 1975000, 25000), | ||
| 238 | PFUZE100_SW_REG(PFUZE200, SW3B, PFUZE100_SW3BVOL, 400000, 1975000, 25000), | ||
| 239 | PFUZE100_SWB_REG(PFUZE200, SWBST, PFUZE100_SWBSTCON1, 0x3 , pfuze100_swbst), | ||
| 240 | PFUZE100_SWB_REG(PFUZE200, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs), | ||
| 241 | PFUZE100_FIXED_REG(PFUZE200, VREFDDR, PFUZE100_VREFDDRCON, 750000), | ||
| 242 | PFUZE100_VGEN_REG(PFUZE200, VGEN1, PFUZE100_VGEN1VOL, 800000, 1550000, 50000), | ||
| 243 | PFUZE100_VGEN_REG(PFUZE200, VGEN2, PFUZE100_VGEN2VOL, 800000, 1550000, 50000), | ||
| 244 | PFUZE100_VGEN_REG(PFUZE200, VGEN3, PFUZE100_VGEN3VOL, 1800000, 3300000, 100000), | ||
| 245 | PFUZE100_VGEN_REG(PFUZE200, VGEN4, PFUZE100_VGEN4VOL, 1800000, 3300000, 100000), | ||
| 246 | PFUZE100_VGEN_REG(PFUZE200, VGEN5, PFUZE100_VGEN5VOL, 1800000, 3300000, 100000), | ||
| 247 | PFUZE100_VGEN_REG(PFUZE200, VGEN6, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000), | ||
| 226 | }; | 248 | }; |
| 227 | 249 | ||
| 250 | static struct pfuze_regulator *pfuze_regulators; | ||
| 251 | |||
| 228 | #ifdef CONFIG_OF | 252 | #ifdef CONFIG_OF |
| 253 | /* PFUZE100 */ | ||
| 229 | static struct of_regulator_match pfuze100_matches[] = { | 254 | static struct of_regulator_match pfuze100_matches[] = { |
| 230 | { .name = "sw1ab", }, | 255 | { .name = "sw1ab", }, |
| 231 | { .name = "sw1c", }, | 256 | { .name = "sw1c", }, |
| @@ -244,24 +269,56 @@ static struct of_regulator_match pfuze100_matches[] = { | |||
| 244 | { .name = "vgen6", }, | 269 | { .name = "vgen6", }, |
| 245 | }; | 270 | }; |
| 246 | 271 | ||
| 272 | /* PFUZE200 */ | ||
| 273 | static struct of_regulator_match pfuze200_matches[] = { | ||
| 274 | |||
| 275 | { .name = "sw1ab", }, | ||
| 276 | { .name = "sw2", }, | ||
| 277 | { .name = "sw3a", }, | ||
| 278 | { .name = "sw3b", }, | ||
| 279 | { .name = "swbst", }, | ||
| 280 | { .name = "vsnvs", }, | ||
| 281 | { .name = "vrefddr", }, | ||
| 282 | { .name = "vgen1", }, | ||
| 283 | { .name = "vgen2", }, | ||
| 284 | { .name = "vgen3", }, | ||
| 285 | { .name = "vgen4", }, | ||
| 286 | { .name = "vgen5", }, | ||
| 287 | { .name = "vgen6", }, | ||
| 288 | }; | ||
| 289 | |||
| 290 | static struct of_regulator_match *pfuze_matches; | ||
| 291 | |||
| 247 | static int pfuze_parse_regulators_dt(struct pfuze_chip *chip) | 292 | static int pfuze_parse_regulators_dt(struct pfuze_chip *chip) |
| 248 | { | 293 | { |
| 249 | struct device *dev = chip->dev; | 294 | struct device *dev = chip->dev; |
| 250 | struct device_node *np, *parent; | 295 | struct device_node *np, *parent; |
| 251 | int ret; | 296 | int ret; |
| 252 | 297 | ||
| 253 | np = of_node_get(dev->parent->of_node); | 298 | np = of_node_get(dev->of_node); |
| 254 | if (!np) | 299 | if (!np) |
| 255 | return 0; | 300 | return -EINVAL; |
| 256 | 301 | ||
| 257 | parent = of_find_node_by_name(np, "regulators"); | 302 | parent = of_get_child_by_name(np, "regulators"); |
| 258 | if (!parent) { | 303 | if (!parent) { |
| 259 | dev_err(dev, "regulators node not found\n"); | 304 | dev_err(dev, "regulators node not found\n"); |
| 260 | return -EINVAL; | 305 | return -EINVAL; |
| 261 | } | 306 | } |
| 262 | 307 | ||
| 263 | ret = of_regulator_match(dev, parent, pfuze100_matches, | 308 | switch (chip->chip_id) { |
| 264 | ARRAY_SIZE(pfuze100_matches)); | 309 | case PFUZE200: |
| 310 | pfuze_matches = pfuze200_matches; | ||
| 311 | ret = of_regulator_match(dev, parent, pfuze200_matches, | ||
| 312 | ARRAY_SIZE(pfuze200_matches)); | ||
| 313 | break; | ||
| 314 | |||
| 315 | case PFUZE100: | ||
| 316 | default: | ||
| 317 | pfuze_matches = pfuze100_matches; | ||
| 318 | ret = of_regulator_match(dev, parent, pfuze100_matches, | ||
| 319 | ARRAY_SIZE(pfuze100_matches)); | ||
| 320 | break; | ||
| 321 | } | ||
| 265 | 322 | ||
| 266 | of_node_put(parent); | 323 | of_node_put(parent); |
| 267 | if (ret < 0) { | 324 | if (ret < 0) { |
| @@ -275,12 +332,12 @@ static int pfuze_parse_regulators_dt(struct pfuze_chip *chip) | |||
| 275 | 332 | ||
| 276 | static inline struct regulator_init_data *match_init_data(int index) | 333 | static inline struct regulator_init_data *match_init_data(int index) |
| 277 | { | 334 | { |
| 278 | return pfuze100_matches[index].init_data; | 335 | return pfuze_matches[index].init_data; |
| 279 | } | 336 | } |
| 280 | 337 | ||
| 281 | static inline struct device_node *match_of_node(int index) | 338 | static inline struct device_node *match_of_node(int index) |
| 282 | { | 339 | { |
| 283 | return pfuze100_matches[index].of_node; | 340 | return pfuze_matches[index].of_node; |
| 284 | } | 341 | } |
| 285 | #else | 342 | #else |
| 286 | static int pfuze_parse_regulators_dt(struct pfuze_chip *chip) | 343 | static int pfuze_parse_regulators_dt(struct pfuze_chip *chip) |
| @@ -308,16 +365,14 @@ static int pfuze_identify(struct pfuze_chip *pfuze_chip) | |||
| 308 | if (ret) | 365 | if (ret) |
| 309 | return ret; | 366 | return ret; |
| 310 | 367 | ||
| 311 | switch (value & 0x0f) { | 368 | if (((value & 0x0f) == 0x8) && (pfuze_chip->chip_id == PFUZE100)) { |
| 312 | /* | 369 | /* |
| 313 | * Freescale misprogrammed 1-3% of parts prior to week 8 of 2013 | 370 | * Freescale misprogrammed 1-3% of parts prior to week 8 of 2013 |
| 314 | * as ID=8 | 371 | * as ID=8 in PFUZE100 |
| 315 | */ | 372 | */ |
| 316 | case 0x8: | ||
| 317 | dev_info(pfuze_chip->dev, "Assuming misprogrammed ID=0x8"); | 373 | dev_info(pfuze_chip->dev, "Assuming misprogrammed ID=0x8"); |
| 318 | case 0x0: | 374 | } else if ((value & 0x0f) != pfuze_chip->chip_id) { |
| 319 | break; | 375 | /* device id NOT match with your setting */ |
| 320 | default: | ||
| 321 | dev_warn(pfuze_chip->dev, "Illegal ID: %x\n", value); | 376 | dev_warn(pfuze_chip->dev, "Illegal ID: %x\n", value); |
| 322 | return -ENODEV; | 377 | return -ENODEV; |
| 323 | } | 378 | } |
| @@ -353,17 +408,31 @@ static int pfuze100_regulator_probe(struct i2c_client *client, | |||
| 353 | dev_get_platdata(&client->dev); | 408 | dev_get_platdata(&client->dev); |
| 354 | struct regulator_config config = { }; | 409 | struct regulator_config config = { }; |
| 355 | int i, ret; | 410 | int i, ret; |
| 411 | const struct of_device_id *match; | ||
| 412 | u32 regulator_num; | ||
| 413 | u32 sw_check_start, sw_check_end; | ||
| 356 | 414 | ||
| 357 | pfuze_chip = devm_kzalloc(&client->dev, sizeof(*pfuze_chip), | 415 | pfuze_chip = devm_kzalloc(&client->dev, sizeof(*pfuze_chip), |
| 358 | GFP_KERNEL); | 416 | GFP_KERNEL); |
| 359 | if (!pfuze_chip) | 417 | if (!pfuze_chip) |
| 360 | return -ENOMEM; | 418 | return -ENOMEM; |
| 361 | 419 | ||
| 362 | i2c_set_clientdata(client, pfuze_chip); | 420 | if (client->dev.of_node) { |
| 363 | 421 | match = of_match_device(of_match_ptr(pfuze_dt_ids), | |
| 364 | memcpy(pfuze_chip->regulator_descs, pfuze100_regulators, | 422 | &client->dev); |
| 365 | sizeof(pfuze_chip->regulator_descs)); | 423 | if (!match) { |
| 424 | dev_err(&client->dev, "Error: No device match found\n"); | ||
| 425 | return -ENODEV; | ||
| 426 | } | ||
| 427 | pfuze_chip->chip_id = (int)(long)match->data; | ||
| 428 | } else if (id) { | ||
| 429 | pfuze_chip->chip_id = id->driver_data; | ||
| 430 | } else { | ||
| 431 | dev_err(&client->dev, "No dts match or id table match found\n"); | ||
| 432 | return -ENODEV; | ||
| 433 | } | ||
| 366 | 434 | ||
| 435 | i2c_set_clientdata(client, pfuze_chip); | ||
| 367 | pfuze_chip->dev = &client->dev; | 436 | pfuze_chip->dev = &client->dev; |
| 368 | 437 | ||
| 369 | pfuze_chip->regmap = devm_regmap_init_i2c(client, &pfuze_regmap_config); | 438 | pfuze_chip->regmap = devm_regmap_init_i2c(client, &pfuze_regmap_config); |
| @@ -380,11 +449,34 @@ static int pfuze100_regulator_probe(struct i2c_client *client, | |||
| 380 | return ret; | 449 | return ret; |
| 381 | } | 450 | } |
| 382 | 451 | ||
| 452 | /* use the right regulators after identify the right device */ | ||
| 453 | switch (pfuze_chip->chip_id) { | ||
| 454 | case PFUZE200: | ||
| 455 | pfuze_regulators = pfuze200_regulators; | ||
| 456 | regulator_num = ARRAY_SIZE(pfuze200_regulators); | ||
| 457 | sw_check_start = PFUZE200_SW2; | ||
| 458 | sw_check_end = PFUZE200_SW3B; | ||
| 459 | break; | ||
| 460 | |||
| 461 | case PFUZE100: | ||
| 462 | default: | ||
| 463 | pfuze_regulators = pfuze100_regulators; | ||
| 464 | regulator_num = ARRAY_SIZE(pfuze100_regulators); | ||
| 465 | sw_check_start = PFUZE100_SW2; | ||
| 466 | sw_check_end = PFUZE100_SW4; | ||
| 467 | break; | ||
| 468 | } | ||
| 469 | dev_info(&client->dev, "pfuze%s found.\n", | ||
| 470 | (pfuze_chip->chip_id == PFUZE100) ? "100" : "200"); | ||
| 471 | |||
| 472 | memcpy(pfuze_chip->regulator_descs, pfuze_regulators, | ||
| 473 | sizeof(pfuze_chip->regulator_descs)); | ||
| 474 | |||
| 383 | ret = pfuze_parse_regulators_dt(pfuze_chip); | 475 | ret = pfuze_parse_regulators_dt(pfuze_chip); |
| 384 | if (ret) | 476 | if (ret) |
| 385 | return ret; | 477 | return ret; |
| 386 | 478 | ||
| 387 | for (i = 0; i < PFUZE100_MAX_REGULATOR; i++) { | 479 | for (i = 0; i < regulator_num; i++) { |
| 388 | struct regulator_init_data *init_data; | 480 | struct regulator_init_data *init_data; |
| 389 | struct regulator_desc *desc; | 481 | struct regulator_desc *desc; |
| 390 | int val; | 482 | int val; |
| @@ -397,7 +489,7 @@ static int pfuze100_regulator_probe(struct i2c_client *client, | |||
| 397 | init_data = match_init_data(i); | 489 | init_data = match_init_data(i); |
| 398 | 490 | ||
| 399 | /* SW2~SW4 high bit check and modify the voltage value table */ | 491 | /* SW2~SW4 high bit check and modify the voltage value table */ |
| 400 | if (i > PFUZE100_SW1C && i < PFUZE100_SWBST) { | 492 | if (i >= sw_check_start && i <= sw_check_end) { |
| 401 | regmap_read(pfuze_chip->regmap, desc->vsel_reg, &val); | 493 | regmap_read(pfuze_chip->regmap, desc->vsel_reg, &val); |
| 402 | if (val & 0x40) { | 494 | if (val & 0x40) { |
| 403 | desc->min_uV = 800000; | 495 | desc->min_uV = 800000; |
| @@ -415,7 +507,7 @@ static int pfuze100_regulator_probe(struct i2c_client *client, | |||
| 415 | devm_regulator_register(&client->dev, desc, &config); | 507 | devm_regulator_register(&client->dev, desc, &config); |
| 416 | if (IS_ERR(pfuze_chip->regulators[i])) { | 508 | if (IS_ERR(pfuze_chip->regulators[i])) { |
| 417 | dev_err(&client->dev, "register regulator%s failed\n", | 509 | dev_err(&client->dev, "register regulator%s failed\n", |
| 418 | pfuze100_regulators[i].desc.name); | 510 | pfuze_regulators[i].desc.name); |
| 419 | return PTR_ERR(pfuze_chip->regulators[i]); | 511 | return PTR_ERR(pfuze_chip->regulators[i]); |
| 420 | } | 512 | } |
| 421 | } | 513 | } |
| @@ -435,6 +527,6 @@ static struct i2c_driver pfuze_driver = { | |||
| 435 | module_i2c_driver(pfuze_driver); | 527 | module_i2c_driver(pfuze_driver); |
| 436 | 528 | ||
| 437 | MODULE_AUTHOR("Robin Gong <b38343@freescale.com>"); | 529 | MODULE_AUTHOR("Robin Gong <b38343@freescale.com>"); |
| 438 | MODULE_DESCRIPTION("Regulator Driver for Freescale PFUZE100 PMIC"); | 530 | MODULE_DESCRIPTION("Regulator Driver for Freescale PFUZE100/PFUZE200 PMIC"); |
| 439 | MODULE_LICENSE("GPL v2"); | 531 | MODULE_LICENSE("GPL v2"); |
| 440 | MODULE_ALIAS("i2c:pfuze100-regulator"); | 532 | MODULE_ALIAS("i2c:pfuze100-regulator"); |
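The PFUZE200 support above hinges on one probe-time decision: derive a chip_id either from the matched of_device_id's ->data (DT boot) or from the i2c_device_id's ->driver_data (board-file boot), then select the matching regulator table, of_regulator_match table and SW high-bit check range. A condensed, hedged restatement of that selection logic; the table names come from the hunks, the helper itself is illustrative:

static int pfuze_get_chip_id(struct i2c_client *client,
			     const struct i2c_device_id *id)
{
	const struct of_device_id *match;

	if (client->dev.of_node) {
		match = of_match_device(pfuze_dt_ids, &client->dev);
		if (!match)
			return -ENODEV;
		return (int)(long)match->data;	/* PFUZE100 or PFUZE200 */
	}
	if (id)
		return id->driver_data;		/* legacy i2c board info */
	return -ENODEV;
}

With the id in hand the probe switches pfuze_regulators/pfuze_matches to the PFUZE100 or PFUZE200 tables, so the registration loop itself stays table-driven and only its bounds (regulator_num, sw_check_start/end) change per chip.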
diff --git a/drivers/regulator/rc5t583-regulator.c b/drivers/regulator/rc5t583-regulator.c index b58affb33143..4c414ae109ae 100644 --- a/drivers/regulator/rc5t583-regulator.c +++ b/drivers/regulator/rc5t583-regulator.c | |||
| @@ -119,7 +119,6 @@ static int rc5t583_regulator_probe(struct platform_device *pdev) | |||
| 119 | { | 119 | { |
| 120 | struct rc5t583 *rc5t583 = dev_get_drvdata(pdev->dev.parent); | 120 | struct rc5t583 *rc5t583 = dev_get_drvdata(pdev->dev.parent); |
| 121 | struct rc5t583_platform_data *pdata = dev_get_platdata(rc5t583->dev); | 121 | struct rc5t583_platform_data *pdata = dev_get_platdata(rc5t583->dev); |
| 122 | struct regulator_init_data *reg_data; | ||
| 123 | struct regulator_config config = { }; | 122 | struct regulator_config config = { }; |
| 124 | struct rc5t583_regulator *reg = NULL; | 123 | struct rc5t583_regulator *reg = NULL; |
| 125 | struct rc5t583_regulator *regs; | 124 | struct rc5t583_regulator *regs; |
| @@ -135,19 +134,11 @@ static int rc5t583_regulator_probe(struct platform_device *pdev) | |||
| 135 | 134 | ||
| 136 | regs = devm_kzalloc(&pdev->dev, RC5T583_REGULATOR_MAX * | 135 | regs = devm_kzalloc(&pdev->dev, RC5T583_REGULATOR_MAX * |
| 137 | sizeof(struct rc5t583_regulator), GFP_KERNEL); | 136 | sizeof(struct rc5t583_regulator), GFP_KERNEL); |
| 138 | if (!regs) { | 137 | if (!regs) |
| 139 | dev_err(&pdev->dev, "Memory allocation failed exiting..\n"); | ||
| 140 | return -ENOMEM; | 138 | return -ENOMEM; |
| 141 | } | ||
| 142 | 139 | ||
| 143 | 140 | ||
| 144 | for (id = 0; id < RC5T583_REGULATOR_MAX; ++id) { | 141 | for (id = 0; id < RC5T583_REGULATOR_MAX; ++id) { |
| 145 | reg_data = pdata->reg_init_data[id]; | ||
| 146 | |||
| 147 | /* No need to register if there is no regulator data */ | ||
| 148 | if (!reg_data) | ||
| 149 | continue; | ||
| 150 | |||
| 151 | reg = ®s[id]; | 142 | reg = ®s[id]; |
| 152 | ri = &rc5t583_reg_info[id]; | 143 | ri = &rc5t583_reg_info[id]; |
| 153 | reg->reg_info = ri; | 144 | reg->reg_info = ri; |
| @@ -169,7 +160,7 @@ static int rc5t583_regulator_probe(struct platform_device *pdev) | |||
| 169 | 160 | ||
| 170 | skip_ext_pwr_config: | 161 | skip_ext_pwr_config: |
| 171 | config.dev = &pdev->dev; | 162 | config.dev = &pdev->dev; |
| 172 | config.init_data = reg_data; | 163 | config.init_data = pdata->reg_init_data[id]; |
| 173 | config.driver_data = reg; | 164 | config.driver_data = reg; |
| 174 | config.regmap = rc5t583->regmap; | 165 | config.regmap = rc5t583->regmap; |
| 175 | 166 | ||
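Two things change in the rc5t583 hunk: the redundant dev_err() on a failed devm_kzalloc() is dropped (the allocator already warns), and regulators without platform init data are no longer skipped, since the regulator core accepts a NULL init_data. A minimal sketch of the resulting loop body, where desc stands for the per-regulator descriptor and reg->rdev is an assumed bookkeeping field not shown in the hunk:

for (id = 0; id < RC5T583_REGULATOR_MAX; ++id) {
	struct rc5t583_regulator *reg = &regs[id];

	config.dev = &pdev->dev;
	config.init_data = pdata->reg_init_data[id];	/* may be NULL */
	config.driver_data = reg;
	config.regmap = rc5t583->regmap;

	rdev = devm_regulator_register(&pdev->dev, desc, &config);
	if (IS_ERR(rdev))
		return PTR_ERR(rdev);
	reg->rdev = rdev;		/* assumed bookkeeping */
}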
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c index 1f6fd4c45006..68fd54702edb 100644 --- a/drivers/regulator/s2mps11.c +++ b/drivers/regulator/s2mps11.c | |||
| @@ -73,7 +73,7 @@ static int s2mps11_regulator_set_voltage_time_sel(struct regulator_dev *rdev, | |||
| 73 | unsigned int ramp_delay = 0; | 73 | unsigned int ramp_delay = 0; |
| 74 | int old_volt, new_volt; | 74 | int old_volt, new_volt; |
| 75 | 75 | ||
| 76 | switch (rdev->desc->id) { | 76 | switch (rdev_get_id(rdev)) { |
| 77 | case S2MPS11_BUCK2: | 77 | case S2MPS11_BUCK2: |
| 78 | ramp_delay = s2mps11->ramp_delay2; | 78 | ramp_delay = s2mps11->ramp_delay2; |
| 79 | break; | 79 | break; |
| @@ -113,7 +113,7 @@ static int s2mps11_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay) | |||
| 113 | unsigned int ramp_enable = 1, enable_shift = 0; | 113 | unsigned int ramp_enable = 1, enable_shift = 0; |
| 114 | int ret; | 114 | int ret; |
| 115 | 115 | ||
| 116 | switch (rdev->desc->id) { | 116 | switch (rdev_get_id(rdev)) { |
| 117 | case S2MPS11_BUCK1: | 117 | case S2MPS11_BUCK1: |
| 118 | if (ramp_delay > s2mps11->ramp_delay16) | 118 | if (ramp_delay > s2mps11->ramp_delay16) |
| 119 | s2mps11->ramp_delay16 = ramp_delay; | 119 | s2mps11->ramp_delay16 = ramp_delay; |
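The s2mps11 hunks are a mechanical substitution: rdev_get_id() is the accessor the regulator core provides for the descriptor id, equivalent to the direct read but independent of the regulator_dev layout. For reference:

int id;

id = rdev->desc->id;	/* old form: reaches into the descriptor */
id = rdev_get_id(rdev);	/* new form: same value via the core helper */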
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index 7afd373b9595..c4cde9c08f1f 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c | |||
| @@ -580,10 +580,12 @@ static int s3c_rtc_suspend(struct device *dev) | |||
| 580 | 580 | ||
| 581 | clk_enable(rtc_clk); | 581 | clk_enable(rtc_clk); |
| 582 | /* save TICNT for anyone using periodic interrupts */ | 582 | /* save TICNT for anyone using periodic interrupts */ |
| 583 | ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT); | ||
| 584 | if (s3c_rtc_cpu_type == TYPE_S3C64XX) { | 583 | if (s3c_rtc_cpu_type == TYPE_S3C64XX) { |
| 585 | ticnt_en_save = readw(s3c_rtc_base + S3C2410_RTCCON); | 584 | ticnt_en_save = readw(s3c_rtc_base + S3C2410_RTCCON); |
| 586 | ticnt_en_save &= S3C64XX_RTCCON_TICEN; | 585 | ticnt_en_save &= S3C64XX_RTCCON_TICEN; |
| 586 | ticnt_save = readl(s3c_rtc_base + S3C2410_TICNT); | ||
| 587 | } else { | ||
| 588 | ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT); | ||
| 587 | } | 589 | } |
| 588 | s3c_rtc_enable(pdev, 0); | 590 | s3c_rtc_enable(pdev, 0); |
| 589 | 591 | ||
| @@ -605,10 +607,15 @@ static int s3c_rtc_resume(struct device *dev) | |||
| 605 | 607 | ||
| 606 | clk_enable(rtc_clk); | 608 | clk_enable(rtc_clk); |
| 607 | s3c_rtc_enable(pdev, 1); | 609 | s3c_rtc_enable(pdev, 1); |
| 608 | writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT); | 610 | if (s3c_rtc_cpu_type == TYPE_S3C64XX) { |
| 609 | if (s3c_rtc_cpu_type == TYPE_S3C64XX && ticnt_en_save) { | 611 | writel(ticnt_save, s3c_rtc_base + S3C2410_TICNT); |
| 610 | tmp = readw(s3c_rtc_base + S3C2410_RTCCON); | 612 | if (ticnt_en_save) { |
| 611 | writew(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON); | 613 | tmp = readw(s3c_rtc_base + S3C2410_RTCCON); |
| 614 | writew(tmp | ticnt_en_save, | ||
| 615 | s3c_rtc_base + S3C2410_RTCCON); | ||
| 616 | } | ||
| 617 | } else { | ||
| 618 | writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT); | ||
| 612 | } | 619 | } |
| 613 | 620 | ||
| 614 | if (device_may_wakeup(dev) && wake_en) { | 621 | if (device_may_wakeup(dev) && wake_en) { |
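The rtc-s3c change keys the TICNT save/restore width off the SoC type: S3C64XX parts carry a wider tick counter accessed with readl()/writel(), while the older parts keep the original 8-bit readb()/writeb() path, and the resume side mirrors the same split. A condensed form of the suspend side, with register names taken from the hunks and the rest of the suspend path elided:

if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
	ticnt_en_save = readw(s3c_rtc_base + S3C2410_RTCCON) &
			S3C64XX_RTCCON_TICEN;
	ticnt_save = readl(s3c_rtc_base + S3C2410_TICNT);	/* wide counter */
} else {
	ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT);	/* 8-bit counter */
}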
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index c3a83df07894..795ed61a5496 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
| @@ -1660,7 +1660,6 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt) | |||
| 1660 | QDIO_FLAG_CLEANUP_USING_CLEAR); | 1660 | QDIO_FLAG_CLEANUP_USING_CLEAR); |
| 1661 | if (rc) | 1661 | if (rc) |
| 1662 | QETH_CARD_TEXT_(card, 3, "1err%d", rc); | 1662 | QETH_CARD_TEXT_(card, 3, "1err%d", rc); |
| 1663 | qdio_free(CARD_DDEV(card)); | ||
| 1664 | atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); | 1663 | atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); |
| 1665 | break; | 1664 | break; |
| 1666 | case QETH_QDIO_CLEANING: | 1665 | case QETH_QDIO_CLEANING: |
| @@ -2605,6 +2604,7 @@ static int qeth_mpc_initialize(struct qeth_card *card) | |||
| 2605 | return 0; | 2604 | return 0; |
| 2606 | out_qdio: | 2605 | out_qdio: |
| 2607 | qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); | 2606 | qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); |
| 2607 | qdio_free(CARD_DDEV(card)); | ||
| 2608 | return rc; | 2608 | return rc; |
| 2609 | } | 2609 | } |
| 2610 | 2610 | ||
| @@ -4906,9 +4906,11 @@ retry: | |||
| 4906 | if (retries < 3) | 4906 | if (retries < 3) |
| 4907 | QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n", | 4907 | QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n", |
| 4908 | dev_name(&card->gdev->dev)); | 4908 | dev_name(&card->gdev->dev)); |
| 4909 | rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); | ||
| 4909 | ccw_device_set_offline(CARD_DDEV(card)); | 4910 | ccw_device_set_offline(CARD_DDEV(card)); |
| 4910 | ccw_device_set_offline(CARD_WDEV(card)); | 4911 | ccw_device_set_offline(CARD_WDEV(card)); |
| 4911 | ccw_device_set_offline(CARD_RDEV(card)); | 4912 | ccw_device_set_offline(CARD_RDEV(card)); |
| 4913 | qdio_free(CARD_DDEV(card)); | ||
| 4912 | rc = ccw_device_set_online(CARD_RDEV(card)); | 4914 | rc = ccw_device_set_online(CARD_RDEV(card)); |
| 4913 | if (rc) | 4915 | if (rc) |
| 4914 | goto retriable; | 4916 | goto retriable; |
| @@ -4918,7 +4920,6 @@ retry: | |||
| 4918 | rc = ccw_device_set_online(CARD_DDEV(card)); | 4920 | rc = ccw_device_set_online(CARD_DDEV(card)); |
| 4919 | if (rc) | 4921 | if (rc) |
| 4920 | goto retriable; | 4922 | goto retriable; |
| 4921 | rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); | ||
| 4922 | retriable: | 4923 | retriable: |
| 4923 | if (rc == -ERESTARTSYS) { | 4924 | if (rc == -ERESTARTSYS) { |
| 4924 | QETH_DBF_TEXT(SETUP, 2, "break1"); | 4925 | QETH_DBF_TEXT(SETUP, 2, "break1"); |
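The qeth core change pulls qdio_free() out of qeth_qdio_clear_card() and makes every caller release the QDIO structures explicitly, so clearing a card no longer implies freeing it. In the IDX retry path this also reorders the teardown: clear first, cycle the ccw devices offline, then free, before bringing the devices back online. A condensed view with error handling elided:

qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
ccw_device_set_offline(CARD_DDEV(card));
ccw_device_set_offline(CARD_WDEV(card));
ccw_device_set_offline(CARD_RDEV(card));
qdio_free(CARD_DDEV(card));		/* now an explicit step */
rc = ccw_device_set_online(CARD_RDEV(card));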
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 0710550093ce..908d82529ee9 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c | |||
| @@ -1091,6 +1091,7 @@ out_remove: | |||
| 1091 | ccw_device_set_offline(CARD_DDEV(card)); | 1091 | ccw_device_set_offline(CARD_DDEV(card)); |
| 1092 | ccw_device_set_offline(CARD_WDEV(card)); | 1092 | ccw_device_set_offline(CARD_WDEV(card)); |
| 1093 | ccw_device_set_offline(CARD_RDEV(card)); | 1093 | ccw_device_set_offline(CARD_RDEV(card)); |
| 1094 | qdio_free(CARD_DDEV(card)); | ||
| 1094 | if (recover_flag == CARD_STATE_RECOVER) | 1095 | if (recover_flag == CARD_STATE_RECOVER) |
| 1095 | card->state = CARD_STATE_RECOVER; | 1096 | card->state = CARD_STATE_RECOVER; |
| 1096 | else | 1097 | else |
| @@ -1132,6 +1133,7 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev, | |||
| 1132 | rc = (rc2) ? rc2 : rc3; | 1133 | rc = (rc2) ? rc2 : rc3; |
| 1133 | if (rc) | 1134 | if (rc) |
| 1134 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); | 1135 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); |
| 1136 | qdio_free(CARD_DDEV(card)); | ||
| 1135 | if (recover_flag == CARD_STATE_UP) | 1137 | if (recover_flag == CARD_STATE_UP) |
| 1136 | card->state = CARD_STATE_RECOVER; | 1138 | card->state = CARD_STATE_RECOVER; |
| 1137 | /* let user_space know that device is offline */ | 1139 | /* let user_space know that device is offline */ |
| @@ -1194,6 +1196,7 @@ static void qeth_l2_shutdown(struct ccwgroup_device *gdev) | |||
| 1194 | qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); | 1196 | qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); |
| 1195 | qeth_qdio_clear_card(card, 0); | 1197 | qeth_qdio_clear_card(card, 0); |
| 1196 | qeth_clear_qdio_buffers(card); | 1198 | qeth_clear_qdio_buffers(card); |
| 1199 | qdio_free(CARD_DDEV(card)); | ||
| 1197 | } | 1200 | } |
| 1198 | 1201 | ||
| 1199 | static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev) | 1202 | static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev) |
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 0f430424c3b8..3524d34ff694 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
| @@ -3447,6 +3447,7 @@ out_remove: | |||
| 3447 | ccw_device_set_offline(CARD_DDEV(card)); | 3447 | ccw_device_set_offline(CARD_DDEV(card)); |
| 3448 | ccw_device_set_offline(CARD_WDEV(card)); | 3448 | ccw_device_set_offline(CARD_WDEV(card)); |
| 3449 | ccw_device_set_offline(CARD_RDEV(card)); | 3449 | ccw_device_set_offline(CARD_RDEV(card)); |
| 3450 | qdio_free(CARD_DDEV(card)); | ||
| 3450 | if (recover_flag == CARD_STATE_RECOVER) | 3451 | if (recover_flag == CARD_STATE_RECOVER) |
| 3451 | card->state = CARD_STATE_RECOVER; | 3452 | card->state = CARD_STATE_RECOVER; |
| 3452 | else | 3453 | else |
| @@ -3493,6 +3494,7 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev, | |||
| 3493 | rc = (rc2) ? rc2 : rc3; | 3494 | rc = (rc2) ? rc2 : rc3; |
| 3494 | if (rc) | 3495 | if (rc) |
| 3495 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); | 3496 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); |
| 3497 | qdio_free(CARD_DDEV(card)); | ||
| 3496 | if (recover_flag == CARD_STATE_UP) | 3498 | if (recover_flag == CARD_STATE_UP) |
| 3497 | card->state = CARD_STATE_RECOVER; | 3499 | card->state = CARD_STATE_RECOVER; |
| 3498 | /* let user_space know that device is offline */ | 3500 | /* let user_space know that device is offline */ |
| @@ -3545,6 +3547,7 @@ static void qeth_l3_shutdown(struct ccwgroup_device *gdev) | |||
| 3545 | qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); | 3547 | qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); |
| 3546 | qeth_qdio_clear_card(card, 0); | 3548 | qeth_qdio_clear_card(card, 0); |
| 3547 | qeth_clear_qdio_buffers(card); | 3549 | qeth_clear_qdio_buffers(card); |
| 3550 | qdio_free(CARD_DDEV(card)); | ||
| 3548 | } | 3551 | } |
| 3549 | 3552 | ||
| 3550 | static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev) | 3553 | static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev) |
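Both the L2 and L3 disciplines above gain the matching explicit qdio_free() calls on their error, set-offline and shutdown paths, keeping the allocate/free pairing balanced now that qeth_qdio_clear_card() only clears. The shutdown sequence, condensed:

qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
qeth_qdio_clear_card(card, 0);
qeth_clear_qdio_buffers(card);
qdio_free(CARD_DDEV(card));	/* release what clear_card() no longer frees */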
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 1f375051483a..5642a9b250c2 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c | |||
| @@ -325,7 +325,7 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc) | |||
| 325 | if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE) | 325 | if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE) |
| 326 | continue; | 326 | continue; |
| 327 | 327 | ||
| 328 | if (abrt_task->sc->device->lun != abrt_task->sc->device->lun) | 328 | if (sc->device->lun != abrt_task->sc->device->lun) |
| 329 | continue; | 329 | continue; |
| 330 | 330 | ||
| 331 | /* Invalidate WRB Posted for this Task */ | 331 | /* Invalidate WRB Posted for this Task */ |
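The be2iscsi hunk fixes a self-comparison: the old test compared a task's LUN to itself, which can never differ, so the LUN filter never skipped anything during a device reset. The corrected filter compares each outstanding task against the LUN of the command driving the reset:

if (sc->device->lun != abrt_task->sc->device->lun)
	continue;	/* task belongs to a different LUN, leave it alone */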
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index ed880891cb7c..e9279a8c1e1c 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c | |||
| @@ -594,13 +594,13 @@ static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req) | |||
| 594 | mp_req->mp_resp_bd = NULL; | 594 | mp_req->mp_resp_bd = NULL; |
| 595 | } | 595 | } |
| 596 | if (mp_req->req_buf) { | 596 | if (mp_req->req_buf) { |
| 597 | dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, | 597 | dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, |
| 598 | mp_req->req_buf, | 598 | mp_req->req_buf, |
| 599 | mp_req->req_buf_dma); | 599 | mp_req->req_buf_dma); |
| 600 | mp_req->req_buf = NULL; | 600 | mp_req->req_buf = NULL; |
| 601 | } | 601 | } |
| 602 | if (mp_req->resp_buf) { | 602 | if (mp_req->resp_buf) { |
| 603 | dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, | 603 | dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, |
| 604 | mp_req->resp_buf, | 604 | mp_req->resp_buf, |
| 605 | mp_req->resp_buf_dma); | 605 | mp_req->resp_buf_dma); |
| 606 | mp_req->resp_buf = NULL; | 606 | mp_req->resp_buf = NULL; |
| @@ -622,7 +622,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req) | |||
| 622 | 622 | ||
| 623 | mp_req->req_len = sizeof(struct fcp_cmnd); | 623 | mp_req->req_len = sizeof(struct fcp_cmnd); |
| 624 | io_req->data_xfer_len = mp_req->req_len; | 624 | io_req->data_xfer_len = mp_req->req_len; |
| 625 | mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, | 625 | mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, |
| 626 | &mp_req->req_buf_dma, | 626 | &mp_req->req_buf_dma, |
| 627 | GFP_ATOMIC); | 627 | GFP_ATOMIC); |
| 628 | if (!mp_req->req_buf) { | 628 | if (!mp_req->req_buf) { |
| @@ -631,7 +631,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req) | |||
| 631 | return FAILED; | 631 | return FAILED; |
| 632 | } | 632 | } |
| 633 | 633 | ||
| 634 | mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, | 634 | mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, |
| 635 | &mp_req->resp_buf_dma, | 635 | &mp_req->resp_buf_dma, |
| 636 | GFP_ATOMIC); | 636 | GFP_ATOMIC); |
| 637 | if (!mp_req->resp_buf) { | 637 | if (!mp_req->resp_buf) { |
| @@ -639,8 +639,8 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req) | |||
| 639 | bnx2fc_free_mp_resc(io_req); | 639 | bnx2fc_free_mp_resc(io_req); |
| 640 | return FAILED; | 640 | return FAILED; |
| 641 | } | 641 | } |
| 642 | memset(mp_req->req_buf, 0, PAGE_SIZE); | 642 | memset(mp_req->req_buf, 0, CNIC_PAGE_SIZE); |
| 643 | memset(mp_req->resp_buf, 0, PAGE_SIZE); | 643 | memset(mp_req->resp_buf, 0, CNIC_PAGE_SIZE); |
| 644 | 644 | ||
| 645 | /* Allocate and map mp_req_bd and mp_resp_bd */ | 645 | /* Allocate and map mp_req_bd and mp_resp_bd */ |
| 646 | sz = sizeof(struct fcoe_bd_ctx); | 646 | sz = sizeof(struct fcoe_bd_ctx); |
| @@ -665,7 +665,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req) | |||
| 665 | mp_req_bd = mp_req->mp_req_bd; | 665 | mp_req_bd = mp_req->mp_req_bd; |
| 666 | mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff; | 666 | mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff; |
| 667 | mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32); | 667 | mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32); |
| 668 | mp_req_bd->buf_len = PAGE_SIZE; | 668 | mp_req_bd->buf_len = CNIC_PAGE_SIZE; |
| 669 | mp_req_bd->flags = 0; | 669 | mp_req_bd->flags = 0; |
| 670 | 670 | ||
| 671 | /* | 671 | /* |
| @@ -677,7 +677,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req) | |||
| 677 | addr = mp_req->resp_buf_dma; | 677 | addr = mp_req->resp_buf_dma; |
| 678 | mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff; | 678 | mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff; |
| 679 | mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32); | 679 | mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32); |
| 680 | mp_resp_bd->buf_len = PAGE_SIZE; | 680 | mp_resp_bd->buf_len = CNIC_PAGE_SIZE; |
| 681 | mp_resp_bd->flags = 0; | 681 | mp_resp_bd->flags = 0; |
| 682 | 682 | ||
| 683 | return SUCCESS; | 683 | return SUCCESS; |
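The bnx2fc and bnx2i hunks replace PAGE_SIZE with CNIC_PAGE_SIZE for every buffer and queue shared with the cnic firmware. The adapter works in fixed 4 KiB pages (CNIC_PAGE_SIZE, CNIC_PAGE_MASK and CNIC_PAGE_BITS come from the cnic interface header), whereas the kernel PAGE_SIZE can be 64 KiB on some architectures, which would over-allocate and mis-describe the buffers to the firmware. The middle-path allocation after the change, condensed from the hunks above:

mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
				     &mp_req->req_buf_dma, GFP_ATOMIC);
if (!mp_req->req_buf)
	return FAILED;
memset(mp_req->req_buf, 0, CNIC_PAGE_SIZE);	/* firmware page size, not PAGE_SIZE */
mp_req_bd->buf_len = CNIC_PAGE_SIZE;		/* and described as such in the BD */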
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c index 4d93177dfb53..d9bae5672273 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c +++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c | |||
| @@ -673,7 +673,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
| 673 | 673 | ||
| 674 | /* Allocate and map SQ */ | 674 | /* Allocate and map SQ */ |
| 675 | tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE; | 675 | tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE; |
| 676 | tgt->sq_mem_size = (tgt->sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; | 676 | tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) & |
| 677 | CNIC_PAGE_MASK; | ||
| 677 | 678 | ||
| 678 | tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size, | 679 | tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size, |
| 679 | &tgt->sq_dma, GFP_KERNEL); | 680 | &tgt->sq_dma, GFP_KERNEL); |
| @@ -686,7 +687,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
| 686 | 687 | ||
| 687 | /* Allocate and map CQ */ | 688 | /* Allocate and map CQ */ |
| 688 | tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE; | 689 | tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE; |
| 689 | tgt->cq_mem_size = (tgt->cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; | 690 | tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) & |
| 691 | CNIC_PAGE_MASK; | ||
| 690 | 692 | ||
| 691 | tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size, | 693 | tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size, |
| 692 | &tgt->cq_dma, GFP_KERNEL); | 694 | &tgt->cq_dma, GFP_KERNEL); |
| @@ -699,7 +701,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
| 699 | 701 | ||
| 700 | /* Allocate and map RQ and RQ PBL */ | 702 | /* Allocate and map RQ and RQ PBL */ |
| 701 | tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE; | 703 | tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE; |
| 702 | tgt->rq_mem_size = (tgt->rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; | 704 | tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) & |
| 705 | CNIC_PAGE_MASK; | ||
| 703 | 706 | ||
| 704 | tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size, | 707 | tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size, |
| 705 | &tgt->rq_dma, GFP_KERNEL); | 708 | &tgt->rq_dma, GFP_KERNEL); |
| @@ -710,8 +713,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
| 710 | } | 713 | } |
| 711 | memset(tgt->rq, 0, tgt->rq_mem_size); | 714 | memset(tgt->rq, 0, tgt->rq_mem_size); |
| 712 | 715 | ||
| 713 | tgt->rq_pbl_size = (tgt->rq_mem_size / PAGE_SIZE) * sizeof(void *); | 716 | tgt->rq_pbl_size = (tgt->rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); |
| 714 | tgt->rq_pbl_size = (tgt->rq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; | 717 | tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) & |
| 718 | CNIC_PAGE_MASK; | ||
| 715 | 719 | ||
| 716 | tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size, | 720 | tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size, |
| 717 | &tgt->rq_pbl_dma, GFP_KERNEL); | 721 | &tgt->rq_pbl_dma, GFP_KERNEL); |
| @@ -722,7 +726,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
| 722 | } | 726 | } |
| 723 | 727 | ||
| 724 | memset(tgt->rq_pbl, 0, tgt->rq_pbl_size); | 728 | memset(tgt->rq_pbl, 0, tgt->rq_pbl_size); |
| 725 | num_pages = tgt->rq_mem_size / PAGE_SIZE; | 729 | num_pages = tgt->rq_mem_size / CNIC_PAGE_SIZE; |
| 726 | page = tgt->rq_dma; | 730 | page = tgt->rq_dma; |
| 727 | pbl = (u32 *)tgt->rq_pbl; | 731 | pbl = (u32 *)tgt->rq_pbl; |
| 728 | 732 | ||
| @@ -731,13 +735,13 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
| 731 | pbl++; | 735 | pbl++; |
| 732 | *pbl = (u32)((u64)page >> 32); | 736 | *pbl = (u32)((u64)page >> 32); |
| 733 | pbl++; | 737 | pbl++; |
| 734 | page += PAGE_SIZE; | 738 | page += CNIC_PAGE_SIZE; |
| 735 | } | 739 | } |
| 736 | 740 | ||
| 737 | /* Allocate and map XFERQ */ | 741 | /* Allocate and map XFERQ */ |
| 738 | tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE; | 742 | tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE; |
| 739 | tgt->xferq_mem_size = (tgt->xferq_mem_size + (PAGE_SIZE - 1)) & | 743 | tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) & |
| 740 | PAGE_MASK; | 744 | CNIC_PAGE_MASK; |
| 741 | 745 | ||
| 742 | tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size, | 746 | tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size, |
| 743 | &tgt->xferq_dma, GFP_KERNEL); | 747 | &tgt->xferq_dma, GFP_KERNEL); |
| @@ -750,8 +754,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
| 750 | 754 | ||
| 751 | /* Allocate and map CONFQ & CONFQ PBL */ | 755 | /* Allocate and map CONFQ & CONFQ PBL */ |
| 752 | tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE; | 756 | tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE; |
| 753 | tgt->confq_mem_size = (tgt->confq_mem_size + (PAGE_SIZE - 1)) & | 757 | tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) & |
| 754 | PAGE_MASK; | 758 | CNIC_PAGE_MASK; |
| 755 | 759 | ||
| 756 | tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size, | 760 | tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size, |
| 757 | &tgt->confq_dma, GFP_KERNEL); | 761 | &tgt->confq_dma, GFP_KERNEL); |
| @@ -763,9 +767,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
| 763 | memset(tgt->confq, 0, tgt->confq_mem_size); | 767 | memset(tgt->confq, 0, tgt->confq_mem_size); |
| 764 | 768 | ||
| 765 | tgt->confq_pbl_size = | 769 | tgt->confq_pbl_size = |
| 766 | (tgt->confq_mem_size / PAGE_SIZE) * sizeof(void *); | 770 | (tgt->confq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); |
| 767 | tgt->confq_pbl_size = | 771 | tgt->confq_pbl_size = |
| 768 | (tgt->confq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; | 772 | (tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; |
| 769 | 773 | ||
| 770 | tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev, | 774 | tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev, |
| 771 | tgt->confq_pbl_size, | 775 | tgt->confq_pbl_size, |
| @@ -777,7 +781,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
| 777 | } | 781 | } |
| 778 | 782 | ||
| 779 | memset(tgt->confq_pbl, 0, tgt->confq_pbl_size); | 783 | memset(tgt->confq_pbl, 0, tgt->confq_pbl_size); |
| 780 | num_pages = tgt->confq_mem_size / PAGE_SIZE; | 784 | num_pages = tgt->confq_mem_size / CNIC_PAGE_SIZE; |
| 781 | page = tgt->confq_dma; | 785 | page = tgt->confq_dma; |
| 782 | pbl = (u32 *)tgt->confq_pbl; | 786 | pbl = (u32 *)tgt->confq_pbl; |
| 783 | 787 | ||
| @@ -786,7 +790,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
| 786 | pbl++; | 790 | pbl++; |
| 787 | *pbl = (u32)((u64)page >> 32); | 791 | *pbl = (u32)((u64)page >> 32); |
| 788 | pbl++; | 792 | pbl++; |
| 789 | page += PAGE_SIZE; | 793 | page += CNIC_PAGE_SIZE; |
| 790 | } | 794 | } |
| 791 | 795 | ||
| 792 | /* Allocate and map ConnDB */ | 796 | /* Allocate and map ConnDB */ |
| @@ -805,8 +809,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
| 805 | 809 | ||
| 806 | /* Allocate and map LCQ */ | 810 | /* Allocate and map LCQ */ |
| 807 | tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE; | 811 | tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE; |
| 808 | tgt->lcq_mem_size = (tgt->lcq_mem_size + (PAGE_SIZE - 1)) & | 812 | tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) & |
| 809 | PAGE_MASK; | 813 | CNIC_PAGE_MASK; |
| 810 | 814 | ||
| 811 | tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size, | 815 | tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size, |
| 812 | &tgt->lcq_dma, GFP_KERNEL); | 816 | &tgt->lcq_dma, GFP_KERNEL); |
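For the session queues the same conversion covers both the size rounding and the page-table (PBL) fill, so the firmware sees addresses advancing in its own page units. A condensed form of the RQ case from the hunks above:

tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
num_pages = tgt->rq_mem_size / CNIC_PAGE_SIZE;
page = tgt->rq_dma;
pbl = (u32 *)tgt->rq_pbl;
while (num_pages--) {
	*pbl++ = (u32)page;		 /* low 32 bits of the page address */
	*pbl++ = (u32)((u64)page >> 32); /* high 32 bits */
	page += CNIC_PAGE_SIZE;		 /* stride by the firmware page size */
}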
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c index e4cf23df4b4f..b87a1933f880 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c | |||
| @@ -61,7 +61,7 @@ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba) | |||
| 61 | * yield integral num of page buffers | 61 | * yield integral num of page buffers |
| 62 | */ | 62 | */ |
| 63 | /* adjust SQ */ | 63 | /* adjust SQ */ |
| 64 | num_elements_per_pg = PAGE_SIZE / BNX2I_SQ_WQE_SIZE; | 64 | num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE; |
| 65 | if (hba->max_sqes < num_elements_per_pg) | 65 | if (hba->max_sqes < num_elements_per_pg) |
| 66 | hba->max_sqes = num_elements_per_pg; | 66 | hba->max_sqes = num_elements_per_pg; |
| 67 | else if (hba->max_sqes % num_elements_per_pg) | 67 | else if (hba->max_sqes % num_elements_per_pg) |
| @@ -69,7 +69,7 @@ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba) | |||
| 69 | ~(num_elements_per_pg - 1); | 69 | ~(num_elements_per_pg - 1); |
| 70 | 70 | ||
| 71 | /* adjust CQ */ | 71 | /* adjust CQ */ |
| 72 | num_elements_per_pg = PAGE_SIZE / BNX2I_CQE_SIZE; | 72 | num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_CQE_SIZE; |
| 73 | if (hba->max_cqes < num_elements_per_pg) | 73 | if (hba->max_cqes < num_elements_per_pg) |
| 74 | hba->max_cqes = num_elements_per_pg; | 74 | hba->max_cqes = num_elements_per_pg; |
| 75 | else if (hba->max_cqes % num_elements_per_pg) | 75 | else if (hba->max_cqes % num_elements_per_pg) |
| @@ -77,7 +77,7 @@ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba) | |||
| 77 | ~(num_elements_per_pg - 1); | 77 | ~(num_elements_per_pg - 1); |
| 78 | 78 | ||
| 79 | /* adjust RQ */ | 79 | /* adjust RQ */ |
| 80 | num_elements_per_pg = PAGE_SIZE / BNX2I_RQ_WQE_SIZE; | 80 | num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_RQ_WQE_SIZE; |
| 81 | if (hba->max_rqes < num_elements_per_pg) | 81 | if (hba->max_rqes < num_elements_per_pg) |
| 82 | hba->max_rqes = num_elements_per_pg; | 82 | hba->max_rqes = num_elements_per_pg; |
| 83 | else if (hba->max_rqes % num_elements_per_pg) | 83 | else if (hba->max_rqes % num_elements_per_pg) |
| @@ -959,7 +959,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep) | |||
| 959 | 959 | ||
| 960 | /* SQ page table */ | 960 | /* SQ page table */ |
| 961 | memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size); | 961 | memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size); |
| 962 | num_pages = ep->qp.sq_mem_size / PAGE_SIZE; | 962 | num_pages = ep->qp.sq_mem_size / CNIC_PAGE_SIZE; |
| 963 | page = ep->qp.sq_phys; | 963 | page = ep->qp.sq_phys; |
| 964 | 964 | ||
| 965 | if (cnic_dev_10g) | 965 | if (cnic_dev_10g) |
| @@ -973,7 +973,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep) | |||
| 973 | ptbl++; | 973 | ptbl++; |
| 974 | *ptbl = (u32) ((u64) page >> 32); | 974 | *ptbl = (u32) ((u64) page >> 32); |
| 975 | ptbl++; | 975 | ptbl++; |
| 976 | page += PAGE_SIZE; | 976 | page += CNIC_PAGE_SIZE; |
| 977 | } else { | 977 | } else { |
| 978 | /* PTE is written in big endian format for | 978 | /* PTE is written in big endian format for |
| 979 | * 5706/5708/5709 devices */ | 979 | * 5706/5708/5709 devices */ |
| @@ -981,13 +981,13 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep) | |||
| 981 | ptbl++; | 981 | ptbl++; |
| 982 | *ptbl = (u32) page; | 982 | *ptbl = (u32) page; |
| 983 | ptbl++; | 983 | ptbl++; |
| 984 | page += PAGE_SIZE; | 984 | page += CNIC_PAGE_SIZE; |
| 985 | } | 985 | } |
| 986 | } | 986 | } |
| 987 | 987 | ||
| 988 | /* RQ page table */ | 988 | /* RQ page table */ |
| 989 | memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size); | 989 | memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size); |
| 990 | num_pages = ep->qp.rq_mem_size / PAGE_SIZE; | 990 | num_pages = ep->qp.rq_mem_size / CNIC_PAGE_SIZE; |
| 991 | page = ep->qp.rq_phys; | 991 | page = ep->qp.rq_phys; |
| 992 | 992 | ||
| 993 | if (cnic_dev_10g) | 993 | if (cnic_dev_10g) |
| @@ -1001,7 +1001,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep) | |||
| 1001 | ptbl++; | 1001 | ptbl++; |
| 1002 | *ptbl = (u32) ((u64) page >> 32); | 1002 | *ptbl = (u32) ((u64) page >> 32); |
| 1003 | ptbl++; | 1003 | ptbl++; |
| 1004 | page += PAGE_SIZE; | 1004 | page += CNIC_PAGE_SIZE; |
| 1005 | } else { | 1005 | } else { |
| 1006 | /* PTE is written in big endian format for | 1006 | /* PTE is written in big endian format for |
| 1007 | * 5706/5708/5709 devices */ | 1007 | * 5706/5708/5709 devices */ |
| @@ -1009,13 +1009,13 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep) | |||
| 1009 | ptbl++; | 1009 | ptbl++; |
| 1010 | *ptbl = (u32) page; | 1010 | *ptbl = (u32) page; |
| 1011 | ptbl++; | 1011 | ptbl++; |
| 1012 | page += PAGE_SIZE; | 1012 | page += CNIC_PAGE_SIZE; |
| 1013 | } | 1013 | } |
| 1014 | } | 1014 | } |
| 1015 | 1015 | ||
| 1016 | /* CQ page table */ | 1016 | /* CQ page table */ |
| 1017 | memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size); | 1017 | memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size); |
| 1018 | num_pages = ep->qp.cq_mem_size / PAGE_SIZE; | 1018 | num_pages = ep->qp.cq_mem_size / CNIC_PAGE_SIZE; |
| 1019 | page = ep->qp.cq_phys; | 1019 | page = ep->qp.cq_phys; |
| 1020 | 1020 | ||
| 1021 | if (cnic_dev_10g) | 1021 | if (cnic_dev_10g) |
| @@ -1029,7 +1029,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep) | |||
| 1029 | ptbl++; | 1029 | ptbl++; |
| 1030 | *ptbl = (u32) ((u64) page >> 32); | 1030 | *ptbl = (u32) ((u64) page >> 32); |
| 1031 | ptbl++; | 1031 | ptbl++; |
| 1032 | page += PAGE_SIZE; | 1032 | page += CNIC_PAGE_SIZE; |
| 1033 | } else { | 1033 | } else { |
| 1034 | /* PTE is written in big endian format for | 1034 | /* PTE is written in big endian format for |
| 1035 | * 5706/5708/5709 devices */ | 1035 | * 5706/5708/5709 devices */ |
| @@ -1037,7 +1037,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep) | |||
| 1037 | ptbl++; | 1037 | ptbl++; |
| 1038 | *ptbl = (u32) page; | 1038 | *ptbl = (u32) page; |
| 1039 | ptbl++; | 1039 | ptbl++; |
| 1040 | page += PAGE_SIZE; | 1040 | page += CNIC_PAGE_SIZE; |
| 1041 | } | 1041 | } |
| 1042 | } | 1042 | } |
| 1043 | } | 1043 | } |
| @@ -1064,11 +1064,11 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) | |||
| 1064 | /* Allocate page table memory for SQ which is page aligned */ | 1064 | /* Allocate page table memory for SQ which is page aligned */ |
| 1065 | ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE; | 1065 | ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE; |
| 1066 | ep->qp.sq_mem_size = | 1066 | ep->qp.sq_mem_size = |
| 1067 | (ep->qp.sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; | 1067 | (ep->qp.sq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; |
| 1068 | ep->qp.sq_pgtbl_size = | 1068 | ep->qp.sq_pgtbl_size = |
| 1069 | (ep->qp.sq_mem_size / PAGE_SIZE) * sizeof(void *); | 1069 | (ep->qp.sq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); |
| 1070 | ep->qp.sq_pgtbl_size = | 1070 | ep->qp.sq_pgtbl_size = |
| 1071 | (ep->qp.sq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; | 1071 | (ep->qp.sq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; |
| 1072 | 1072 | ||
| 1073 | ep->qp.sq_pgtbl_virt = | 1073 | ep->qp.sq_pgtbl_virt = |
| 1074 | dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size, | 1074 | dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size, |
| @@ -1101,11 +1101,11 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) | |||
| 1101 | /* Allocate page table memory for CQ which is page aligned */ | 1101 | /* Allocate page table memory for CQ which is page aligned */ |
| 1102 | ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE; | 1102 | ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE; |
| 1103 | ep->qp.cq_mem_size = | 1103 | ep->qp.cq_mem_size = |
| 1104 | (ep->qp.cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; | 1104 | (ep->qp.cq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; |
| 1105 | ep->qp.cq_pgtbl_size = | 1105 | ep->qp.cq_pgtbl_size = |
| 1106 | (ep->qp.cq_mem_size / PAGE_SIZE) * sizeof(void *); | 1106 | (ep->qp.cq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); |
| 1107 | ep->qp.cq_pgtbl_size = | 1107 | ep->qp.cq_pgtbl_size = |
| 1108 | (ep->qp.cq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; | 1108 | (ep->qp.cq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; |
| 1109 | 1109 | ||
| 1110 | ep->qp.cq_pgtbl_virt = | 1110 | ep->qp.cq_pgtbl_virt = |
| 1111 | dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size, | 1111 | dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size, |
| @@ -1144,11 +1144,11 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) | |||
| 1144 | /* Allocate page table memory for RQ which is page aligned */ | 1144 | /* Allocate page table memory for RQ which is page aligned */ |
| 1145 | ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE; | 1145 | ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE; |
| 1146 | ep->qp.rq_mem_size = | 1146 | ep->qp.rq_mem_size = |
| 1147 | (ep->qp.rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; | 1147 | (ep->qp.rq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; |
| 1148 | ep->qp.rq_pgtbl_size = | 1148 | ep->qp.rq_pgtbl_size = |
| 1149 | (ep->qp.rq_mem_size / PAGE_SIZE) * sizeof(void *); | 1149 | (ep->qp.rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); |
| 1150 | ep->qp.rq_pgtbl_size = | 1150 | ep->qp.rq_pgtbl_size = |
| 1151 | (ep->qp.rq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; | 1151 | (ep->qp.rq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; |
| 1152 | 1152 | ||
| 1153 | ep->qp.rq_pgtbl_virt = | 1153 | ep->qp.rq_pgtbl_virt = |
| 1154 | dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size, | 1154 | dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size, |
| @@ -1270,7 +1270,7 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba) | |||
| 1270 | bnx2i_adjust_qp_size(hba); | 1270 | bnx2i_adjust_qp_size(hba); |
| 1271 | 1271 | ||
| 1272 | iscsi_init.flags = | 1272 | iscsi_init.flags = |
| 1273 | ISCSI_PAGE_SIZE_4K << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT; | 1273 | (CNIC_PAGE_BITS - 8) << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT; |
| 1274 | if (en_tcp_dack) | 1274 | if (en_tcp_dack) |
| 1275 | iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE; | 1275 | iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE; |
| 1276 | iscsi_init.reserved0 = 0; | 1276 | iscsi_init.reserved0 = 0; |
| @@ -1288,15 +1288,15 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba) | |||
| 1288 | ((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16)); | 1288 | ((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16)); |
| 1289 | iscsi_init.num_ccells_per_conn = hba->num_ccell; | 1289 | iscsi_init.num_ccells_per_conn = hba->num_ccell; |
| 1290 | iscsi_init.num_tasks_per_conn = hba->max_sqes; | 1290 | iscsi_init.num_tasks_per_conn = hba->max_sqes; |
| 1291 | iscsi_init.sq_wqes_per_page = PAGE_SIZE / BNX2I_SQ_WQE_SIZE; | 1291 | iscsi_init.sq_wqes_per_page = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE; |
| 1292 | iscsi_init.sq_num_wqes = hba->max_sqes; | 1292 | iscsi_init.sq_num_wqes = hba->max_sqes; |
| 1293 | iscsi_init.cq_log_wqes_per_page = | 1293 | iscsi_init.cq_log_wqes_per_page = |
| 1294 | (u8) bnx2i_power_of2(PAGE_SIZE / BNX2I_CQE_SIZE); | 1294 | (u8) bnx2i_power_of2(CNIC_PAGE_SIZE / BNX2I_CQE_SIZE); |
| 1295 | iscsi_init.cq_num_wqes = hba->max_cqes; | 1295 | iscsi_init.cq_num_wqes = hba->max_cqes; |
| 1296 | iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE + | 1296 | iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE + |
| 1297 | (PAGE_SIZE - 1)) / PAGE_SIZE; | 1297 | (CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE; |
| 1298 | iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE + | 1298 | iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE + |
| 1299 | (PAGE_SIZE - 1)) / PAGE_SIZE; | 1299 | (CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE; |
| 1300 | iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE; | 1300 | iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE; |
| 1301 | iscsi_init.rq_num_wqes = hba->max_rqes; | 1301 | iscsi_init.rq_num_wqes = hba->max_rqes; |
| 1302 | 1302 | ||
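bnx2i applies the same page geometry to its iSCSI init block. One detail worth calling out is the flags field: judging from the expression in the hunk, the page-size code is log2(page size) minus 8 (pages counted in 256-byte units), so with a 4 KiB cnic page (CNIC_PAGE_BITS assumed to be 12) the encoded value matches what the old ISCSI_PAGE_SIZE_4K constant produced; that reading is an inference from the code, not a documented fact. Condensed:

iscsi_init.flags = (CNIC_PAGE_BITS - 8) << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT;
iscsi_init.sq_wqes_per_page = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE +
			   (CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE;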
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index 854dad7d5b03..c8b0aff5bbd4 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c | |||
| @@ -525,7 +525,7 @@ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba) | |||
| 525 | struct iscsi_bd *mp_bdt; | 525 | struct iscsi_bd *mp_bdt; |
| 526 | u64 addr; | 526 | u64 addr; |
| 527 | 527 | ||
| 528 | hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, | 528 | hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, |
| 529 | &hba->mp_bd_dma, GFP_KERNEL); | 529 | &hba->mp_bd_dma, GFP_KERNEL); |
| 530 | if (!hba->mp_bd_tbl) { | 530 | if (!hba->mp_bd_tbl) { |
| 531 | printk(KERN_ERR "unable to allocate Middle Path BDT\n"); | 531 | printk(KERN_ERR "unable to allocate Middle Path BDT\n"); |
| @@ -533,11 +533,12 @@ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba) | |||
| 533 | goto out; | 533 | goto out; |
| 534 | } | 534 | } |
| 535 | 535 | ||
| 536 | hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, | 536 | hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, |
| 537 | CNIC_PAGE_SIZE, | ||
| 537 | &hba->dummy_buf_dma, GFP_KERNEL); | 538 | &hba->dummy_buf_dma, GFP_KERNEL); |
| 538 | if (!hba->dummy_buffer) { | 539 | if (!hba->dummy_buffer) { |
| 539 | printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n"); | 540 | printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n"); |
| 540 | dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, | 541 | dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, |
| 541 | hba->mp_bd_tbl, hba->mp_bd_dma); | 542 | hba->mp_bd_tbl, hba->mp_bd_dma); |
| 542 | hba->mp_bd_tbl = NULL; | 543 | hba->mp_bd_tbl = NULL; |
| 543 | rc = -1; | 544 | rc = -1; |
| @@ -548,7 +549,7 @@ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba) | |||
| 548 | addr = (unsigned long) hba->dummy_buf_dma; | 549 | addr = (unsigned long) hba->dummy_buf_dma; |
| 549 | mp_bdt->buffer_addr_lo = addr & 0xffffffff; | 550 | mp_bdt->buffer_addr_lo = addr & 0xffffffff; |
| 550 | mp_bdt->buffer_addr_hi = addr >> 32; | 551 | mp_bdt->buffer_addr_hi = addr >> 32; |
| 551 | mp_bdt->buffer_length = PAGE_SIZE; | 552 | mp_bdt->buffer_length = CNIC_PAGE_SIZE; |
| 552 | mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN | | 553 | mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN | |
| 553 | ISCSI_BD_FIRST_IN_BD_CHAIN; | 554 | ISCSI_BD_FIRST_IN_BD_CHAIN; |
| 554 | out: | 555 | out: |
| @@ -565,12 +566,12 @@ out: | |||
| 565 | static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba) | 566 | static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba) |
| 566 | { | 567 | { |
| 567 | if (hba->mp_bd_tbl) { | 568 | if (hba->mp_bd_tbl) { |
| 568 | dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, | 569 | dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, |
| 569 | hba->mp_bd_tbl, hba->mp_bd_dma); | 570 | hba->mp_bd_tbl, hba->mp_bd_dma); |
| 570 | hba->mp_bd_tbl = NULL; | 571 | hba->mp_bd_tbl = NULL; |
| 571 | } | 572 | } |
| 572 | if (hba->dummy_buffer) { | 573 | if (hba->dummy_buffer) { |
| 573 | dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, | 574 | dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, |
| 574 | hba->dummy_buffer, hba->dummy_buf_dma); | 575 | hba->dummy_buffer, hba->dummy_buf_dma); |
| 575 | hba->dummy_buffer = NULL; | 576 | hba->dummy_buffer = NULL; |
| 576 | } | 577 | } |
| @@ -934,14 +935,14 @@ static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba, | |||
| 934 | struct bnx2i_conn *bnx2i_conn) | 935 | struct bnx2i_conn *bnx2i_conn) |
| 935 | { | 936 | { |
| 936 | if (bnx2i_conn->gen_pdu.resp_bd_tbl) { | 937 | if (bnx2i_conn->gen_pdu.resp_bd_tbl) { |
| 937 | dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, | 938 | dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, |
| 938 | bnx2i_conn->gen_pdu.resp_bd_tbl, | 939 | bnx2i_conn->gen_pdu.resp_bd_tbl, |
| 939 | bnx2i_conn->gen_pdu.resp_bd_dma); | 940 | bnx2i_conn->gen_pdu.resp_bd_dma); |
| 940 | bnx2i_conn->gen_pdu.resp_bd_tbl = NULL; | 941 | bnx2i_conn->gen_pdu.resp_bd_tbl = NULL; |
| 941 | } | 942 | } |
| 942 | 943 | ||
| 943 | if (bnx2i_conn->gen_pdu.req_bd_tbl) { | 944 | if (bnx2i_conn->gen_pdu.req_bd_tbl) { |
| 944 | dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, | 945 | dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, |
| 945 | bnx2i_conn->gen_pdu.req_bd_tbl, | 946 | bnx2i_conn->gen_pdu.req_bd_tbl, |
| 946 | bnx2i_conn->gen_pdu.req_bd_dma); | 947 | bnx2i_conn->gen_pdu.req_bd_dma); |
| 947 | bnx2i_conn->gen_pdu.req_bd_tbl = NULL; | 948 | bnx2i_conn->gen_pdu.req_bd_tbl = NULL; |
| @@ -998,13 +999,13 @@ static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba, | |||
| 998 | bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf; | 999 | bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf; |
| 999 | 1000 | ||
| 1000 | bnx2i_conn->gen_pdu.req_bd_tbl = | 1001 | bnx2i_conn->gen_pdu.req_bd_tbl = |
| 1001 | dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, | 1002 | dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, |
| 1002 | &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL); | 1003 | &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL); |
| 1003 | if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL) | 1004 | if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL) |
| 1004 | goto login_req_bd_tbl_failure; | 1005 | goto login_req_bd_tbl_failure; |
| 1005 | 1006 | ||
| 1006 | bnx2i_conn->gen_pdu.resp_bd_tbl = | 1007 | bnx2i_conn->gen_pdu.resp_bd_tbl = |
| 1007 | dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, | 1008 | dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, |
| 1008 | &bnx2i_conn->gen_pdu.resp_bd_dma, | 1009 | &bnx2i_conn->gen_pdu.resp_bd_dma, |
| 1009 | GFP_KERNEL); | 1010 | GFP_KERNEL); |
| 1010 | if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL) | 1011 | if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL) |
| @@ -1013,7 +1014,7 @@ static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba, | |||
| 1013 | return 0; | 1014 | return 0; |
| 1014 | 1015 | ||
| 1015 | login_resp_bd_tbl_failure: | 1016 | login_resp_bd_tbl_failure: |
| 1016 | dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, | 1017 | dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, |
| 1017 | bnx2i_conn->gen_pdu.req_bd_tbl, | 1018 | bnx2i_conn->gen_pdu.req_bd_tbl, |
| 1018 | bnx2i_conn->gen_pdu.req_bd_dma); | 1019 | bnx2i_conn->gen_pdu.req_bd_dma); |
| 1019 | bnx2i_conn->gen_pdu.req_bd_tbl = NULL; | 1020 | bnx2i_conn->gen_pdu.req_bd_tbl = NULL; |
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h index 4911310a38f5..22a9bb1abae1 100644 --- a/drivers/scsi/isci/host.h +++ b/drivers/scsi/isci/host.h | |||
| @@ -311,9 +311,8 @@ static inline struct Scsi_Host *to_shost(struct isci_host *ihost) | |||
| 311 | } | 311 | } |
| 312 | 312 | ||
| 313 | #define for_each_isci_host(id, ihost, pdev) \ | 313 | #define for_each_isci_host(id, ihost, pdev) \ |
| 314 | for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \ | 314 | for (id = 0; id < SCI_MAX_CONTROLLERS && \ |
| 315 | id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \ | 315 | (ihost = to_pci_info(pdev)->hosts[id]); id++) |
| 316 | ihost = to_pci_info(pdev)->hosts[++id]) | ||
| 317 | 316 | ||
| 318 | static inline void wait_for_start(struct isci_host *ihost) | 317 | static inline void wait_for_start(struct isci_host *ihost) |
| 319 | { | 318 | { |
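
The old for_each_isci_host() evaluated hosts[++id] in the loop increment before the bound was re-checked, so the final pass could read one slot past the array; the new form tests the index before dereferencing. A hedged standalone sketch of the safer iterator shape (array size and names invented):

/* Standalone sketch of the iterator fix: test the index before the
 * array read, so the loop never touches hosts[MAX].  Names are
 * illustrative, not the driver's.
 */
#include <stdio.h>

#define DEMO_MAX_HOSTS 2

struct demo_host { int id; };

#define demo_for_each_host(id, h, hosts)                                \
        for ((id) = 0;                                                  \
             (id) < DEMO_MAX_HOSTS && ((h) = (hosts)[(id)]);            \
             (id)++)

int main(void)
{
        struct demo_host a = { 0 }, b = { 1 };
        struct demo_host *hosts[DEMO_MAX_HOSTS] = { &a, &b };
        struct demo_host *h;
        int id;

        demo_for_each_host(id, h, hosts)
                printf("host %d\n", h->id);
        return 0;
}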
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c index 85c77f6b802b..ac879745ef80 100644 --- a/drivers/scsi/isci/port_config.c +++ b/drivers/scsi/isci/port_config.c | |||
| @@ -615,13 +615,6 @@ static void sci_apc_agent_link_up(struct isci_host *ihost, | |||
| 615 | SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION); | 615 | SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION); |
| 616 | } else { | 616 | } else { |
| 617 | /* the phy is already the part of the port */ | 617 | /* the phy is already the part of the port */ |
| 618 | u32 port_state = iport->sm.current_state_id; | ||
| 619 | |||
| 620 | /* if the PORT'S state is resetting then the link up is from | ||
| 621 | * port hard reset in this case, we need to tell the port | ||
| 622 | * that link up is received | ||
| 623 | */ | ||
| 624 | BUG_ON(port_state != SCI_PORT_RESETTING); | ||
| 625 | port_agent->phy_ready_mask |= 1 << phy_index; | 618 | port_agent->phy_ready_mask |= 1 << phy_index; |
| 626 | sci_port_link_up(iport, iphy); | 619 | sci_port_link_up(iport, iphy); |
| 627 | } | 620 | } |
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index 0d30ca849e8f..5d6fda72d659 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c | |||
| @@ -801,7 +801,7 @@ int isci_task_I_T_nexus_reset(struct domain_device *dev) | |||
| 801 | /* XXX: need to cleanup any ireqs targeting this | 801 | /* XXX: need to cleanup any ireqs targeting this |
| 802 | * domain_device | 802 | * domain_device |
| 803 | */ | 803 | */ |
| 804 | ret = TMF_RESP_FUNC_COMPLETE; | 804 | ret = -ENODEV; |
| 805 | goto out; | 805 | goto out; |
| 806 | } | 806 | } |
| 807 | 807 | ||
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index e1fe95ef23e1..266724b6b899 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h | |||
| @@ -2996,8 +2996,7 @@ struct qla_hw_data { | |||
| 2996 | IS_QLA82XX(ha) || IS_QLA83XX(ha) || \ | 2996 | IS_QLA82XX(ha) || IS_QLA83XX(ha) || \ |
| 2997 | IS_QLA8044(ha)) | 2997 | IS_QLA8044(ha)) |
| 2998 | #define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha)) | 2998 | #define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha)) |
| 2999 | #define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || \ | 2999 | #define IS_NOPOLLING_TYPE(ha) (IS_QLA81XX(ha) && (ha)->flags.msix_enabled) |
| 3000 | IS_QLA83XX(ha)) && (ha)->flags.msix_enabled) | ||
| 3001 | #define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha)) | 3000 | #define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha)) |
| 3002 | #define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha)) | 3001 | #define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha)) |
| 3003 | #define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha)) | 3002 | #define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha)) |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 9bc86b9e86b1..0a1dcb43d18b 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
| @@ -2880,6 +2880,7 @@ static int | |||
| 2880 | qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) | 2880 | qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) |
| 2881 | { | 2881 | { |
| 2882 | #define MIN_MSIX_COUNT 2 | 2882 | #define MIN_MSIX_COUNT 2 |
| 2883 | #define ATIO_VECTOR 2 | ||
| 2883 | int i, ret; | 2884 | int i, ret; |
| 2884 | struct msix_entry *entries; | 2885 | struct msix_entry *entries; |
| 2885 | struct qla_msix_entry *qentry; | 2886 | struct qla_msix_entry *qentry; |
| @@ -2936,34 +2937,47 @@ msix_failed: | |||
| 2936 | } | 2937 | } |
| 2937 | 2938 | ||
| 2938 | /* Enable MSI-X vectors for the base queue */ | 2939 | /* Enable MSI-X vectors for the base queue */ |
| 2939 | for (i = 0; i < ha->msix_count; i++) { | 2940 | for (i = 0; i < 2; i++) { |
| 2940 | qentry = &ha->msix_entries[i]; | 2941 | qentry = &ha->msix_entries[i]; |
| 2941 | if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) { | 2942 | if (IS_P3P_TYPE(ha)) |
| 2942 | ret = request_irq(qentry->vector, | ||
| 2943 | qla83xx_msix_entries[i].handler, | ||
| 2944 | 0, qla83xx_msix_entries[i].name, rsp); | ||
| 2945 | } else if (IS_P3P_TYPE(ha)) { | ||
| 2946 | ret = request_irq(qentry->vector, | 2943 | ret = request_irq(qentry->vector, |
| 2947 | qla82xx_msix_entries[i].handler, | 2944 | qla82xx_msix_entries[i].handler, |
| 2948 | 0, qla82xx_msix_entries[i].name, rsp); | 2945 | 0, qla82xx_msix_entries[i].name, rsp); |
| 2949 | } else { | 2946 | else |
| 2950 | ret = request_irq(qentry->vector, | 2947 | ret = request_irq(qentry->vector, |
| 2951 | msix_entries[i].handler, | 2948 | msix_entries[i].handler, |
| 2952 | 0, msix_entries[i].name, rsp); | 2949 | 0, msix_entries[i].name, rsp); |
| 2953 | } | 2950 | if (ret) |
| 2954 | if (ret) { | 2951 | goto msix_register_fail; |
| 2955 | ql_log(ql_log_fatal, vha, 0x00cb, | ||
| 2956 | "MSI-X: unable to register handler -- %x/%d.\n", | ||
| 2957 | qentry->vector, ret); | ||
| 2958 | qla24xx_disable_msix(ha); | ||
| 2959 | ha->mqenable = 0; | ||
| 2960 | goto msix_out; | ||
| 2961 | } | ||
| 2962 | qentry->have_irq = 1; | 2952 | qentry->have_irq = 1; |
| 2963 | qentry->rsp = rsp; | 2953 | qentry->rsp = rsp; |
| 2964 | rsp->msix = qentry; | 2954 | rsp->msix = qentry; |
| 2965 | } | 2955 | } |
| 2966 | 2956 | ||
| 2957 | /* | ||
| 2958 | * If target mode is enable, also request the vector for the ATIO | ||
| 2959 | * queue. | ||
| 2960 | */ | ||
| 2961 | if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) { | ||
| 2962 | qentry = &ha->msix_entries[ATIO_VECTOR]; | ||
| 2963 | ret = request_irq(qentry->vector, | ||
| 2964 | qla83xx_msix_entries[ATIO_VECTOR].handler, | ||
| 2965 | 0, qla83xx_msix_entries[ATIO_VECTOR].name, rsp); | ||
| 2966 | qentry->have_irq = 1; | ||
| 2967 | qentry->rsp = rsp; | ||
| 2968 | rsp->msix = qentry; | ||
| 2969 | } | ||
| 2970 | |||
| 2971 | msix_register_fail: | ||
| 2972 | if (ret) { | ||
| 2973 | ql_log(ql_log_fatal, vha, 0x00cb, | ||
| 2974 | "MSI-X: unable to register handler -- %x/%d.\n", | ||
| 2975 | qentry->vector, ret); | ||
| 2976 | qla24xx_disable_msix(ha); | ||
| 2977 | ha->mqenable = 0; | ||
| 2978 | goto msix_out; | ||
| 2979 | } | ||
| 2980 | |||
| 2967 | /* Enable MSI-X vector for response queue update for queue 0 */ | 2981 | /* Enable MSI-X vector for response queue update for queue 0 */ |
| 2968 | if (IS_QLA83XX(ha)) { | 2982 | if (IS_QLA83XX(ha)) { |
| 2969 | if (ha->msixbase && ha->mqiobase && | 2983 | if (ha->msixbase && ha->mqiobase && |
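
The MSI-X rework requests only the two base-queue vectors in the loop and, when target mode is enabled, requests the ATIO vector separately, with failures funnelled through the single msix_register_fail label. A rough userspace analogue of that control flow (the fake register_vec() and all names are assumptions, not the qla2xxx API):

/* Standalone analogue of the reworked registration flow: two fixed
 * base vectors, one optional extra vector, and a single error path.
 * Everything here is illustrative.
 */
#include <stdio.h>

#define DEMO_BASE_VECTORS 2
#define DEMO_ATIO_VECTOR  2

static int register_vec(int vec)
{
        printf("registered vector %d\n", vec);
        return 0;                       /* 0 on success, like request_irq() */
}

static int demo_enable_vectors(int target_mode)
{
        int i, ret = 0;

        for (i = 0; i < DEMO_BASE_VECTORS; i++) {
                ret = register_vec(i);
                if (ret)
                        goto register_fail;
        }

        /* Optional extra vector, only when target mode is enabled. */
        if (target_mode)
                ret = register_vec(DEMO_ATIO_VECTOR);

register_fail:
        if (ret)
                fprintf(stderr, "unable to register handler: %d\n", ret);
        return ret;
}

int main(void)
{
        return demo_enable_vectors(1);
}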
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 17d740427240..9969fa1ef7c4 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c | |||
| @@ -1419,6 +1419,9 @@ static void storvsc_device_destroy(struct scsi_device *sdevice) | |||
| 1419 | { | 1419 | { |
| 1420 | struct stor_mem_pools *memp = sdevice->hostdata; | 1420 | struct stor_mem_pools *memp = sdevice->hostdata; |
| 1421 | 1421 | ||
| 1422 | if (!memp) | ||
| 1423 | return; | ||
| 1424 | |||
| 1422 | mempool_destroy(memp->request_mempool); | 1425 | mempool_destroy(memp->request_mempool); |
| 1423 | kmem_cache_destroy(memp->request_pool); | 1426 | kmem_cache_destroy(memp->request_pool); |
| 1424 | kfree(memp); | 1427 | kfree(memp); |
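
storvsc_device_destroy() may now run for a device whose hostdata was never populated, so it bails out on NULL instead of dereferencing it. A minimal sketch of the same guard, with invented types:

/* Standalone sketch: a destroy path that tolerates being called when
 * the per-device state was never allocated.  Types and names are made up.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_pools {
        void *request_pool;
};

static void demo_device_destroy(struct demo_pools *memp)
{
        if (!memp)              /* nothing was ever allocated: bail out */
                return;

        free(memp->request_pool);
        free(memp);
}

int main(void)
{
        demo_device_destroy(NULL);              /* must be a harmless no-op */

        struct demo_pools *p = calloc(1, sizeof(*p));
        p->request_pool = malloc(32);
        demo_device_destroy(p);
        printf("done\n");
        return 0;
}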
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c index 31534b51715a..c3b2fb9b6713 100644 --- a/drivers/spi/spi-ath79.c +++ b/drivers/spi/spi-ath79.c | |||
| @@ -132,9 +132,9 @@ static int ath79_spi_setup_cs(struct spi_device *spi) | |||
| 132 | 132 | ||
| 133 | flags = GPIOF_DIR_OUT; | 133 | flags = GPIOF_DIR_OUT; |
| 134 | if (spi->mode & SPI_CS_HIGH) | 134 | if (spi->mode & SPI_CS_HIGH) |
| 135 | flags |= GPIOF_INIT_HIGH; | ||
| 136 | else | ||
| 137 | flags |= GPIOF_INIT_LOW; | 135 | flags |= GPIOF_INIT_LOW; |
| 136 | else | ||
| 137 | flags |= GPIOF_INIT_HIGH; | ||
| 138 | 138 | ||
| 139 | status = gpio_request_one(cdata->gpio, flags, | 139 | status = gpio_request_one(cdata->gpio, flags, |
| 140 | dev_name(&spi->dev)); | 140 | dev_name(&spi->dev)); |
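
With a GPIO chip select, the requested initial level must be the inactive state: low for an active-high (SPI_CS_HIGH) chip select, high for the default active-low one; the original code had the two swapped. A tiny sketch of the corrected mapping (the flag value mirrors the usual SPI_CS_HIGH bit but should be treated as illustrative):

/* Standalone sketch: derive the *idle* level of a GPIO chip-select
 * from the SPI mode bits.  The flag value is illustrative.
 */
#include <stdio.h>

#define DEMO_SPI_CS_HIGH 0x04   /* chip select is active high */

static int demo_cs_idle_level(unsigned int mode)
{
        /* Idle means "not selected": the opposite of the active level. */
        return (mode & DEMO_SPI_CS_HIGH) ? 0 : 1;
}

int main(void)
{
        printf("active-low CS idles at %d\n", demo_cs_idle_level(0));
        printf("active-high CS idles at %d\n",
               demo_cs_idle_level(DEMO_SPI_CS_HIGH));
        return 0;
}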
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index b0842f751016..5d7b07f08326 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c | |||
| @@ -1455,6 +1455,14 @@ static int atmel_spi_suspend(struct device *dev) | |||
| 1455 | { | 1455 | { |
| 1456 | struct spi_master *master = dev_get_drvdata(dev); | 1456 | struct spi_master *master = dev_get_drvdata(dev); |
| 1457 | struct atmel_spi *as = spi_master_get_devdata(master); | 1457 | struct atmel_spi *as = spi_master_get_devdata(master); |
| 1458 | int ret; | ||
| 1459 | |||
| 1460 | /* Stop the queue running */ | ||
| 1461 | ret = spi_master_suspend(master); | ||
| 1462 | if (ret) { | ||
| 1463 | dev_warn(dev, "cannot suspend master\n"); | ||
| 1464 | return ret; | ||
| 1465 | } | ||
| 1458 | 1466 | ||
| 1459 | clk_disable_unprepare(as->clk); | 1467 | clk_disable_unprepare(as->clk); |
| 1460 | return 0; | 1468 | return 0; |
| @@ -1464,9 +1472,16 @@ static int atmel_spi_resume(struct device *dev) | |||
| 1464 | { | 1472 | { |
| 1465 | struct spi_master *master = dev_get_drvdata(dev); | 1473 | struct spi_master *master = dev_get_drvdata(dev); |
| 1466 | struct atmel_spi *as = spi_master_get_devdata(master); | 1474 | struct atmel_spi *as = spi_master_get_devdata(master); |
| 1475 | int ret; | ||
| 1467 | 1476 | ||
| 1468 | clk_prepare_enable(as->clk); | 1477 | clk_prepare_enable(as->clk); |
| 1469 | return 0; | 1478 | |
| 1479 | /* Start the queue running */ | ||
| 1480 | ret = spi_master_resume(master); | ||
| 1481 | if (ret) | ||
| 1482 | dev_err(dev, "problem starting queue (%d)\n", ret); | ||
| 1483 | |||
| 1484 | return ret; | ||
| 1470 | } | 1485 | } |
| 1471 | 1486 | ||
| 1472 | static SIMPLE_DEV_PM_OPS(atmel_spi_pm_ops, atmel_spi_suspend, atmel_spi_resume); | 1487 | static SIMPLE_DEV_PM_OPS(atmel_spi_pm_ops, atmel_spi_suspend, atmel_spi_resume); |
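
The atmel suspend path now stops the SPI message queue before gating the clock, and resume ungates the clock before restarting the queue, so no transfer can run against a disabled clock. A hedged sketch of that ordering with stand-in helpers (not the SPI core API):

/* Standalone sketch of the suspend/resume ordering: quiesce first,
 * then cut the clock; on resume, clock first, then restart.  The
 * helpers are stand-ins, not kernel functions.
 */
#include <stdio.h>

static int demo_queue_stop(void)   { printf("queue stopped\n");  return 0; }
static int demo_queue_start(void)  { printf("queue running\n");  return 0; }
static void demo_clk_disable(void) { printf("clock gated\n");   }
static void demo_clk_enable(void)  { printf("clock ungated\n"); }

static int demo_suspend(void)
{
        int ret = demo_queue_stop();    /* no new transfers after this */

        if (ret)
                return ret;             /* leave the clock alone on failure */
        demo_clk_disable();
        return 0;
}

static int demo_resume(void)
{
        demo_clk_enable();              /* clock must be up before transfers */
        return demo_queue_start();
}

int main(void)
{
        demo_suspend();
        return demo_resume();
}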
diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c index cabed8f9119e..28ae470397a9 100644 --- a/drivers/spi/spi-coldfire-qspi.c +++ b/drivers/spi/spi-coldfire-qspi.c | |||
| @@ -514,7 +514,8 @@ static int mcfqspi_resume(struct device *dev) | |||
| 514 | #ifdef CONFIG_PM_RUNTIME | 514 | #ifdef CONFIG_PM_RUNTIME |
| 515 | static int mcfqspi_runtime_suspend(struct device *dev) | 515 | static int mcfqspi_runtime_suspend(struct device *dev) |
| 516 | { | 516 | { |
| 517 | struct mcfqspi *mcfqspi = dev_get_drvdata(dev); | 517 | struct spi_master *master = dev_get_drvdata(dev); |
| 518 | struct mcfqspi *mcfqspi = spi_master_get_devdata(master); | ||
| 518 | 519 | ||
| 519 | clk_disable(mcfqspi->clk); | 520 | clk_disable(mcfqspi->clk); |
| 520 | 521 | ||
| @@ -523,7 +524,8 @@ static int mcfqspi_runtime_suspend(struct device *dev) | |||
| 523 | 524 | ||
| 524 | static int mcfqspi_runtime_resume(struct device *dev) | 525 | static int mcfqspi_runtime_resume(struct device *dev) |
| 525 | { | 526 | { |
| 526 | struct mcfqspi *mcfqspi = dev_get_drvdata(dev); | 527 | struct spi_master *master = dev_get_drvdata(dev); |
| 528 | struct mcfqspi *mcfqspi = spi_master_get_devdata(master); | ||
| 527 | 529 | ||
| 528 | clk_enable(mcfqspi->clk); | 530 | clk_enable(mcfqspi->clk); |
| 529 | 531 | ||
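
Here, and in the fsl-dspi hunks below, the device's drvdata holds the struct spi_master, so helpers must fetch the master first and then use spi_master_get_devdata() to reach the private state; reading drvdata as the private struct directly returns the wrong object. A small sketch of the two-hop lookup, with invented types:

/* Standalone sketch of the drvdata indirection: the device stores a
 * pointer to a framework object, and the private state hangs off that
 * object, so lookups must go through both hops.  Types are made up.
 */
#include <stdio.h>

struct demo_device { void *drvdata; };
struct demo_master { void *devdata; };
struct demo_priv   { int clk_enabled; };

static void *demo_master_get_devdata(struct demo_master *m)
{
        return m->devdata;
}

int main(void)
{
        struct demo_priv priv = { .clk_enabled = 1 };
        struct demo_master master = { .devdata = &priv };
        struct demo_device dev = { .drvdata = &master };

        /* drvdata holds the master, not the private struct, so two hops: */
        struct demo_master *m = dev.drvdata;
        struct demo_priv *p = demo_master_get_devdata(m);

        printf("clk_enabled=%d\n", p->clk_enabled);
        return 0;
}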
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index ec79f726672a..a25392065d9b 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c | |||
| @@ -420,7 +420,6 @@ static int dspi_suspend(struct device *dev) | |||
| 420 | 420 | ||
| 421 | static int dspi_resume(struct device *dev) | 421 | static int dspi_resume(struct device *dev) |
| 422 | { | 422 | { |
| 423 | |||
| 424 | struct spi_master *master = dev_get_drvdata(dev); | 423 | struct spi_master *master = dev_get_drvdata(dev); |
| 425 | struct fsl_dspi *dspi = spi_master_get_devdata(master); | 424 | struct fsl_dspi *dspi = spi_master_get_devdata(master); |
| 426 | 425 | ||
| @@ -504,7 +503,7 @@ static int dspi_probe(struct platform_device *pdev) | |||
| 504 | clk_prepare_enable(dspi->clk); | 503 | clk_prepare_enable(dspi->clk); |
| 505 | 504 | ||
| 506 | init_waitqueue_head(&dspi->waitq); | 505 | init_waitqueue_head(&dspi->waitq); |
| 507 | platform_set_drvdata(pdev, dspi); | 506 | platform_set_drvdata(pdev, master); |
| 508 | 507 | ||
| 509 | ret = spi_bitbang_start(&dspi->bitbang); | 508 | ret = spi_bitbang_start(&dspi->bitbang); |
| 510 | if (ret != 0) { | 509 | if (ret != 0) { |
| @@ -525,7 +524,8 @@ out_master_put: | |||
| 525 | 524 | ||
| 526 | static int dspi_remove(struct platform_device *pdev) | 525 | static int dspi_remove(struct platform_device *pdev) |
| 527 | { | 526 | { |
| 528 | struct fsl_dspi *dspi = platform_get_drvdata(pdev); | 527 | struct spi_master *master = platform_get_drvdata(pdev); |
| 528 | struct fsl_dspi *dspi = spi_master_get_devdata(master); | ||
| 529 | 529 | ||
| 530 | /* Disconnect from the SPI framework */ | 530 | /* Disconnect from the SPI framework */ |
| 531 | spi_bitbang_stop(&dspi->bitbang); | 531 | spi_bitbang_stop(&dspi->bitbang); |
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index a5474ef9d2a0..47f15d97e7fa 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c | |||
| @@ -948,8 +948,8 @@ static int spi_imx_remove(struct platform_device *pdev) | |||
| 948 | spi_bitbang_stop(&spi_imx->bitbang); | 948 | spi_bitbang_stop(&spi_imx->bitbang); |
| 949 | 949 | ||
| 950 | writel(0, spi_imx->base + MXC_CSPICTRL); | 950 | writel(0, spi_imx->base + MXC_CSPICTRL); |
| 951 | clk_disable_unprepare(spi_imx->clk_ipg); | 951 | clk_unprepare(spi_imx->clk_ipg); |
| 952 | clk_disable_unprepare(spi_imx->clk_per); | 952 | clk_unprepare(spi_imx->clk_per); |
| 953 | spi_master_put(master); | 953 | spi_master_put(master); |
| 954 | 954 | ||
| 955 | return 0; | 955 | return 0; |
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c index 2e7f38c7a961..88eb57e858b3 100644 --- a/drivers/spi/spi-topcliff-pch.c +++ b/drivers/spi/spi-topcliff-pch.c | |||
| @@ -915,7 +915,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw) | |||
| 915 | /* Set Tx DMA */ | 915 | /* Set Tx DMA */ |
| 916 | param = &dma->param_tx; | 916 | param = &dma->param_tx; |
| 917 | param->dma_dev = &dma_dev->dev; | 917 | param->dma_dev = &dma_dev->dev; |
| 918 | param->chan_id = data->master->bus_num * 2; /* Tx = 0, 2 */ | 918 | param->chan_id = data->ch * 2; /* Tx = 0, 2 */; |
| 919 | param->tx_reg = data->io_base_addr + PCH_SPDWR; | 919 | param->tx_reg = data->io_base_addr + PCH_SPDWR; |
| 920 | param->width = width; | 920 | param->width = width; |
| 921 | chan = dma_request_channel(mask, pch_spi_filter, param); | 921 | chan = dma_request_channel(mask, pch_spi_filter, param); |
| @@ -930,7 +930,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw) | |||
| 930 | /* Set Rx DMA */ | 930 | /* Set Rx DMA */ |
| 931 | param = &dma->param_rx; | 931 | param = &dma->param_rx; |
| 932 | param->dma_dev = &dma_dev->dev; | 932 | param->dma_dev = &dma_dev->dev; |
| 933 | param->chan_id = data->master->bus_num * 2 + 1; /* Rx = Tx + 1 */ | 933 | param->chan_id = data->ch * 2 + 1; /* Rx = Tx + 1 */; |
| 934 | param->rx_reg = data->io_base_addr + PCH_SPDRR; | 934 | param->rx_reg = data->io_base_addr + PCH_SPDRR; |
| 935 | param->width = width; | 935 | param->width = width; |
| 936 | chan = dma_request_channel(mask, pch_spi_filter, param); | 936 | chan = dma_request_channel(mask, pch_spi_filter, param); |
| @@ -1452,6 +1452,11 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev) | |||
| 1452 | 1452 | ||
| 1453 | pch_spi_set_master_mode(master); | 1453 | pch_spi_set_master_mode(master); |
| 1454 | 1454 | ||
| 1455 | if (use_dma) { | ||
| 1456 | dev_info(&plat_dev->dev, "Use DMA for data transfers\n"); | ||
| 1457 | pch_alloc_dma_buf(board_dat, data); | ||
| 1458 | } | ||
| 1459 | |||
| 1455 | ret = spi_register_master(master); | 1460 | ret = spi_register_master(master); |
| 1456 | if (ret != 0) { | 1461 | if (ret != 0) { |
| 1457 | dev_err(&plat_dev->dev, | 1462 | dev_err(&plat_dev->dev, |
| @@ -1459,14 +1464,10 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev) | |||
| 1459 | goto err_spi_register_master; | 1464 | goto err_spi_register_master; |
| 1460 | } | 1465 | } |
| 1461 | 1466 | ||
| 1462 | if (use_dma) { | ||
| 1463 | dev_info(&plat_dev->dev, "Use DMA for data transfers\n"); | ||
| 1464 | pch_alloc_dma_buf(board_dat, data); | ||
| 1465 | } | ||
| 1466 | |||
| 1467 | return 0; | 1467 | return 0; |
| 1468 | 1468 | ||
| 1469 | err_spi_register_master: | 1469 | err_spi_register_master: |
| 1470 | pch_free_dma_buf(board_dat, data); | ||
| 1470 | free_irq(board_dat->pdev->irq, data); | 1471 | free_irq(board_dat->pdev->irq, data); |
| 1471 | err_request_irq: | 1472 | err_request_irq: |
| 1472 | pch_spi_free_resources(board_dat, data); | 1473 | pch_spi_free_resources(board_dat, data); |
diff --git a/drivers/staging/cxt1e1/linux.c b/drivers/staging/cxt1e1/linux.c index 4a08e16e42f7..79206cb3fb94 100644 --- a/drivers/staging/cxt1e1/linux.c +++ b/drivers/staging/cxt1e1/linux.c | |||
| @@ -866,6 +866,8 @@ c4_ioctl (struct net_device *ndev, struct ifreq *ifr, int cmd) | |||
| 866 | _IOC_SIZE (iocmd)); | 866 | _IOC_SIZE (iocmd)); |
| 867 | #endif | 867 | #endif |
| 868 | iolen = _IOC_SIZE (iocmd); | 868 | iolen = _IOC_SIZE (iocmd); |
| 869 | if (iolen > sizeof(arg)) | ||
| 870 | return -EFAULT; | ||
| 869 | data = ifr->ifr_data + sizeof (iocmd); | 871 | data = ifr->ifr_data + sizeof (iocmd); |
| 870 | if (copy_from_user (&arg, data, iolen)) | 872 | if (copy_from_user (&arg, data, iolen)) |
| 871 | return -EFAULT; | 873 | return -EFAULT; |
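
The ioctl handler copies a caller-controlled _IOC_SIZE() worth of bytes into a fixed-size local, so the added length check rejects oversized requests before the copy. A plain userspace sketch of the validate-then-copy rule (buffer size and names invented; memcpy stands in for copy_from_user()):

/* Standalone sketch: never let a caller-supplied length exceed the
 * destination buffer before copying.  Names and sizes are made up.
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>

struct demo_arg { char bytes[64]; };

static int demo_handle_ioctl(const void *user_data, size_t iolen)
{
        struct demo_arg arg;

        if (iolen > sizeof(arg))        /* reject before touching 'arg' */
                return -EFAULT;

        memcpy(&arg, user_data, iolen); /* stands in for copy_from_user() */
        return 0;
}

int main(void)
{
        char payload[128] = { 0 };

        printf("small copy: %d\n", demo_handle_ioctl(payload, 16));
        printf("oversized copy rejected: %d\n",
               demo_handle_ioctl(payload, sizeof(payload)));
        return 0;
}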
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 7f1a7ce4b771..b83ec378d04f 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c | |||
| @@ -785,7 +785,7 @@ static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn) | |||
| 785 | spin_unlock_bh(&conn->cmd_lock); | 785 | spin_unlock_bh(&conn->cmd_lock); |
| 786 | 786 | ||
| 787 | list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) { | 787 | list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) { |
| 788 | list_del(&cmd->i_conn_node); | 788 | list_del_init(&cmd->i_conn_node); |
| 789 | iscsit_free_cmd(cmd, false); | 789 | iscsit_free_cmd(cmd, false); |
| 790 | } | 790 | } |
| 791 | } | 791 | } |
| @@ -3708,7 +3708,7 @@ iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state | |||
| 3708 | break; | 3708 | break; |
| 3709 | case ISTATE_REMOVE: | 3709 | case ISTATE_REMOVE: |
| 3710 | spin_lock_bh(&conn->cmd_lock); | 3710 | spin_lock_bh(&conn->cmd_lock); |
| 3711 | list_del(&cmd->i_conn_node); | 3711 | list_del_init(&cmd->i_conn_node); |
| 3712 | spin_unlock_bh(&conn->cmd_lock); | 3712 | spin_unlock_bh(&conn->cmd_lock); |
| 3713 | 3713 | ||
| 3714 | iscsit_free_cmd(cmd, false); | 3714 | iscsit_free_cmd(cmd, false); |
| @@ -4151,7 +4151,7 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn) | |||
| 4151 | spin_lock_bh(&conn->cmd_lock); | 4151 | spin_lock_bh(&conn->cmd_lock); |
| 4152 | list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) { | 4152 | list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) { |
| 4153 | 4153 | ||
| 4154 | list_del(&cmd->i_conn_node); | 4154 | list_del_init(&cmd->i_conn_node); |
| 4155 | spin_unlock_bh(&conn->cmd_lock); | 4155 | spin_unlock_bh(&conn->cmd_lock); |
| 4156 | 4156 | ||
| 4157 | iscsit_increment_maxcmdsn(cmd, sess); | 4157 | iscsit_increment_maxcmdsn(cmd, sess); |
| @@ -4196,6 +4196,10 @@ int iscsit_close_connection( | |||
| 4196 | iscsit_stop_timers_for_cmds(conn); | 4196 | iscsit_stop_timers_for_cmds(conn); |
| 4197 | iscsit_stop_nopin_response_timer(conn); | 4197 | iscsit_stop_nopin_response_timer(conn); |
| 4198 | iscsit_stop_nopin_timer(conn); | 4198 | iscsit_stop_nopin_timer(conn); |
| 4199 | |||
| 4200 | if (conn->conn_transport->iscsit_wait_conn) | ||
| 4201 | conn->conn_transport->iscsit_wait_conn(conn); | ||
| 4202 | |||
| 4199 | iscsit_free_queue_reqs_for_conn(conn); | 4203 | iscsit_free_queue_reqs_for_conn(conn); |
| 4200 | 4204 | ||
| 4201 | /* | 4205 | /* |
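
Switching these paths from list_del() to list_del_init() leaves the removed command entry pointing at itself, so later list_empty()-style checks or a second removal during connection teardown stay well defined instead of walking dead pointers. A self-contained sketch of the difference, using a tiny stand-in for the kernel list helpers:

/* Standalone sketch of why list_del_init() matters: after a plain
 * unlink the node's pointers are stale, but after list_del_init() the
 * node is a valid empty list again, so "is it still queued?" checks
 * keep working.  This is a tiny stand-in, not <linux/list.h>.
 */
#include <stdio.h>
#include <stdbool.h>

struct demo_node { struct demo_node *next, *prev; };

static void demo_init(struct demo_node *n) { n->next = n->prev = n; }

static void demo_add(struct demo_node *n, struct demo_node *head)
{
        n->next = head->next;
        n->prev = head;
        head->next->prev = n;
        head->next = n;
}

static void demo_del_init(struct demo_node *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        demo_init(n);                   /* node becomes an empty list */
}

static bool demo_on_a_list(const struct demo_node *n)
{
        return n->next != n;            /* like !list_empty(&node) */
}

int main(void)
{
        struct demo_node head, cmd;

        demo_init(&head);
        demo_init(&cmd);
        demo_add(&cmd, &head);
        printf("queued: %d\n", demo_on_a_list(&cmd));

        demo_del_init(&cmd);            /* safe to test (or re-remove) later */
        printf("queued after del_init: %d\n", demo_on_a_list(&cmd));
        return 0;
}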
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c index 33be1fb1df32..4ca8fd2a70db 100644 --- a/drivers/target/iscsi/iscsi_target_erl2.c +++ b/drivers/target/iscsi/iscsi_target_erl2.c | |||
| @@ -138,7 +138,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess) | |||
| 138 | list_for_each_entry_safe(cmd, cmd_tmp, | 138 | list_for_each_entry_safe(cmd, cmd_tmp, |
| 139 | &cr->conn_recovery_cmd_list, i_conn_node) { | 139 | &cr->conn_recovery_cmd_list, i_conn_node) { |
| 140 | 140 | ||
| 141 | list_del(&cmd->i_conn_node); | 141 | list_del_init(&cmd->i_conn_node); |
| 142 | cmd->conn = NULL; | 142 | cmd->conn = NULL; |
| 143 | spin_unlock(&cr->conn_recovery_cmd_lock); | 143 | spin_unlock(&cr->conn_recovery_cmd_lock); |
| 144 | iscsit_free_cmd(cmd, true); | 144 | iscsit_free_cmd(cmd, true); |
| @@ -160,7 +160,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess) | |||
| 160 | list_for_each_entry_safe(cmd, cmd_tmp, | 160 | list_for_each_entry_safe(cmd, cmd_tmp, |
| 161 | &cr->conn_recovery_cmd_list, i_conn_node) { | 161 | &cr->conn_recovery_cmd_list, i_conn_node) { |
| 162 | 162 | ||
| 163 | list_del(&cmd->i_conn_node); | 163 | list_del_init(&cmd->i_conn_node); |
| 164 | cmd->conn = NULL; | 164 | cmd->conn = NULL; |
| 165 | spin_unlock(&cr->conn_recovery_cmd_lock); | 165 | spin_unlock(&cr->conn_recovery_cmd_lock); |
| 166 | iscsit_free_cmd(cmd, true); | 166 | iscsit_free_cmd(cmd, true); |
| @@ -216,7 +216,7 @@ int iscsit_remove_cmd_from_connection_recovery( | |||
| 216 | } | 216 | } |
| 217 | cr = cmd->cr; | 217 | cr = cmd->cr; |
| 218 | 218 | ||
| 219 | list_del(&cmd->i_conn_node); | 219 | list_del_init(&cmd->i_conn_node); |
| 220 | return --cr->cmd_count; | 220 | return --cr->cmd_count; |
| 221 | } | 221 | } |
| 222 | 222 | ||
| @@ -297,7 +297,7 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn) | |||
| 297 | if (!(cmd->cmd_flags & ICF_OOO_CMDSN)) | 297 | if (!(cmd->cmd_flags & ICF_OOO_CMDSN)) |
| 298 | continue; | 298 | continue; |
| 299 | 299 | ||
| 300 | list_del(&cmd->i_conn_node); | 300 | list_del_init(&cmd->i_conn_node); |
| 301 | 301 | ||
| 302 | spin_unlock_bh(&conn->cmd_lock); | 302 | spin_unlock_bh(&conn->cmd_lock); |
| 303 | iscsit_free_cmd(cmd, true); | 303 | iscsit_free_cmd(cmd, true); |
| @@ -335,7 +335,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) | |||
| 335 | /* | 335 | /* |
| 336 | * Only perform connection recovery on ISCSI_OP_SCSI_CMD or | 336 | * Only perform connection recovery on ISCSI_OP_SCSI_CMD or |
| 337 | * ISCSI_OP_NOOP_OUT opcodes. For all other opcodes call | 337 | * ISCSI_OP_NOOP_OUT opcodes. For all other opcodes call |
| 338 | * list_del(&cmd->i_conn_node); to release the command to the | 338 | * list_del_init(&cmd->i_conn_node); to release the command to the |
| 339 | * session pool and remove it from the connection's list. | 339 | * session pool and remove it from the connection's list. |
| 340 | * | 340 | * |
| 341 | * Also stop the DataOUT timer, which will be restarted after | 341 | * Also stop the DataOUT timer, which will be restarted after |
| @@ -351,7 +351,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) | |||
| 351 | " CID: %hu\n", cmd->iscsi_opcode, | 351 | " CID: %hu\n", cmd->iscsi_opcode, |
| 352 | cmd->init_task_tag, cmd->cmd_sn, conn->cid); | 352 | cmd->init_task_tag, cmd->cmd_sn, conn->cid); |
| 353 | 353 | ||
| 354 | list_del(&cmd->i_conn_node); | 354 | list_del_init(&cmd->i_conn_node); |
| 355 | spin_unlock_bh(&conn->cmd_lock); | 355 | spin_unlock_bh(&conn->cmd_lock); |
| 356 | iscsit_free_cmd(cmd, true); | 356 | iscsit_free_cmd(cmd, true); |
| 357 | spin_lock_bh(&conn->cmd_lock); | 357 | spin_lock_bh(&conn->cmd_lock); |
| @@ -371,7 +371,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) | |||
| 371 | */ | 371 | */ |
| 372 | if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd && | 372 | if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd && |
| 373 | iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) { | 373 | iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) { |
| 374 | list_del(&cmd->i_conn_node); | 374 | list_del_init(&cmd->i_conn_node); |
| 375 | spin_unlock_bh(&conn->cmd_lock); | 375 | spin_unlock_bh(&conn->cmd_lock); |
| 376 | iscsit_free_cmd(cmd, true); | 376 | iscsit_free_cmd(cmd, true); |
| 377 | spin_lock_bh(&conn->cmd_lock); | 377 | spin_lock_bh(&conn->cmd_lock); |
| @@ -393,7 +393,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) | |||
| 393 | 393 | ||
| 394 | cmd->sess = conn->sess; | 394 | cmd->sess = conn->sess; |
| 395 | 395 | ||
| 396 | list_del(&cmd->i_conn_node); | 396 | list_del_init(&cmd->i_conn_node); |
| 397 | spin_unlock_bh(&conn->cmd_lock); | 397 | spin_unlock_bh(&conn->cmd_lock); |
| 398 | 398 | ||
| 399 | iscsit_free_all_datain_reqs(cmd); | 399 | iscsit_free_all_datain_reqs(cmd); |
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c index 39761837608d..44a5471de00f 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.c +++ b/drivers/target/iscsi/iscsi_target_tpg.c | |||
| @@ -137,7 +137,7 @@ struct iscsi_portal_group *iscsit_get_tpg_from_np( | |||
| 137 | list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) { | 137 | list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) { |
| 138 | 138 | ||
| 139 | spin_lock(&tpg->tpg_state_lock); | 139 | spin_lock(&tpg->tpg_state_lock); |
| 140 | if (tpg->tpg_state == TPG_STATE_FREE) { | 140 | if (tpg->tpg_state != TPG_STATE_ACTIVE) { |
| 141 | spin_unlock(&tpg->tpg_state_lock); | 141 | spin_unlock(&tpg->tpg_state_lock); |
| 142 | continue; | 142 | continue; |
| 143 | } | 143 | } |
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 42f18fc1067b..77e6531fb0a1 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c | |||
| @@ -1079,25 +1079,31 @@ sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read, | |||
| 1079 | left = sectors * dev->prot_length; | 1079 | left = sectors * dev->prot_length; |
| 1080 | 1080 | ||
| 1081 | for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) { | 1081 | for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) { |
| 1082 | 1082 | unsigned int psg_len, copied = 0; | |
| 1083 | len = min(psg->length, left); | ||
| 1084 | if (offset >= sg->length) { | ||
| 1085 | sg = sg_next(sg); | ||
| 1086 | offset = 0; | ||
| 1087 | } | ||
| 1088 | 1083 | ||
| 1089 | paddr = kmap_atomic(sg_page(psg)) + psg->offset; | 1084 | paddr = kmap_atomic(sg_page(psg)) + psg->offset; |
| 1090 | addr = kmap_atomic(sg_page(sg)) + sg->offset + offset; | 1085 | psg_len = min(left, psg->length); |
| 1091 | 1086 | while (psg_len) { | |
| 1092 | if (read) | 1087 | len = min(psg_len, sg->length - offset); |
| 1093 | memcpy(paddr, addr, len); | 1088 | addr = kmap_atomic(sg_page(sg)) + sg->offset + offset; |
| 1094 | else | 1089 | |
| 1095 | memcpy(addr, paddr, len); | 1090 | if (read) |
| 1096 | 1091 | memcpy(paddr + copied, addr, len); | |
| 1097 | left -= len; | 1092 | else |
| 1098 | offset += len; | 1093 | memcpy(addr, paddr + copied, len); |
| 1094 | |||
| 1095 | left -= len; | ||
| 1096 | offset += len; | ||
| 1097 | copied += len; | ||
| 1098 | psg_len -= len; | ||
| 1099 | |||
| 1100 | if (offset >= sg->length) { | ||
| 1101 | sg = sg_next(sg); | ||
| 1102 | offset = 0; | ||
| 1103 | } | ||
| 1104 | kunmap_atomic(addr); | ||
| 1105 | } | ||
| 1099 | kunmap_atomic(paddr); | 1106 | kunmap_atomic(paddr); |
| 1100 | kunmap_atomic(addr); | ||
| 1101 | } | 1107 | } |
| 1102 | } | 1108 | } |
| 1103 | 1109 | ||
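
The rewritten sbc_dif_copy_prot() walks the protection and data scatterlists independently: for each protection segment it copies in pieces bounded by both the bytes left in that segment and the space left in the current data segment, advancing the data segment whenever its offset runs off the end. A standalone sketch of the same two-list walk over plain byte arrays (segment sizes are arbitrary):

/* Standalone sketch of copying across two segment lists whose segment
 * boundaries do not line up, mirroring the reworked SG walk.  Segment
 * sizes here are arbitrary.
 */
#include <stdio.h>
#include <string.h>

struct demo_seg { unsigned char *buf; unsigned int len; };

static void demo_copy(struct demo_seg *dst, struct demo_seg *src,
                      unsigned int left)
{
        unsigned int offset = 0;        /* position inside current dst segment */

        while (left) {
                unsigned int src_len = (left < src->len) ? left : src->len;
                unsigned int copied = 0;

                while (src_len) {
                        unsigned int len = dst->len - offset;

                        if (len > src_len)
                                len = src_len;

                        memcpy(dst->buf + offset, src->buf + copied, len);
                        left -= len;
                        offset += len;
                        copied += len;
                        src_len -= len;

                        if (offset >= dst->len) {       /* next dst segment */
                                dst++;
                                offset = 0;
                        }
                }
                src++;                                  /* next src segment */
        }
}

int main(void)
{
        unsigned char s0[5] = "abcde", s1[3] = "fgh";
        unsigned char d0[2], d1[6];
        struct demo_seg src[] = { { s0, 5 }, { s1, 3 } };
        struct demo_seg dst[] = { { d0, 2 }, { d1, 6 } };

        demo_copy(dst, src, 8);
        printf("%.2s%.6s\n", (char *)d0, (char *)d1);   /* abcdefgh */
        return 0;
}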
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index 35c066489a19..5f88d767671e 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig | |||
| @@ -136,6 +136,7 @@ config SPEAR_THERMAL | |||
| 136 | config RCAR_THERMAL | 136 | config RCAR_THERMAL |
| 137 | tristate "Renesas R-Car thermal driver" | 137 | tristate "Renesas R-Car thermal driver" |
| 138 | depends on ARCH_SHMOBILE || COMPILE_TEST | 138 | depends on ARCH_SHMOBILE || COMPILE_TEST |
| 139 | depends on HAS_IOMEM | ||
| 139 | help | 140 | help |
| 140 | Enable this to plug the R-Car thermal sensor driver into the Linux | 141 | Enable this to plug the R-Car thermal sensor driver into the Linux |
| 141 | thermal framework. | 142 | thermal framework. |
| @@ -210,8 +211,16 @@ config ACPI_INT3403_THERMAL | |||
| 210 | tristate "ACPI INT3403 thermal driver" | 211 | tristate "ACPI INT3403 thermal driver" |
| 211 | depends on X86 && ACPI | 212 | depends on X86 && ACPI |
| 212 | help | 213 | help |
| 213 | This driver uses ACPI INT3403 device objects. If present, it will | 214 | Newer laptops and tablets that use ACPI may have thermal sensors |
| 214 | register each INT3403 thermal sensor as a thermal zone. | 215 | outside the core CPU/SOC for thermal safety reasons. These |
| 216 | temperature sensors are also exposed for the OS to use via the so | ||
| 217 | called INT3403 ACPI object. This driver will, on devices that have | ||
| 218 | such sensors, expose the temperature information from these sensors | ||
| 219 | to userspace via the normal thermal framework. This means that a wide | ||
| 220 | range of applications and GUI widgets can show this information to | ||
| 221 | the user or use this information for making decisions. For example, | ||
| 222 | the Intel Thermal Daemon can use this information to allow the user | ||
| 223 | to select his laptop to run without turning on the fans. | ||
| 215 | 224 | ||
| 216 | menu "Texas Instruments thermal drivers" | 225 | menu "Texas Instruments thermal drivers" |
| 217 | source "drivers/thermal/ti-soc-thermal/Kconfig" | 226 | source "drivers/thermal/ti-soc-thermal/Kconfig" |
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 338a88bf6662..71b0ec0c370d 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c | |||
| @@ -56,10 +56,15 @@ static LIST_HEAD(thermal_governor_list); | |||
| 56 | static DEFINE_MUTEX(thermal_list_lock); | 56 | static DEFINE_MUTEX(thermal_list_lock); |
| 57 | static DEFINE_MUTEX(thermal_governor_lock); | 57 | static DEFINE_MUTEX(thermal_governor_lock); |
| 58 | 58 | ||
| 59 | static struct thermal_governor *def_governor; | ||
| 60 | |||
| 59 | static struct thermal_governor *__find_governor(const char *name) | 61 | static struct thermal_governor *__find_governor(const char *name) |
| 60 | { | 62 | { |
| 61 | struct thermal_governor *pos; | 63 | struct thermal_governor *pos; |
| 62 | 64 | ||
| 65 | if (!name || !name[0]) | ||
| 66 | return def_governor; | ||
| 67 | |||
| 63 | list_for_each_entry(pos, &thermal_governor_list, governor_list) | 68 | list_for_each_entry(pos, &thermal_governor_list, governor_list) |
| 64 | if (!strnicmp(name, pos->name, THERMAL_NAME_LENGTH)) | 69 | if (!strnicmp(name, pos->name, THERMAL_NAME_LENGTH)) |
| 65 | return pos; | 70 | return pos; |
| @@ -82,17 +87,23 @@ int thermal_register_governor(struct thermal_governor *governor) | |||
| 82 | if (__find_governor(governor->name) == NULL) { | 87 | if (__find_governor(governor->name) == NULL) { |
| 83 | err = 0; | 88 | err = 0; |
| 84 | list_add(&governor->governor_list, &thermal_governor_list); | 89 | list_add(&governor->governor_list, &thermal_governor_list); |
| 90 | if (!def_governor && !strncmp(governor->name, | ||
| 91 | DEFAULT_THERMAL_GOVERNOR, THERMAL_NAME_LENGTH)) | ||
| 92 | def_governor = governor; | ||
| 85 | } | 93 | } |
| 86 | 94 | ||
| 87 | mutex_lock(&thermal_list_lock); | 95 | mutex_lock(&thermal_list_lock); |
| 88 | 96 | ||
| 89 | list_for_each_entry(pos, &thermal_tz_list, node) { | 97 | list_for_each_entry(pos, &thermal_tz_list, node) { |
| 98 | /* | ||
| 99 | * only thermal zones with specified tz->tzp->governor_name | ||
| 100 | * may run with tz->governor unset | ||
| 101 | */ | ||
| 90 | if (pos->governor) | 102 | if (pos->governor) |
| 91 | continue; | 103 | continue; |
| 92 | if (pos->tzp) | 104 | |
| 93 | name = pos->tzp->governor_name; | 105 | name = pos->tzp->governor_name; |
| 94 | else | 106 | |
| 95 | name = DEFAULT_THERMAL_GOVERNOR; | ||
| 96 | if (!strnicmp(name, governor->name, THERMAL_NAME_LENGTH)) | 107 | if (!strnicmp(name, governor->name, THERMAL_NAME_LENGTH)) |
| 97 | pos->governor = governor; | 108 | pos->governor = governor; |
| 98 | } | 109 | } |
| @@ -342,8 +353,8 @@ static void monitor_thermal_zone(struct thermal_zone_device *tz) | |||
| 342 | static void handle_non_critical_trips(struct thermal_zone_device *tz, | 353 | static void handle_non_critical_trips(struct thermal_zone_device *tz, |
| 343 | int trip, enum thermal_trip_type trip_type) | 354 | int trip, enum thermal_trip_type trip_type) |
| 344 | { | 355 | { |
| 345 | if (tz->governor) | 356 | tz->governor ? tz->governor->throttle(tz, trip) : |
| 346 | tz->governor->throttle(tz, trip); | 357 | def_governor->throttle(tz, trip); |
| 347 | } | 358 | } |
| 348 | 359 | ||
| 349 | static void handle_critical_trips(struct thermal_zone_device *tz, | 360 | static void handle_critical_trips(struct thermal_zone_device *tz, |
| @@ -1107,7 +1118,7 @@ __thermal_cooling_device_register(struct device_node *np, | |||
| 1107 | INIT_LIST_HEAD(&cdev->thermal_instances); | 1118 | INIT_LIST_HEAD(&cdev->thermal_instances); |
| 1108 | cdev->np = np; | 1119 | cdev->np = np; |
| 1109 | cdev->ops = ops; | 1120 | cdev->ops = ops; |
| 1110 | cdev->updated = true; | 1121 | cdev->updated = false; |
| 1111 | cdev->device.class = &thermal_class; | 1122 | cdev->device.class = &thermal_class; |
| 1112 | cdev->devdata = devdata; | 1123 | cdev->devdata = devdata; |
| 1113 | dev_set_name(&cdev->device, "cooling_device%d", cdev->id); | 1124 | dev_set_name(&cdev->device, "cooling_device%d", cdev->id); |
| @@ -1533,7 +1544,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type, | |||
| 1533 | if (tz->tzp) | 1544 | if (tz->tzp) |
| 1534 | tz->governor = __find_governor(tz->tzp->governor_name); | 1545 | tz->governor = __find_governor(tz->tzp->governor_name); |
| 1535 | else | 1546 | else |
| 1536 | tz->governor = __find_governor(DEFAULT_THERMAL_GOVERNOR); | 1547 | tz->governor = def_governor; |
| 1537 | 1548 | ||
| 1538 | mutex_unlock(&thermal_governor_lock); | 1549 | mutex_unlock(&thermal_governor_lock); |
| 1539 | 1550 | ||
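
thermal_core now caches the governor registered under DEFAULT_THERMAL_GOVERNOR in def_governor and falls back to it whenever a zone supplies no (or an empty) governor name, so such zones no longer end up with a NULL ->governor and a skipped throttle callback. A minimal sketch of the lookup-with-default pattern (the table contents are invented):

/* Standalone sketch of a name lookup that falls back to a cached
 * default when the name is missing or empty.  The table is invented.
 */
#include <stdio.h>
#include <string.h>
#include <stddef.h>

struct demo_governor { const char *name; };

static struct demo_governor demo_table[] = {
        { "step_wise" }, { "fair_share" }, { "user_space" },
};

static struct demo_governor *demo_default = &demo_table[0];

static struct demo_governor *demo_find_governor(const char *name)
{
        size_t i;

        if (!name || !name[0])          /* no preference: use the default */
                return demo_default;

        for (i = 0; i < sizeof(demo_table) / sizeof(demo_table[0]); i++)
                if (!strcmp(demo_table[i].name, name))
                        return &demo_table[i];

        return NULL;                    /* named governor not registered yet */
}

int main(void)
{
        printf("'' -> %s\n", demo_find_governor("")->name);
        printf("'user_space' -> %s\n", demo_find_governor("user_space")->name);
        return 0;
}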
diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c index 972e1c73722a..081fd7e6a9f0 100644 --- a/drivers/thermal/x86_pkg_temp_thermal.c +++ b/drivers/thermal/x86_pkg_temp_thermal.c | |||
| @@ -68,6 +68,10 @@ struct phy_dev_entry { | |||
| 68 | struct thermal_zone_device *tzone; | 68 | struct thermal_zone_device *tzone; |
| 69 | }; | 69 | }; |
| 70 | 70 | ||
| 71 | static const struct thermal_zone_params pkg_temp_tz_params = { | ||
| 72 | .no_hwmon = true, | ||
| 73 | }; | ||
| 74 | |||
| 71 | /* List maintaining number of package instances */ | 75 | /* List maintaining number of package instances */ |
| 72 | static LIST_HEAD(phy_dev_list); | 76 | static LIST_HEAD(phy_dev_list); |
| 73 | static DEFINE_MUTEX(phy_dev_list_mutex); | 77 | static DEFINE_MUTEX(phy_dev_list_mutex); |
| @@ -394,7 +398,6 @@ static int pkg_temp_thermal_device_add(unsigned int cpu) | |||
| 394 | int err; | 398 | int err; |
| 395 | u32 tj_max; | 399 | u32 tj_max; |
| 396 | struct phy_dev_entry *phy_dev_entry; | 400 | struct phy_dev_entry *phy_dev_entry; |
| 397 | char buffer[30]; | ||
| 398 | int thres_count; | 401 | int thres_count; |
| 399 | u32 eax, ebx, ecx, edx; | 402 | u32 eax, ebx, ecx, edx; |
| 400 | u8 *temp; | 403 | u8 *temp; |
| @@ -440,13 +443,11 @@ static int pkg_temp_thermal_device_add(unsigned int cpu) | |||
| 440 | phy_dev_entry->first_cpu = cpu; | 443 | phy_dev_entry->first_cpu = cpu; |
| 441 | phy_dev_entry->tj_max = tj_max; | 444 | phy_dev_entry->tj_max = tj_max; |
| 442 | phy_dev_entry->ref_cnt = 1; | 445 | phy_dev_entry->ref_cnt = 1; |
| 443 | snprintf(buffer, sizeof(buffer), "pkg-temp-%d\n", | 446 | phy_dev_entry->tzone = thermal_zone_device_register("x86_pkg_temp", |
| 444 | phy_dev_entry->phys_proc_id); | ||
| 445 | phy_dev_entry->tzone = thermal_zone_device_register(buffer, | ||
| 446 | thres_count, | 447 | thres_count, |
| 447 | (thres_count == MAX_NUMBER_OF_TRIPS) ? | 448 | (thres_count == MAX_NUMBER_OF_TRIPS) ? |
| 448 | 0x03 : 0x01, | 449 | 0x03 : 0x01, |
| 449 | phy_dev_entry, &tzone_ops, NULL, 0, 0); | 450 | phy_dev_entry, &tzone_ops, &pkg_temp_tz_params, 0, 0); |
| 450 | if (IS_ERR(phy_dev_entry->tzone)) { | 451 | if (IS_ERR(phy_dev_entry->tzone)) { |
| 451 | err = PTR_ERR(phy_dev_entry->tzone); | 452 | err = PTR_ERR(phy_dev_entry->tzone); |
| 452 | goto err_ret_free; | 453 | goto err_ret_free; |
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c index cf86e729532b..dc697cee248a 100644 --- a/drivers/tty/serial/sunhv.c +++ b/drivers/tty/serial/sunhv.c | |||
| @@ -433,13 +433,10 @@ static void sunhv_console_write_paged(struct console *con, const char *s, unsign | |||
| 433 | unsigned long flags; | 433 | unsigned long flags; |
| 434 | int locked = 1; | 434 | int locked = 1; |
| 435 | 435 | ||
| 436 | local_irq_save(flags); | 436 | if (port->sysrq || oops_in_progress) |
| 437 | if (port->sysrq) { | 437 | locked = spin_trylock_irqsave(&port->lock, flags); |
| 438 | locked = 0; | 438 | else |
| 439 | } else if (oops_in_progress) { | 439 | spin_lock_irqsave(&port->lock, flags); |
| 440 | locked = spin_trylock(&port->lock); | ||
| 441 | } else | ||
| 442 | spin_lock(&port->lock); | ||
| 443 | 440 | ||
| 444 | while (n > 0) { | 441 | while (n > 0) { |
| 445 | unsigned long ra = __pa(con_write_page); | 442 | unsigned long ra = __pa(con_write_page); |
| @@ -470,8 +467,7 @@ static void sunhv_console_write_paged(struct console *con, const char *s, unsign | |||
| 470 | } | 467 | } |
| 471 | 468 | ||
| 472 | if (locked) | 469 | if (locked) |
| 473 | spin_unlock(&port->lock); | 470 | spin_unlock_irqrestore(&port->lock, flags); |
| 474 | local_irq_restore(flags); | ||
| 475 | } | 471 | } |
| 476 | 472 | ||
| 477 | static inline void sunhv_console_putchar(struct uart_port *port, char c) | 473 | static inline void sunhv_console_putchar(struct uart_port *port, char c) |
| @@ -492,7 +488,10 @@ static void sunhv_console_write_bychar(struct console *con, const char *s, unsig | |||
| 492 | unsigned long flags; | 488 | unsigned long flags; |
| 493 | int i, locked = 1; | 489 | int i, locked = 1; |
| 494 | 490 | ||
| 495 | local_irq_save(flags); | 491 | if (port->sysrq || oops_in_progress) |
| 492 | locked = spin_trylock_irqsave(&port->lock, flags); | ||
| 493 | else | ||
| 494 | spin_lock_irqsave(&port->lock, flags); | ||
| 496 | if (port->sysrq) { | 495 | if (port->sysrq) { |
| 497 | locked = 0; | 496 | locked = 0; |
| 498 | } else if (oops_in_progress) { | 497 | } else if (oops_in_progress) { |
| @@ -507,8 +506,7 @@ static void sunhv_console_write_bychar(struct console *con, const char *s, unsig | |||
| 507 | } | 506 | } |
| 508 | 507 | ||
| 509 | if (locked) | 508 | if (locked) |
| 510 | spin_unlock(&port->lock); | 509 | spin_unlock_irqrestore(&port->lock, flags); |
| 511 | local_irq_restore(flags); | ||
| 512 | } | 510 | } |
| 513 | 511 | ||
| 514 | static struct console sunhv_console = { | 512 | static struct console sunhv_console = { |
diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c index 380fb5355cb2..5faa8e905e98 100644 --- a/drivers/tty/serial/sunsab.c +++ b/drivers/tty/serial/sunsab.c | |||
| @@ -844,20 +844,16 @@ static void sunsab_console_write(struct console *con, const char *s, unsigned n) | |||
| 844 | unsigned long flags; | 844 | unsigned long flags; |
| 845 | int locked = 1; | 845 | int locked = 1; |
| 846 | 846 | ||
| 847 | local_irq_save(flags); | 847 | if (up->port.sysrq || oops_in_progress) |
| 848 | if (up->port.sysrq) { | 848 | locked = spin_trylock_irqsave(&up->port.lock, flags); |
| 849 | locked = 0; | 849 | else |
| 850 | } else if (oops_in_progress) { | 850 | spin_lock_irqsave(&up->port.lock, flags); |
| 851 | locked = spin_trylock(&up->port.lock); | ||
| 852 | } else | ||
| 853 | spin_lock(&up->port.lock); | ||
| 854 | 851 | ||
| 855 | uart_console_write(&up->port, s, n, sunsab_console_putchar); | 852 | uart_console_write(&up->port, s, n, sunsab_console_putchar); |
| 856 | sunsab_tec_wait(up); | 853 | sunsab_tec_wait(up); |
| 857 | 854 | ||
| 858 | if (locked) | 855 | if (locked) |
| 859 | spin_unlock(&up->port.lock); | 856 | spin_unlock_irqrestore(&up->port.lock, flags); |
| 860 | local_irq_restore(flags); | ||
| 861 | } | 857 | } |
| 862 | 858 | ||
| 863 | static int sunsab_console_setup(struct console *con, char *options) | 859 | static int sunsab_console_setup(struct console *con, char *options) |
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c index db79b76f5c8e..9a0f24f83720 100644 --- a/drivers/tty/serial/sunsu.c +++ b/drivers/tty/serial/sunsu.c | |||
| @@ -1295,13 +1295,10 @@ static void sunsu_console_write(struct console *co, const char *s, | |||
| 1295 | unsigned int ier; | 1295 | unsigned int ier; |
| 1296 | int locked = 1; | 1296 | int locked = 1; |
| 1297 | 1297 | ||
| 1298 | local_irq_save(flags); | 1298 | if (up->port.sysrq || oops_in_progress) |
| 1299 | if (up->port.sysrq) { | 1299 | locked = spin_trylock_irqsave(&up->port.lock, flags); |
| 1300 | locked = 0; | 1300 | else |
| 1301 | } else if (oops_in_progress) { | 1301 | spin_lock_irqsave(&up->port.lock, flags); |
| 1302 | locked = spin_trylock(&up->port.lock); | ||
| 1303 | } else | ||
| 1304 | spin_lock(&up->port.lock); | ||
| 1305 | 1302 | ||
| 1306 | /* | 1303 | /* |
| 1307 | * First save the UER then disable the interrupts | 1304 | * First save the UER then disable the interrupts |
| @@ -1319,8 +1316,7 @@ static void sunsu_console_write(struct console *co, const char *s, | |||
| 1319 | serial_out(up, UART_IER, ier); | 1316 | serial_out(up, UART_IER, ier); |
| 1320 | 1317 | ||
| 1321 | if (locked) | 1318 | if (locked) |
| 1322 | spin_unlock(&up->port.lock); | 1319 | spin_unlock_irqrestore(&up->port.lock, flags); |
| 1323 | local_irq_restore(flags); | ||
| 1324 | } | 1320 | } |
| 1325 | 1321 | ||
| 1326 | /* | 1322 | /* |
diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c index 45a8c6aa5837..a2c40ed287d2 100644 --- a/drivers/tty/serial/sunzilog.c +++ b/drivers/tty/serial/sunzilog.c | |||
| @@ -1195,20 +1195,16 @@ sunzilog_console_write(struct console *con, const char *s, unsigned int count) | |||
| 1195 | unsigned long flags; | 1195 | unsigned long flags; |
| 1196 | int locked = 1; | 1196 | int locked = 1; |
| 1197 | 1197 | ||
| 1198 | local_irq_save(flags); | 1198 | if (up->port.sysrq || oops_in_progress) |
| 1199 | if (up->port.sysrq) { | 1199 | locked = spin_trylock_irqsave(&up->port.lock, flags); |
| 1200 | locked = 0; | 1200 | else |
| 1201 | } else if (oops_in_progress) { | 1201 | spin_lock_irqsave(&up->port.lock, flags); |
| 1202 | locked = spin_trylock(&up->port.lock); | ||
| 1203 | } else | ||
| 1204 | spin_lock(&up->port.lock); | ||
| 1205 | 1202 | ||
| 1206 | uart_console_write(&up->port, s, count, sunzilog_putchar); | 1203 | uart_console_write(&up->port, s, count, sunzilog_putchar); |
| 1207 | udelay(2); | 1204 | udelay(2); |
| 1208 | 1205 | ||
| 1209 | if (locked) | 1206 | if (locked) |
| 1210 | spin_unlock(&up->port.lock); | 1207 | spin_unlock_irqrestore(&up->port.lock, flags); |
| 1211 | local_irq_restore(flags); | ||
| 1212 | } | 1208 | } |
| 1213 | 1209 | ||
| 1214 | static int __init sunzilog_console_setup(struct console *con, char *options) | 1210 | static int __init sunzilog_console_setup(struct console *con, char *options) |
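
The four sun* console write paths above replace the open-coded local_irq_save() plus spin_lock() with spin_lock_irqsave(), and only try the lock when sysrq or an oops is in progress, so a console write from the crash path cannot deadlock on a lock the CPU already holds. A userspace analogue of the pattern using pthreads (build with -pthread; the kernel primitives obviously differ):

/* Userspace analogue of the console-write locking pattern: take the
 * lock normally, but only try it when we might already hold it (the
 * oops/sysrq case), and unlock only if the try succeeded.  pthreads
 * stand in for the kernel spinlock/irq primitives.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;

static void demo_console_write(const char *s, int oops_in_progress)
{
        int locked = 1;

        if (oops_in_progress)
                locked = (pthread_mutex_trylock(&port_lock) == 0);
        else
                pthread_mutex_lock(&port_lock);

        printf("console: %s\n", s);     /* the actual character output */

        if (locked)
                pthread_mutex_unlock(&port_lock);
}

int main(void)
{
        demo_console_write("normal path", 0);

        pthread_mutex_lock(&port_lock);         /* simulate a held lock */
        demo_console_write("oops path, lock busy", 1);
        pthread_mutex_unlock(&port_lock);
        return 0;
}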
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 8d72f0c65937..062967c90b2a 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c | |||
| @@ -717,6 +717,10 @@ int usb_get_configuration(struct usb_device *dev) | |||
| 717 | result = -ENOMEM; | 717 | result = -ENOMEM; |
| 718 | goto err; | 718 | goto err; |
| 719 | } | 719 | } |
| 720 | |||
| 721 | if (dev->quirks & USB_QUIRK_DELAY_INIT) | ||
| 722 | msleep(100); | ||
| 723 | |||
| 720 | result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno, | 724 | result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno, |
| 721 | bigbuffer, length); | 725 | bigbuffer, length); |
| 722 | if (result < 0) { | 726 | if (result < 0) { |
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 8f37063c0a49..739ee8e8bdfd 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c | |||
| @@ -47,6 +47,10 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
| 47 | /* Microsoft LifeCam-VX700 v2.0 */ | 47 | /* Microsoft LifeCam-VX700 v2.0 */ |
| 48 | { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME }, | 48 | { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME }, |
| 49 | 49 | ||
| 50 | /* Logitech HD Pro Webcams C920 and C930e */ | ||
| 51 | { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT }, | ||
| 52 | { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT }, | ||
| 53 | |||
| 50 | /* Logitech Quickcam Fusion */ | 54 | /* Logitech Quickcam Fusion */ |
| 51 | { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME }, | 55 | { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME }, |
| 52 | 56 | ||
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 6fe577d46fa2..924a6ccdb622 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
| @@ -4733,6 +4733,9 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) | |||
| 4733 | /* Accept arbitrarily long scatter-gather lists */ | 4733 | /* Accept arbitrarily long scatter-gather lists */ |
| 4734 | hcd->self.sg_tablesize = ~0; | 4734 | hcd->self.sg_tablesize = ~0; |
| 4735 | 4735 | ||
| 4736 | /* support to build packet from discontinuous buffers */ | ||
| 4737 | hcd->self.no_sg_constraint = 1; | ||
| 4738 | |||
| 4736 | /* XHCI controllers don't stop the ep queue on short packets :| */ | 4739 | /* XHCI controllers don't stop the ep queue on short packets :| */ |
| 4737 | hcd->self.no_stop_on_short = 1; | 4740 | hcd->self.no_stop_on_short = 1; |
| 4738 | 4741 | ||
| @@ -4757,14 +4760,6 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) | |||
| 4757 | /* xHCI private pointer was set in xhci_pci_probe for the second | 4760 | /* xHCI private pointer was set in xhci_pci_probe for the second |
| 4758 | * registered roothub. | 4761 | * registered roothub. |
| 4759 | */ | 4762 | */ |
| 4760 | xhci = hcd_to_xhci(hcd); | ||
| 4761 | /* | ||
| 4762 | * Support arbitrarily aligned sg-list entries on hosts without | ||
| 4763 | * TD fragment rules (which are currently unsupported). | ||
| 4764 | */ | ||
| 4765 | if (xhci->hci_version < 0x100) | ||
| 4766 | hcd->self.no_sg_constraint = 1; | ||
| 4767 | |||
| 4768 | return 0; | 4763 | return 0; |
| 4769 | } | 4764 | } |
| 4770 | 4765 | ||
| @@ -4793,9 +4788,6 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) | |||
| 4793 | if (xhci->hci_version > 0x96) | 4788 | if (xhci->hci_version > 0x96) |
| 4794 | xhci->quirks |= XHCI_SPURIOUS_SUCCESS; | 4789 | xhci->quirks |= XHCI_SPURIOUS_SUCCESS; |
| 4795 | 4790 | ||
| 4796 | if (xhci->hci_version < 0x100) | ||
| 4797 | hcd->self.no_sg_constraint = 1; | ||
| 4798 | |||
| 4799 | /* Make sure the HC is halted. */ | 4791 | /* Make sure the HC is halted. */ |
| 4800 | retval = xhci_halt(xhci); | 4792 | retval = xhci_halt(xhci); |
| 4801 | if (retval) | 4793 | if (retval) |
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 4fb7a8f83c8a..54af4e933695 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c | |||
| @@ -186,12 +186,12 @@ static bool is_invalid_reserved_pfn(unsigned long pfn) | |||
| 186 | if (pfn_valid(pfn)) { | 186 | if (pfn_valid(pfn)) { |
| 187 | bool reserved; | 187 | bool reserved; |
| 188 | struct page *tail = pfn_to_page(pfn); | 188 | struct page *tail = pfn_to_page(pfn); |
| 189 | struct page *head = compound_trans_head(tail); | 189 | struct page *head = compound_head(tail); |
| 190 | reserved = !!(PageReserved(head)); | 190 | reserved = !!(PageReserved(head)); |
| 191 | if (head != tail) { | 191 | if (head != tail) { |
| 192 | /* | 192 | /* |
| 193 | * "head" is not a dangling pointer | 193 | * "head" is not a dangling pointer |
| 194 | * (compound_trans_head takes care of that) | 194 | * (compound_head takes care of that) |
| 195 | * but the hugepage may have been split | 195 | * but the hugepage may have been split |
| 196 | * from under us (and we may not hold a | 196 | * from under us (and we may not hold a |
| 197 | * reference count on the head page so it can | 197 | * reference count on the head page so it can |
