Diffstat (limited to 'drivers')
166 files changed, 2655 insertions, 2750 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index c95df0b8c880..5d9248526d78 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -235,17 +235,6 @@ config ACPI_INITRD_TABLE_OVERRIDE
           initrd, therefore it's safe to say Y.
           See Documentation/acpi/initrd_table_override.txt for details
 
-config ACPI_BLACKLIST_YEAR
-        int "Disable ACPI for systems before Jan 1st this year" if X86_32
-        default 0
-        help
-          Enter a 4-digit year, e.g., 2001, to disable ACPI by default
-          on platforms with DMI BIOS date before January 1st that year.
-          "acpi=force" can be used to override this mechanism.
-
-          Enter 0 to disable this mechanism and allow ACPI to
-          run by default no matter what the year.  (default)
-
 config ACPI_DEBUG
         bool "Debug Statements"
         default n
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index b9f0d5f4bba5..8711e3797165 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -56,7 +56,6 @@ static int ac_sleep_before_get_state_ms;
 
 struct acpi_ac {
         struct power_supply charger;
-        struct acpi_device *adev;
         struct platform_device *pdev;
         unsigned long long state;
 };
@@ -70,8 +69,9 @@ struct acpi_ac {
 static int acpi_ac_get_state(struct acpi_ac *ac)
 {
         acpi_status status;
+        acpi_handle handle = ACPI_HANDLE(&ac->pdev->dev);
 
-        status = acpi_evaluate_integer(ac->adev->handle, "_PSR", NULL,
+        status = acpi_evaluate_integer(handle, "_PSR", NULL,
                                        &ac->state);
         if (ACPI_FAILURE(status)) {
                 ACPI_EXCEPTION((AE_INFO, status,
@@ -119,6 +119,7 @@ static enum power_supply_property ac_props[] = {
 static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data)
 {
         struct acpi_ac *ac = data;
+        struct acpi_device *adev;
 
         if (!ac)
                 return;
@@ -141,10 +142,11 @@ static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data)
                 msleep(ac_sleep_before_get_state_ms);
 
         acpi_ac_get_state(ac);
-        acpi_bus_generate_netlink_event(ac->adev->pnp.device_class,
+        adev = ACPI_COMPANION(&ac->pdev->dev);
+        acpi_bus_generate_netlink_event(adev->pnp.device_class,
                                         dev_name(&ac->pdev->dev),
                                         event, (u32) ac->state);
-        acpi_notifier_call_chain(ac->adev, event, (u32) ac->state);
+        acpi_notifier_call_chain(adev, event, (u32) ac->state);
         kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
 }
 
@@ -178,8 +180,8 @@ static int acpi_ac_probe(struct platform_device *pdev)
         if (!pdev)
                 return -EINVAL;
 
-        result = acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev);
-        if (result)
+        adev = ACPI_COMPANION(&pdev->dev);
+        if (!adev)
                 return -ENODEV;
 
         ac = kzalloc(sizeof(struct acpi_ac), GFP_KERNEL);
@@ -188,7 +190,6 @@ static int acpi_ac_probe(struct platform_device *pdev)
 
         strcpy(acpi_device_name(adev), ACPI_AC_DEVICE_NAME);
         strcpy(acpi_device_class(adev), ACPI_AC_CLASS);
-        ac->adev = adev;
         ac->pdev = pdev;
         platform_set_drvdata(pdev, ac);
 
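The ac.c conversion above is the pattern the rest of this series applies: instead of caching a struct acpi_device pointer in driver data, the ACPI companion is looked up from the struct device whenever it is needed, via the ACPI_COMPANION()/ACPI_HANDLE() accessors from <linux/acpi.h>. A minimal sketch of that pattern in a hypothetical platform driver (foo_probe and its error handling are illustrative, not part of the patch):

#include <linux/acpi.h>
#include <linux/platform_device.h>

/* Hypothetical probe: resolve the ACPI companion on demand instead of
 * caching an acpi_device pointer, mirroring the acpi_ac_probe() change. */
static int foo_probe(struct platform_device *pdev)
{
        struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
        acpi_handle handle = ACPI_HANDLE(&pdev->dev);

        if (!adev || !handle)
                return -ENODEV; /* no ACPI companion bound to this device */

        /* handle can be passed to acpi_evaluate_integer() and friends,
         * exactly as acpi_ac_get_state() now does. */
        return 0;
}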
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index d3961014aad7..6745fe137b9e 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -163,6 +163,15 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
         { "80860F41", (unsigned long)&byt_i2c_dev_desc },
         { "INT33B2", },
 
+        { "INT3430", (unsigned long)&lpt_dev_desc },
+        { "INT3431", (unsigned long)&lpt_dev_desc },
+        { "INT3432", (unsigned long)&lpt_dev_desc },
+        { "INT3433", (unsigned long)&lpt_dev_desc },
+        { "INT3434", (unsigned long)&lpt_uart_dev_desc },
+        { "INT3435", (unsigned long)&lpt_uart_dev_desc },
+        { "INT3436", (unsigned long)&lpt_sdio_dev_desc },
+        { "INT3437", },
+
         { }
 };
 
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index 8a4cfc7e71f0..dbfe49e5fd63 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -111,7 +111,7 @@ int acpi_create_platform_device(struct acpi_device *adev,
         pdevinfo.id = -1;
         pdevinfo.res = resources;
         pdevinfo.num_res = count;
-        pdevinfo.acpi_node.handle = adev->handle;
+        pdevinfo.acpi_node.companion = adev;
         pdev = platform_device_register_full(&pdevinfo);
         if (IS_ERR(pdev)) {
                 dev_err(&adev->dev, "platform device creation failed: %ld\n",
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index fb848378d582..078c4f7fe2dd 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -75,39 +75,6 @@ static struct acpi_blacklist_item acpi_blacklist[] __initdata = {
         {""}
 };
 
-#if CONFIG_ACPI_BLACKLIST_YEAR
-
-static int __init blacklist_by_year(void)
-{
-        int year;
-
-        /* Doesn't exist? Likely an old system */
-        if (!dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL)) {
-                printk(KERN_ERR PREFIX "no DMI BIOS year, "
-                        "acpi=force is required to enable ACPI\n" );
-                return 1;
-        }
-        /* 0? Likely a buggy new BIOS */
-        if (year == 0) {
-                printk(KERN_ERR PREFIX "DMI BIOS year==0, "
-                        "assuming ACPI-capable machine\n" );
-                return 0;
-        }
-        if (year < CONFIG_ACPI_BLACKLIST_YEAR) {
-                printk(KERN_ERR PREFIX "BIOS age (%d) fails cutoff (%d), "
-                        "acpi=force is required to enable ACPI\n",
-                        year, CONFIG_ACPI_BLACKLIST_YEAR);
-                return 1;
-        }
-        return 0;
-}
-#else
-static inline int blacklist_by_year(void)
-{
-        return 0;
-}
-#endif
-
 int __init acpi_blacklisted(void)
 {
         int i = 0;
@@ -166,8 +133,6 @@ int __init acpi_blacklisted(void)
                 }
         }
 
-        blacklisted += blacklist_by_year();
-
         dmi_check_system(acpi_osi_dmi_table);
 
         return blacklisted;
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index d42b2fb5a7e9..b3480cf7db1a 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -22,16 +22,12 @@
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
-#include <linux/device.h>
+#include <linux/acpi.h>
 #include <linux/export.h>
 #include <linux/mutex.h>
 #include <linux/pm_qos.h>
 #include <linux/pm_runtime.h>
 
-#include <acpi/acpi.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
-
 #include "internal.h"
 
 #define _COMPONENT ACPI_POWER_COMPONENT
@@ -548,7 +544,7 @@ static int acpi_dev_pm_get_state(struct device *dev, struct acpi_device *adev,
  */
 int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in)
 {
-        acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
+        acpi_handle handle = ACPI_HANDLE(dev);
         struct acpi_device *adev;
         int ret, d_min, d_max;
 
@@ -656,7 +652,7 @@ int acpi_pm_device_run_wake(struct device *phys_dev, bool enable)
         if (!device_run_wake(phys_dev))
                 return -EINVAL;
 
-        handle = DEVICE_ACPI_HANDLE(phys_dev);
+        handle = ACPI_HANDLE(phys_dev);
         if (!handle || acpi_bus_get_device(handle, &adev)) {
                 dev_dbg(phys_dev, "ACPI handle without context in %s!\n",
                         __func__);
@@ -700,7 +696,7 @@ int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
         if (!device_can_wakeup(dev))
                 return -EINVAL;
 
-        handle = DEVICE_ACPI_HANDLE(dev);
+        handle = ACPI_HANDLE(dev);
         if (!handle || acpi_bus_get_device(handle, &adev)) {
                 dev_dbg(dev, "ACPI handle without context in %s!\n", __func__);
                 return -ENODEV;
@@ -722,7 +718,7 @@ int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
  */
 struct acpi_device *acpi_dev_pm_get_node(struct device *dev)
 {
-        acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
+        acpi_handle handle = ACPI_HANDLE(dev);
         struct acpi_device *adev;
 
         return handle && !acpi_bus_get_device(handle, &adev) ? adev : NULL;
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index d5309fd49458..ba5b56db9d27 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -173,9 +173,10 @@ static void start_transaction(struct acpi_ec *ec)
 static void advance_transaction(struct acpi_ec *ec, u8 status)
 {
         unsigned long flags;
-        struct transaction *t = ec->curr;
+        struct transaction *t;
 
         spin_lock_irqsave(&ec->lock, flags);
+        t = ec->curr;
         if (!t)
                 goto unlock;
         if (t->wlen > t->wi) {
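The one-line move in advance_transaction() is a classic correctness fix: a pointer to shared state must be loaded after the lock protecting it is taken, otherwise the function can operate on a transaction that another CPU has already completed and freed. The shape of the fix, reduced to a self-contained sketch (type and field names are illustrative):

#include <linux/spinlock.h>

struct txn { int busy; };

struct ctrl {
        spinlock_t lock;
        struct txn *curr;       /* protected by lock */
};

static void advance(struct ctrl *c)
{
        struct txn *t;
        unsigned long flags;

        spin_lock_irqsave(&c->lock, flags);
        t = c->curr;            /* read under the lock, not before taking it */
        if (t)
                t->busy = 1;
        spin_unlock_irqrestore(&c->lock, flags);
}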
diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c
index fdef416c0ff6..cae3b387b867 100644
--- a/drivers/acpi/event.c
+++ b/drivers/acpi/event.c
@@ -78,15 +78,17 @@ enum {
 #define ACPI_GENL_VERSION 0x01
 #define ACPI_GENL_MCAST_GROUP_NAME "acpi_mc_group"
 
+static const struct genl_multicast_group acpi_event_mcgrps[] = {
+        { .name = ACPI_GENL_MCAST_GROUP_NAME, },
+};
+
 static struct genl_family acpi_event_genl_family = {
         .id = GENL_ID_GENERATE,
         .name = ACPI_GENL_FAMILY_NAME,
         .version = ACPI_GENL_VERSION,
         .maxattr = ACPI_GENL_ATTR_MAX,
-};
-
-static struct genl_multicast_group acpi_event_mcgrp = {
-        .name = ACPI_GENL_MCAST_GROUP_NAME,
+        .mcgrps = acpi_event_mcgrps,
+        .n_mcgrps = ARRAY_SIZE(acpi_event_mcgrps),
 };
 
 int acpi_bus_generate_netlink_event(const char *device_class,
@@ -141,7 +143,7 @@ int acpi_bus_generate_netlink_event(const char *device_class,
                 return result;
         }
 
-        genlmsg_multicast(skb, 0, acpi_event_mcgrp.id, GFP_ATOMIC);
+        genlmsg_multicast(&acpi_event_genl_family, skb, 0, 0, GFP_ATOMIC);
         return 0;
 }
 
@@ -149,18 +151,7 @@ EXPORT_SYMBOL(acpi_bus_generate_netlink_event);
 
 static int acpi_event_genetlink_init(void)
 {
-        int result;
-
-        result = genl_register_family(&acpi_event_genl_family);
-        if (result)
-                return result;
-
-        result = genl_register_mc_group(&acpi_event_genl_family,
-                                        &acpi_event_mcgrp);
-        if (result)
-                genl_unregister_family(&acpi_event_genl_family);
-
-        return result;
+        return genl_register_family(&acpi_event_genl_family);
 }
 
 #else
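The event.c hunks track a genetlink API change: multicast groups are no longer registered one at a time with genl_register_mc_group() after the family; they are listed in a static array referenced from the family through .mcgrps/.n_mcgrps, and a single genl_register_family() call registers both. Senders then identify the group by its index in that array (0 here) rather than by a runtime-allocated id. A condensed sketch of the new shape (the "demo" names are illustrative):

#include <net/genetlink.h>

static const struct genl_multicast_group demo_mcgrps[] = {
        { .name = "demo_mc_group", },
};

static struct genl_family demo_genl_family = {
        .id = GENL_ID_GENERATE,
        .name = "demo_family",
        .version = 1,
        .maxattr = 0,
        .mcgrps = demo_mcgrps,  /* groups registered together with the family */
        .n_mcgrps = ARRAY_SIZE(demo_mcgrps),
};

static int demo_genetlink_init(void)
{
        /* One call registers the family and all of its multicast groups. */
        return genl_register_family(&demo_genl_family);
}

/* Delivery names the group by its array index (0), not a global id:
 *      genlmsg_multicast(&demo_genl_family, skb, 0, 0, GFP_ATOMIC);
 */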
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 10f0f40587bb..a22a295edb69 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -197,30 +197,28 @@ static void acpi_physnode_link_name(char *buf, unsigned int node_id)
 
 int acpi_bind_one(struct device *dev, acpi_handle handle)
 {
-        struct acpi_device *acpi_dev;
-        acpi_status status;
+        struct acpi_device *acpi_dev = NULL;
         struct acpi_device_physical_node *physical_node, *pn;
         char physical_node_name[PHYSICAL_NODE_NAME_SIZE];
         struct list_head *physnode_list;
         unsigned int node_id;
         int retval = -EINVAL;
 
-        if (ACPI_HANDLE(dev)) {
+        if (ACPI_COMPANION(dev)) {
                 if (handle) {
-                        dev_warn(dev, "ACPI handle is already set\n");
+                        dev_warn(dev, "ACPI companion already set\n");
                         return -EINVAL;
                 } else {
-                        handle = ACPI_HANDLE(dev);
+                        acpi_dev = ACPI_COMPANION(dev);
                 }
+        } else {
+                acpi_bus_get_device(handle, &acpi_dev);
         }
-        if (!handle)
+        if (!acpi_dev)
                 return -EINVAL;
 
+        get_device(&acpi_dev->dev);
         get_device(dev);
-        status = acpi_bus_get_device(handle, &acpi_dev);
-        if (ACPI_FAILURE(status))
-                goto err;
-
         physical_node = kzalloc(sizeof(*physical_node), GFP_KERNEL);
         if (!physical_node) {
                 retval = -ENOMEM;
@@ -242,10 +240,11 @@ int acpi_bind_one(struct device *dev, acpi_handle handle)
 
                         dev_warn(dev, "Already associated with ACPI node\n");
                         kfree(physical_node);
-                        if (ACPI_HANDLE(dev) != handle)
+                        if (ACPI_COMPANION(dev) != acpi_dev)
                                 goto err;
 
                         put_device(dev);
+                        put_device(&acpi_dev->dev);
                         return 0;
                 }
                 if (pn->node_id == node_id) {
@@ -259,8 +258,8 @@ int acpi_bind_one(struct device *dev, acpi_handle handle)
         list_add(&physical_node->node, physnode_list);
         acpi_dev->physical_node_count++;
 
-        if (!ACPI_HANDLE(dev))
-                ACPI_HANDLE_SET(dev, acpi_dev->handle);
+        if (!ACPI_COMPANION(dev))
+                ACPI_COMPANION_SET(dev, acpi_dev);
 
         acpi_physnode_link_name(physical_node_name, node_id);
         retval = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj,
@@ -283,27 +282,21 @@ int acpi_bind_one(struct device *dev, acpi_handle handle)
         return 0;
 
  err:
-        ACPI_HANDLE_SET(dev, NULL);
+        ACPI_COMPANION_SET(dev, NULL);
         put_device(dev);
+        put_device(&acpi_dev->dev);
         return retval;
 }
 EXPORT_SYMBOL_GPL(acpi_bind_one);
 
 int acpi_unbind_one(struct device *dev)
 {
+        struct acpi_device *acpi_dev = ACPI_COMPANION(dev);
         struct acpi_device_physical_node *entry;
-        struct acpi_device *acpi_dev;
-        acpi_status status;
 
-        if (!ACPI_HANDLE(dev))
+        if (!acpi_dev)
                 return 0;
 
-        status = acpi_bus_get_device(ACPI_HANDLE(dev), &acpi_dev);
-        if (ACPI_FAILURE(status)) {
-                dev_err(dev, "Oops, ACPI handle corrupt in %s()\n", __func__);
-                return -EINVAL;
-        }
-
         mutex_lock(&acpi_dev->physical_node_lock);
 
         list_for_each_entry(entry, &acpi_dev->physical_node_list, node)
@@ -316,9 +309,10 @@ int acpi_unbind_one(struct device *dev)
                         acpi_physnode_link_name(physnode_name, entry->node_id);
                         sysfs_remove_link(&acpi_dev->dev.kobj, physnode_name);
                         sysfs_remove_link(&dev->kobj, "firmware_node");
-                        ACPI_HANDLE_SET(dev, NULL);
-                        /* acpi_bind_one() increase refcnt by one. */
+                        ACPI_COMPANION_SET(dev, NULL);
+                        /* Drop references taken by acpi_bind_one(). */
                         put_device(dev);
+                        put_device(&acpi_dev->dev);
                         kfree(entry);
                         break;
                 }
@@ -328,6 +322,15 @@ int acpi_unbind_one(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(acpi_unbind_one);
 
+void acpi_preset_companion(struct device *dev, acpi_handle parent, u64 addr)
+{
+        struct acpi_device *adev;
+
+        if (!acpi_bus_get_device(acpi_get_child(parent, addr), &adev))
+                ACPI_COMPANION_SET(dev, adev);
+}
+EXPORT_SYMBOL_GPL(acpi_preset_companion);
+
 static int acpi_platform_notify(struct device *dev)
 {
         struct acpi_bus_type *type = acpi_get_bus_type(dev);
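The new acpi_preset_companion() helper packages the open-coded idiom that subsystems used when creating sub-devices under an ACPI parent: look up the child node by address with acpi_get_child() and attach it to the struct device. Callers convert roughly like this (sub is an illustrative device; the libata hunks below show the real conversions):

/* Before: stash a raw handle found by address under the parent. */
ACPI_HANDLE_SET(&sub->dev, acpi_get_child(parent_handle, addr));

/* After: bind the struct acpi_device companion in one call. */
acpi_preset_companion(&sub->dev, parent_handle, addr);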
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 56f05869b08d..0703bff5e60e 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -575,6 +575,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
                 dev_err(&device->dev,
                         "Bus %04x:%02x not present in PCI namespace\n",
                         root->segment, (unsigned int)root->secondary.start);
+                device->driver_data = NULL;
                 result = -ENODEV;
                 goto end;
         }
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 55f9dedbbf9f..15daa21fcd05 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -289,24 +289,17 @@ void acpi_bus_device_eject(void *data, u32 ost_src)
 {
         struct acpi_device *device = data;
         acpi_handle handle = device->handle;
-        struct acpi_scan_handler *handler;
         u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
         int error;
 
         lock_device_hotplug();
         mutex_lock(&acpi_scan_lock);
 
-        handler = device->handler;
-        if (!handler || !handler->hotplug.enabled) {
-                put_device(&device->dev);
-                goto err_support;
-        }
-
         if (ost_src == ACPI_NOTIFY_EJECT_REQUEST)
                 acpi_evaluate_hotplug_ost(handle, ACPI_NOTIFY_EJECT_REQUEST,
                                           ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
 
-        if (handler->hotplug.mode == AHM_CONTAINER)
+        if (device->handler && device->handler->hotplug.mode == AHM_CONTAINER)
                 kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
 
         error = acpi_scan_hot_remove(device);
@@ -411,8 +404,7 @@ static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data)
                 break;
         case ACPI_NOTIFY_EJECT_REQUEST:
                 acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n");
-                status = acpi_bus_get_device(handle, &adev);
-                if (ACPI_FAILURE(status))
+                if (acpi_bus_get_device(handle, &adev))
                         goto err_out;
 
                 get_device(&adev->dev);
@@ -1997,6 +1989,7 @@ static int acpi_bus_scan_fixed(void)
                 if (result)
                         return result;
 
+                device->flags.match_driver = true;
                 result = device_attach(&device->dev);
                 if (result < 0)
                         return result;
@@ -2013,6 +2006,7 @@ static int acpi_bus_scan_fixed(void)
                 if (result)
                         return result;
 
+                device->flags.match_driver = true;
                 result = device_attach(&device->dev);
         }
 
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 18dbdff4656e..995e91bcb97b 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -82,13 +82,6 @@ static bool allow_duplicates;
 module_param(allow_duplicates, bool, 0644);
 
 /*
- * Some BIOSes claim they use minimum backlight at boot,
- * and this may bring dimming screen after boot
- */
-static bool use_bios_initial_backlight = 1;
-module_param(use_bios_initial_backlight, bool, 0644);
-
-/*
  * For Windows 8 systems: if set ture and the GPU driver has
  * registered a backlight interface, skip registering ACPI video's.
  */
@@ -406,12 +399,6 @@ static int __init video_set_bqc_offset(const struct dmi_system_id *d)
         return 0;
 }
 
-static int video_ignore_initial_backlight(const struct dmi_system_id *d)
-{
-        use_bios_initial_backlight = 0;
-        return 0;
-}
-
 static struct dmi_system_id video_dmi_table[] __initdata = {
         /*
          * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
@@ -456,54 +443,6 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
                 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"),
                 },
         },
-        {
-         .callback = video_ignore_initial_backlight,
-         .ident = "HP Folio 13-2000",
-         .matches = {
-                DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
-                DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13 - 2000 Notebook PC"),
-                },
-        },
-        {
-         .callback = video_ignore_initial_backlight,
-         .ident = "Fujitsu E753",
-         .matches = {
-                DMI_MATCH(DMI_BOARD_VENDOR, "FUJITSU"),
-                DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E753"),
-                },
-        },
-        {
-         .callback = video_ignore_initial_backlight,
-         .ident = "HP Pavilion dm4",
-         .matches = {
-                DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
-                DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dm4 Notebook PC"),
-                },
-        },
-        {
-         .callback = video_ignore_initial_backlight,
-         .ident = "HP Pavilion g6 Notebook PC",
-         .matches = {
-                DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
-                DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion g6 Notebook PC"),
-                },
-        },
-        {
-         .callback = video_ignore_initial_backlight,
-         .ident = "HP 1000 Notebook PC",
-         .matches = {
-                DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
-                DMI_MATCH(DMI_PRODUCT_NAME, "HP 1000 Notebook PC"),
-                },
-        },
-        {
-         .callback = video_ignore_initial_backlight,
-         .ident = "HP Pavilion m4",
-         .matches = {
-                DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
-                DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion m4 Notebook PC"),
-                },
-        },
         {}
 };
 
@@ -839,20 +778,18 @@ acpi_video_init_brightness(struct acpi_video_device *device)
         if (!device->cap._BQC)
                 goto set_level;
 
-        if (use_bios_initial_backlight) {
-                level = acpi_video_bqc_value_to_level(device, level_old);
-                /*
-                 * On some buggy laptops, _BQC returns an uninitialized
-                 * value when invoked for the first time, i.e.
-                 * level_old is invalid (no matter whether it's a level
-                 * or an index). Set the backlight to max_level in this case.
-                 */
-                for (i = 2; i < br->count; i++)
-                        if (level == br->levels[i])
-                                break;
-                if (i == br->count || !level)
-                        level = max_level;
-        }
+        level = acpi_video_bqc_value_to_level(device, level_old);
+        /*
+         * On some buggy laptops, _BQC returns an uninitialized
+         * value when invoked for the first time, i.e.
+         * level_old is invalid (no matter whether it's a level
+         * or an index). Set the backlight to max_level in this case.
+         */
+        for (i = 2; i < br->count; i++)
+                if (level == br->levels[i])
+                        break;
+        if (i == br->count || !level)
+                level = max_level;
 
 set_level:
         result = acpi_video_device_lcd_set_level(device, level);
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index ab714d2ad978..4372cfa883c9 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -185,7 +185,7 @@ void ata_acpi_bind_port(struct ata_port *ap)
         if (libata_noacpi || ap->flags & ATA_FLAG_ACPI_SATA || !host_handle)
                 return;
 
-        ACPI_HANDLE_SET(&ap->tdev, acpi_get_child(host_handle, ap->port_no));
+        acpi_preset_companion(&ap->tdev, host_handle, ap->port_no);
 
         if (ata_acpi_gtm(ap, &ap->__acpi_init_gtm) == 0)
                 ap->pflags |= ATA_PFLAG_INIT_GTM_VALID;
@@ -222,7 +222,7 @@ void ata_acpi_bind_dev(struct ata_device *dev)
                 parent_handle = port_handle;
         }
 
-        ACPI_HANDLE_SET(&dev->tdev, acpi_get_child(parent_handle, adr));
+        acpi_preset_companion(&dev->tdev, parent_handle, adr);
 
         register_hotplug_dock_device(ata_dev_acpi_handle(dev),
                                      &ata_acpi_dev_dock_ops, dev, NULL, NULL);
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 853f610af28f..e88690ebfd82 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -396,8 +396,7 @@ dma_xfer(struct arasan_cf_dev *acdev, dma_addr_t src, dma_addr_t dest, u32 len)
         struct dma_async_tx_descriptor *tx;
         struct dma_chan *chan = acdev->dma_chan;
         dma_cookie_t cookie;
-        unsigned long flags = DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP |
-                DMA_COMPL_SKIP_DEST_UNMAP;
+        unsigned long flags = DMA_PREP_INTERRUPT;
         int ret = 0;
 
         tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len, flags);
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 272f00927761..1bdf104e90bb 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -3511,7 +3511,7 @@ static int init_card(struct atm_dev *dev)
         tmp = dev_get_by_name(&init_net, tname);        /* jhs: was "tmp = dev_get(tname);" */
         if (tmp) {
                 memcpy(card->atmdev->esi, tmp->dev_addr, 6);
-
+                dev_put(tmp);
                 printk("%s: ESI %pM\n", card->name, card->atmdev->esi);
         }
         /*
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 47051cd25113..3a94b799f166 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -432,7 +432,7 @@ struct platform_device *platform_device_register_full(
                 goto err_alloc;
 
         pdev->dev.parent = pdevinfo->parent;
-        ACPI_HANDLE_SET(&pdev->dev, pdevinfo->acpi_node.handle);
+        ACPI_COMPANION_SET(&pdev->dev, pdevinfo->acpi_node.companion);
 
         if (pdevinfo->dma_mask) {
                 /*
@@ -463,7 +463,7 @@ struct platform_device *platform_device_register_full(
         ret = platform_device_add(pdev);
         if (ret) {
 err:
-                ACPI_HANDLE_SET(&pdev->dev, NULL);
+                ACPI_COMPANION_SET(&pdev->dev, NULL);
                 kfree(pdev->dev.dma_mask);
 
 err_alloc:
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index c12e9b9556be..1b41fca3d65a 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1350,6 +1350,9 @@ static int device_prepare(struct device *dev, pm_message_t state)
 
         device_unlock(dev);
 
+        if (error)
+                pm_runtime_put(dev);
+
         return error;
 }
 
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 588479d58f52..6a680d4de7f1 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -199,15 +199,16 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
 
         spin_lock_irqsave(&vblk->vq_lock, flags);
         if (__virtblk_add_req(vblk->vq, vbr, vbr->sg, num) < 0) {
+                virtqueue_kick(vblk->vq);
                 spin_unlock_irqrestore(&vblk->vq_lock, flags);
                 blk_mq_stop_hw_queue(hctx);
-                virtqueue_kick(vblk->vq);
                 return BLK_MQ_RQ_QUEUE_BUSY;
         }
-        spin_unlock_irqrestore(&vblk->vq_lock, flags);
 
         if (last)
                 virtqueue_kick(vblk->vq);
+
+        spin_unlock_irqrestore(&vblk->vq_lock, flags);
         return BLK_MQ_RQ_QUEUE_OK;
 }
 
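The virtio_blk reordering keeps every virtqueue_kick() inside the vq_lock critical section: kicking notifies the host based on ring state, so kicking after dropping the lock can race with another CPU adding buffers to the same virtqueue. Read as a whole, the queueing path after this change looks like:

        spin_lock_irqsave(&vblk->vq_lock, flags);
        if (__virtblk_add_req(vblk->vq, vbr, vbr->sg, num) < 0) {
                virtqueue_kick(vblk->vq);       /* flush what is queued, under the lock */
                spin_unlock_irqrestore(&vblk->vq_lock, flags);
                blk_mq_stop_hw_queue(hctx);
                return BLK_MQ_RQ_QUEUE_BUSY;
        }

        if (last)
                virtqueue_kick(vblk->vq);       /* still under vq_lock */

        spin_unlock_irqrestore(&vblk->vq_lock, flags);
        return BLK_MQ_RQ_QUEUE_OK;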
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index c73fc2b74de2..18c5b9b16645 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -32,11 +32,23 @@
 #include <linux/atomic.h>
 #include <linux/pid_namespace.h>
 
-#include <asm/unaligned.h>
-
 #include <linux/cn_proc.h>
 
-#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event))
+/*
+ * Size of a cn_msg followed by a proc_event structure.  Since the
+ * sizeof struct cn_msg is a multiple of 4 bytes, but not 8 bytes, we
+ * add one 4-byte word to the size here, and then start the actual
+ * cn_msg structure 4 bytes into the stack buffer.  The result is that
+ * the immediately following proc_event structure is aligned to 8 bytes.
+ */
+#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event) + 4)
+
+/* See comment above; we test our assumption about sizeof struct cn_msg here. */
+static inline struct cn_msg *buffer_to_cn_msg(__u8 *buffer)
+{
+        BUILD_BUG_ON(sizeof(struct cn_msg) != 20);
+        return (struct cn_msg *)(buffer + 4);
+}
 
 static atomic_t proc_event_num_listeners = ATOMIC_INIT(0);
 static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };
@@ -56,19 +68,19 @@ void proc_fork_connector(struct task_struct *task)
 {
         struct cn_msg *msg;
         struct proc_event *ev;
-        __u8 buffer[CN_PROC_MSG_SIZE];
+        __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
         struct timespec ts;
         struct task_struct *parent;
 
         if (atomic_read(&proc_event_num_listeners) < 1)
                 return;
 
-        msg = (struct cn_msg *)buffer;
+        msg = buffer_to_cn_msg(buffer);
         ev = (struct proc_event *)msg->data;
         memset(&ev->event_data, 0, sizeof(ev->event_data));
         get_seq(&msg->seq, &ev->cpu);
         ktime_get_ts(&ts); /* get high res monotonic timestamp */
-        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+        ev->timestamp_ns = timespec_to_ns(&ts);
         ev->what = PROC_EVENT_FORK;
         rcu_read_lock();
         parent = rcu_dereference(task->real_parent);
@@ -91,17 +103,17 @@ void proc_exec_connector(struct task_struct *task)
         struct cn_msg *msg;
         struct proc_event *ev;
         struct timespec ts;
-        __u8 buffer[CN_PROC_MSG_SIZE];
+        __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
 
         if (atomic_read(&proc_event_num_listeners) < 1)
                 return;
 
-        msg = (struct cn_msg *)buffer;
+        msg = buffer_to_cn_msg(buffer);
         ev = (struct proc_event *)msg->data;
         memset(&ev->event_data, 0, sizeof(ev->event_data));
         get_seq(&msg->seq, &ev->cpu);
         ktime_get_ts(&ts); /* get high res monotonic timestamp */
-        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+        ev->timestamp_ns = timespec_to_ns(&ts);
         ev->what = PROC_EVENT_EXEC;
         ev->event_data.exec.process_pid = task->pid;
         ev->event_data.exec.process_tgid = task->tgid;
@@ -117,14 +129,14 @@ void proc_id_connector(struct task_struct *task, int which_id)
 {
         struct cn_msg *msg;
         struct proc_event *ev;
-        __u8 buffer[CN_PROC_MSG_SIZE];
+        __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
         struct timespec ts;
         const struct cred *cred;
 
         if (atomic_read(&proc_event_num_listeners) < 1)
                 return;
 
-        msg = (struct cn_msg *)buffer;
+        msg = buffer_to_cn_msg(buffer);
         ev = (struct proc_event *)msg->data;
         memset(&ev->event_data, 0, sizeof(ev->event_data));
         ev->what = which_id;
@@ -145,7 +157,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
         rcu_read_unlock();
         get_seq(&msg->seq, &ev->cpu);
         ktime_get_ts(&ts); /* get high res monotonic timestamp */
-        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+        ev->timestamp_ns = timespec_to_ns(&ts);
 
         memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
         msg->ack = 0; /* not used */
@@ -159,17 +171,17 @@ void proc_sid_connector(struct task_struct *task)
         struct cn_msg *msg;
         struct proc_event *ev;
         struct timespec ts;
-        __u8 buffer[CN_PROC_MSG_SIZE];
+        __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
 
         if (atomic_read(&proc_event_num_listeners) < 1)
                 return;
 
-        msg = (struct cn_msg *)buffer;
+        msg = buffer_to_cn_msg(buffer);
         ev = (struct proc_event *)msg->data;
         memset(&ev->event_data, 0, sizeof(ev->event_data));
         get_seq(&msg->seq, &ev->cpu);
         ktime_get_ts(&ts); /* get high res monotonic timestamp */
-        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+        ev->timestamp_ns = timespec_to_ns(&ts);
         ev->what = PROC_EVENT_SID;
         ev->event_data.sid.process_pid = task->pid;
         ev->event_data.sid.process_tgid = task->tgid;
@@ -186,17 +198,17 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
         struct cn_msg *msg;
         struct proc_event *ev;
         struct timespec ts;
-        __u8 buffer[CN_PROC_MSG_SIZE];
+        __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
 
         if (atomic_read(&proc_event_num_listeners) < 1)
                 return;
 
-        msg = (struct cn_msg *)buffer;
+        msg = buffer_to_cn_msg(buffer);
         ev = (struct proc_event *)msg->data;
         memset(&ev->event_data, 0, sizeof(ev->event_data));
         get_seq(&msg->seq, &ev->cpu);
         ktime_get_ts(&ts); /* get high res monotonic timestamp */
-        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+        ev->timestamp_ns = timespec_to_ns(&ts);
         ev->what = PROC_EVENT_PTRACE;
         ev->event_data.ptrace.process_pid = task->pid;
         ev->event_data.ptrace.process_tgid = task->tgid;
@@ -221,17 +233,17 @@ void proc_comm_connector(struct task_struct *task)
         struct cn_msg *msg;
         struct proc_event *ev;
         struct timespec ts;
-        __u8 buffer[CN_PROC_MSG_SIZE];
+        __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
 
         if (atomic_read(&proc_event_num_listeners) < 1)
                 return;
 
-        msg = (struct cn_msg *)buffer;
+        msg = buffer_to_cn_msg(buffer);
         ev = (struct proc_event *)msg->data;
         memset(&ev->event_data, 0, sizeof(ev->event_data));
         get_seq(&msg->seq, &ev->cpu);
         ktime_get_ts(&ts); /* get high res monotonic timestamp */
-        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+        ev->timestamp_ns = timespec_to_ns(&ts);
         ev->what = PROC_EVENT_COMM;
         ev->event_data.comm.process_pid = task->pid;
         ev->event_data.comm.process_tgid = task->tgid;
@@ -248,18 +260,18 @@ void proc_coredump_connector(struct task_struct *task)
 {
         struct cn_msg *msg;
         struct proc_event *ev;
-        __u8 buffer[CN_PROC_MSG_SIZE];
+        __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
         struct timespec ts;
 
         if (atomic_read(&proc_event_num_listeners) < 1)
                 return;
 
-        msg = (struct cn_msg *)buffer;
+        msg = buffer_to_cn_msg(buffer);
         ev = (struct proc_event *)msg->data;
         memset(&ev->event_data, 0, sizeof(ev->event_data));
         get_seq(&msg->seq, &ev->cpu);
         ktime_get_ts(&ts); /* get high res monotonic timestamp */
-        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+        ev->timestamp_ns = timespec_to_ns(&ts);
         ev->what = PROC_EVENT_COREDUMP;
         ev->event_data.coredump.process_pid = task->pid;
         ev->event_data.coredump.process_tgid = task->tgid;
@@ -275,18 +287,18 @@ void proc_exit_connector(struct task_struct *task)
 {
         struct cn_msg *msg;
         struct proc_event *ev;
-        __u8 buffer[CN_PROC_MSG_SIZE];
+        __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
         struct timespec ts;
 
         if (atomic_read(&proc_event_num_listeners) < 1)
                 return;
 
-        msg = (struct cn_msg *)buffer;
+        msg = buffer_to_cn_msg(buffer);
         ev = (struct proc_event *)msg->data;
         memset(&ev->event_data, 0, sizeof(ev->event_data));
         get_seq(&msg->seq, &ev->cpu);
         ktime_get_ts(&ts); /* get high res monotonic timestamp */
-        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+        ev->timestamp_ns = timespec_to_ns(&ts);
         ev->what = PROC_EVENT_EXIT;
         ev->event_data.exit.process_pid = task->pid;
         ev->event_data.exit.process_tgid = task->tgid;
@@ -312,18 +324,18 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
 {
         struct cn_msg *msg;
         struct proc_event *ev;
-        __u8 buffer[CN_PROC_MSG_SIZE];
+        __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
         struct timespec ts;
 
         if (atomic_read(&proc_event_num_listeners) < 1)
                 return;
 
-        msg = (struct cn_msg *)buffer;
+        msg = buffer_to_cn_msg(buffer);
         ev = (struct proc_event *)msg->data;
         memset(&ev->event_data, 0, sizeof(ev->event_data));
         msg->seq = rcvd_seq;
         ktime_get_ts(&ts); /* get high res monotonic timestamp */
-        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+        ev->timestamp_ns = timespec_to_ns(&ts);
         ev->cpu = -1;
         ev->what = PROC_EVENT_NONE;
         ev->event_data.ack.err = err;
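The cn_proc changes fix unaligned 8-byte stores on architectures that trap on them. sizeof(struct cn_msg) is 20, so a proc_event placed immediately after it in a stack buffer starts on a 4-byte boundary; aligning the buffer to 8 and starting cn_msg 4 bytes in moves the payload to offset 24, which is 8-byte aligned, letting timestamp_ns be assigned directly instead of through put_unaligned(). The arithmetic as a standalone userspace check (stand-in sizes, not kernel code):

#include <assert.h>
#include <stdint.h>

#define CN_MSG_SIZE 20  /* the sizeof(struct cn_msg) assumed by the BUILD_BUG_ON above */

int main(void)
{
        /* Buffer aligned to 8; the message starts 4 bytes into it. */
        uint8_t buffer[CN_MSG_SIZE + 64] __attribute__((aligned(8)));
        uintptr_t msg = (uintptr_t)(buffer + 4);
        uintptr_t payload = msg + CN_MSG_SIZE;  /* where proc_event begins */

        assert(payload % 8 == 0);       /* (4 + 20) % 8 == 0 */
        return 0;
}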
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 218460fcd2e4..25a70d06c5bf 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -68,6 +68,9 @@ static void cs_check_cpu(int cpu, unsigned int load)
 
                 dbs_info->requested_freq += get_freq_target(cs_tuners, policy);
 
+                if (dbs_info->requested_freq > policy->max)
+                        dbs_info->requested_freq = policy->max;
+
                 __cpufreq_driver_target(policy, dbs_info->requested_freq,
                         CPUFREQ_RELATION_H);
                 return;
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 0806c31e5764..e6be63561fa6 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -328,10 +328,6 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                                 dbs_data->cdata->gov_dbs_timer);
                 }
 
-                /*
-                 * conservative does not implement micro like ondemand
-                 * governor, thus we are bound to jiffes/HZ
-                 */
                 if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
                         cs_dbs_info->down_skip = 0;
                         cs_dbs_info->enable = 1;
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index be6d14307aa8..a0acd0bfba40 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -53,6 +53,7 @@ static unsigned int omap_getspeed(unsigned int cpu)
 
 static int omap_target(struct cpufreq_policy *policy, unsigned int index)
 {
+        int r, ret;
         struct dev_pm_opp *opp;
         unsigned long freq, volt = 0, volt_old = 0, tol = 0;
         unsigned int old_freq, new_freq;
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index dd2874ec1927..446687cc2334 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -89,14 +89,15 @@ config AT_HDMAC
           Support the Atmel AHB DMA controller.
 
 config FSL_DMA
-        tristate "Freescale Elo and Elo Plus DMA support"
+        tristate "Freescale Elo series DMA support"
         depends on FSL_SOC
         select DMA_ENGINE
         select ASYNC_TX_ENABLE_CHANNEL_SWITCH
         ---help---
-          Enable support for the Freescale Elo and Elo Plus DMA controllers.
-          The Elo is the DMA controller on some 82xx and 83xx parts, and the
-          Elo Plus is the DMA controller on 85xx and 86xx parts.
+          Enable support for the Freescale Elo series DMA controllers.
+          The Elo is the DMA controller on some mpc82xx and mpc83xx parts, the
+          EloPlus is on mpc85xx and mpc86xx and Pxxx parts, and the Elo3 is on
+          some Txxx and Bxxx parts.
 
 config MPC512X_DMA
         tristate "Freescale MPC512x built-in DMA engine support"
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index e51a9832ef0d..16a2aa28f856 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c | |||
| @@ -1164,42 +1164,12 @@ static void pl08x_free_txd(struct pl08x_driver_data *pl08x, | |||
| 1164 | kfree(txd); | 1164 | kfree(txd); |
| 1165 | } | 1165 | } |
| 1166 | 1166 | ||
| 1167 | static void pl08x_unmap_buffers(struct pl08x_txd *txd) | ||
| 1168 | { | ||
| 1169 | struct device *dev = txd->vd.tx.chan->device->dev; | ||
| 1170 | struct pl08x_sg *dsg; | ||
| 1171 | |||
| 1172 | if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
| 1173 | if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) | ||
| 1174 | list_for_each_entry(dsg, &txd->dsg_list, node) | ||
| 1175 | dma_unmap_single(dev, dsg->src_addr, dsg->len, | ||
| 1176 | DMA_TO_DEVICE); | ||
| 1177 | else { | ||
| 1178 | list_for_each_entry(dsg, &txd->dsg_list, node) | ||
| 1179 | dma_unmap_page(dev, dsg->src_addr, dsg->len, | ||
| 1180 | DMA_TO_DEVICE); | ||
| 1181 | } | ||
| 1182 | } | ||
| 1183 | if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
| 1184 | if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) | ||
| 1185 | list_for_each_entry(dsg, &txd->dsg_list, node) | ||
| 1186 | dma_unmap_single(dev, dsg->dst_addr, dsg->len, | ||
| 1187 | DMA_FROM_DEVICE); | ||
| 1188 | else | ||
| 1189 | list_for_each_entry(dsg, &txd->dsg_list, node) | ||
| 1190 | dma_unmap_page(dev, dsg->dst_addr, dsg->len, | ||
| 1191 | DMA_FROM_DEVICE); | ||
| 1192 | } | ||
| 1193 | } | ||
| 1194 | |||
| 1195 | static void pl08x_desc_free(struct virt_dma_desc *vd) | 1167 | static void pl08x_desc_free(struct virt_dma_desc *vd) |
| 1196 | { | 1168 | { |
| 1197 | struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); | 1169 | struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); |
| 1198 | struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan); | 1170 | struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan); |
| 1199 | 1171 | ||
| 1200 | if (!plchan->slave) | 1172 | dma_descriptor_unmap(txd); |
| 1201 | pl08x_unmap_buffers(txd); | ||
| 1202 | |||
| 1203 | if (!txd->done) | 1173 | if (!txd->done) |
| 1204 | pl08x_release_mux(plchan); | 1174 | pl08x_release_mux(plchan); |
| 1205 | 1175 | ||
| @@ -1252,7 +1222,7 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan, | |||
| 1252 | size_t bytes = 0; | 1222 | size_t bytes = 0; |
| 1253 | 1223 | ||
| 1254 | ret = dma_cookie_status(chan, cookie, txstate); | 1224 | ret = dma_cookie_status(chan, cookie, txstate); |
| 1255 | if (ret == DMA_SUCCESS) | 1225 | if (ret == DMA_COMPLETE) |
| 1256 | return ret; | 1226 | return ret; |
| 1257 | 1227 | ||
| 1258 | /* | 1228 | /* |
| @@ -1267,7 +1237,7 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan, | |||
| 1267 | 1237 | ||
| 1268 | spin_lock_irqsave(&plchan->vc.lock, flags); | 1238 | spin_lock_irqsave(&plchan->vc.lock, flags); |
| 1269 | ret = dma_cookie_status(chan, cookie, txstate); | 1239 | ret = dma_cookie_status(chan, cookie, txstate); |
| 1270 | if (ret != DMA_SUCCESS) { | 1240 | if (ret != DMA_COMPLETE) { |
| 1271 | vd = vchan_find_desc(&plchan->vc, cookie); | 1241 | vd = vchan_find_desc(&plchan->vc, cookie); |
| 1272 | if (vd) { | 1242 | if (vd) { |
| 1273 | /* On the issued list, so hasn't been processed yet */ | 1243 | /* On the issued list, so hasn't been processed yet */ |
| @@ -2138,8 +2108,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) | |||
| 2138 | writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); | 2108 | writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); |
| 2139 | writel(0x000000FF, pl08x->base + PL080_TC_CLEAR); | 2109 | writel(0x000000FF, pl08x->base + PL080_TC_CLEAR); |
| 2140 | 2110 | ||
| 2141 | ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED, | 2111 | ret = request_irq(adev->irq[0], pl08x_irq, 0, DRIVER_NAME, pl08x); |
| 2142 | DRIVER_NAME, pl08x); | ||
| 2143 | if (ret) { | 2112 | if (ret) { |
| 2144 | dev_err(&adev->dev, "%s failed to request interrupt %d\n", | 2113 | dev_err(&adev->dev, "%s failed to request interrupt %d\n", |
| 2145 | __func__, adev->irq[0]); | 2114 | __func__, adev->irq[0]); |
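Both the pl08x hunk above and the at_hdmac hunk below drop a driver-local unmap routine in favour of a single dma_descriptor_unmap() call. A minimal sketch of what that core helper amounts to, assuming the reference-counted dmaengine_unmap_data introduced in the drivers/dma/dmaengine.c hunk later in this diff:

	static inline void
	dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
	{
		/* Drop the descriptor's reference on its unmap data, if any;
		 * the final put performs the deferred dma_unmap_page() calls
		 * (see dmaengine_unmap() later in this diff). */
		if (tx->unmap) {
			dmaengine_unmap_put(tx->unmap);
			tx->unmap = NULL;
		}
	}

The payoff is visible in the deleted lines: the per-driver DMA_COMPL_*_UNMAP flag checks disappear, and slave and memcpy channels share one completion path.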
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index c787f38a186a..e2c04dc81e2a 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c | |||
| @@ -344,31 +344,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) | |||
| 344 | /* move myself to free_list */ | 344 | /* move myself to free_list */ |
| 345 | list_move(&desc->desc_node, &atchan->free_list); | 345 | list_move(&desc->desc_node, &atchan->free_list); |
| 346 | 346 | ||
| 347 | /* unmap dma addresses (not on slave channels) */ | 347 | dma_descriptor_unmap(txd); |
| 348 | if (!atchan->chan_common.private) { | ||
| 349 | struct device *parent = chan2parent(&atchan->chan_common); | ||
| 350 | if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
| 351 | if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) | ||
| 352 | dma_unmap_single(parent, | ||
| 353 | desc->lli.daddr, | ||
| 354 | desc->len, DMA_FROM_DEVICE); | ||
| 355 | else | ||
| 356 | dma_unmap_page(parent, | ||
| 357 | desc->lli.daddr, | ||
| 358 | desc->len, DMA_FROM_DEVICE); | ||
| 359 | } | ||
| 360 | if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
| 361 | if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) | ||
| 362 | dma_unmap_single(parent, | ||
| 363 | desc->lli.saddr, | ||
| 364 | desc->len, DMA_TO_DEVICE); | ||
| 365 | else | ||
| 366 | dma_unmap_page(parent, | ||
| 367 | desc->lli.saddr, | ||
| 368 | desc->len, DMA_TO_DEVICE); | ||
| 369 | } | ||
| 370 | } | ||
| 371 | |||
| 372 | /* for cyclic transfers, | 348 | /* for cyclic transfers, |
| 373 | * no need to replay callback function while stopping */ | 349 | * no need to replay callback function while stopping */ |
| 374 | if (!atc_chan_is_cyclic(atchan)) { | 350 | if (!atc_chan_is_cyclic(atchan)) { |
| @@ -1102,7 +1078,7 @@ atc_tx_status(struct dma_chan *chan, | |||
| 1102 | int bytes = 0; | 1078 | int bytes = 0; |
| 1103 | 1079 | ||
| 1104 | ret = dma_cookie_status(chan, cookie, txstate); | 1080 | ret = dma_cookie_status(chan, cookie, txstate); |
| 1105 | if (ret == DMA_SUCCESS) | 1081 | if (ret == DMA_COMPLETE) |
| 1106 | return ret; | 1082 | return ret; |
| 1107 | /* | 1083 | /* |
| 1108 | * There's no point calculating the residue if there's | 1084 | * There's no point calculating the residue if there's |
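The s/DMA_SUCCESS/DMA_COMPLETE/ substitution recurs in nearly every tx_status path in this diff. For orientation, the renamed status enum presumably reads as below; whether a transitional DMA_SUCCESS alias is retained for not-yet-converted code is an assumption, not visible in these hunks:

	enum dma_status {
		DMA_COMPLETE,		/* transaction completed (was DMA_SUCCESS) */
		DMA_IN_PROGRESS,	/* transaction not yet processed */
		DMA_PAUSED,		/* transaction is paused */
		DMA_ERROR,		/* transaction failed */
	};

The motivation, presumably, is that the old name conflated completion with success: the status describes the cookie's progress, while failures are still reported as DMA_ERROR.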
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index 31011d2a26fc..3c6716e0b78e 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c | |||
| @@ -2369,7 +2369,7 @@ coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | |||
| 2369 | enum dma_status ret; | 2369 | enum dma_status ret; |
| 2370 | 2370 | ||
| 2371 | ret = dma_cookie_status(chan, cookie, txstate); | 2371 | ret = dma_cookie_status(chan, cookie, txstate); |
| 2372 | if (ret == DMA_SUCCESS) | 2372 | if (ret == DMA_COMPLETE) |
| 2373 | return ret; | 2373 | return ret; |
| 2374 | 2374 | ||
| 2375 | dma_set_residue(txstate, coh901318_get_bytes_left(chan)); | 2375 | dma_set_residue(txstate, coh901318_get_bytes_left(chan)); |
| @@ -2694,7 +2694,7 @@ static int __init coh901318_probe(struct platform_device *pdev) | |||
| 2694 | if (irq < 0) | 2694 | if (irq < 0) |
| 2695 | return irq; | 2695 | return irq; |
| 2696 | 2696 | ||
| 2697 | err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, IRQF_DISABLED, | 2697 | err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, 0, |
| 2698 | "coh901318", base); | 2698 | "coh901318", base); |
| 2699 | if (err) | 2699 | if (err) |
| 2700 | return err; | 2700 | return err; |
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c index 7c82b92f9b16..c29dacff66fa 100644 --- a/drivers/dma/cppi41.c +++ b/drivers/dma/cppi41.c | |||
| @@ -141,6 +141,9 @@ struct cppi41_dd { | |||
| 141 | const struct chan_queues *queues_rx; | 141 | const struct chan_queues *queues_rx; |
| 142 | const struct chan_queues *queues_tx; | 142 | const struct chan_queues *queues_tx; |
| 143 | struct chan_queues td_queue; | 143 | struct chan_queues td_queue; |
| 144 | |||
| 145 | /* context for suspend/resume */ | ||
| 146 | unsigned int dma_tdfdq; | ||
| 144 | }; | 147 | }; |
| 145 | 148 | ||
| 146 | #define FIST_COMPLETION_QUEUE 93 | 149 | #define FIST_COMPLETION_QUEUE 93 |
| @@ -263,6 +266,15 @@ static u32 pd_trans_len(u32 val) | |||
| 263 | return val & ((1 << (DESC_LENGTH_BITS_NUM + 1)) - 1); | 266 | return val & ((1 << (DESC_LENGTH_BITS_NUM + 1)) - 1); |
| 264 | } | 267 | } |
| 265 | 268 | ||
| 269 | static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num) | ||
| 270 | { | ||
| 271 | u32 desc; | ||
| 272 | |||
| 273 | desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num)); | ||
| 274 | desc &= ~0x1f; | ||
| 275 | return desc; | ||
| 276 | } | ||
| 277 | |||
| 266 | static irqreturn_t cppi41_irq(int irq, void *data) | 278 | static irqreturn_t cppi41_irq(int irq, void *data) |
| 267 | { | 279 | { |
| 268 | struct cppi41_dd *cdd = data; | 280 | struct cppi41_dd *cdd = data; |
| @@ -300,8 +312,7 @@ static irqreturn_t cppi41_irq(int irq, void *data) | |||
| 300 | q_num = __fls(val); | 312 | q_num = __fls(val); |
| 301 | val &= ~(1 << q_num); | 313 | val &= ~(1 << q_num); |
| 302 | q_num += 32 * i; | 314 | q_num += 32 * i; |
| 303 | desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(q_num)); | 315 | desc = cppi41_pop_desc(cdd, q_num); |
| 304 | desc &= ~0x1f; | ||
| 305 | c = desc_to_chan(cdd, desc); | 316 | c = desc_to_chan(cdd, desc); |
| 306 | if (WARN_ON(!c)) { | 317 | if (WARN_ON(!c)) { |
| 307 | pr_err("%s() q %d desc %08x\n", __func__, | 318 | pr_err("%s() q %d desc %08x\n", __func__, |
| @@ -353,7 +364,7 @@ static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan, | |||
| 353 | 364 | ||
| 354 | /* lock */ | 365 | /* lock */ |
| 355 | ret = dma_cookie_status(chan, cookie, txstate); | 366 | ret = dma_cookie_status(chan, cookie, txstate); |
| 356 | if (txstate && ret == DMA_SUCCESS) | 367 | if (txstate && ret == DMA_COMPLETE) |
| 357 | txstate->residue = c->residue; | 368 | txstate->residue = c->residue; |
| 358 | /* unlock */ | 369 | /* unlock */ |
| 359 | 370 | ||
| @@ -517,15 +528,6 @@ static void cppi41_compute_td_desc(struct cppi41_desc *d) | |||
| 517 | d->pd0 = DESC_TYPE_TEARD << DESC_TYPE; | 528 | d->pd0 = DESC_TYPE_TEARD << DESC_TYPE; |
| 518 | } | 529 | } |
| 519 | 530 | ||
| 520 | static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num) | ||
| 521 | { | ||
| 522 | u32 desc; | ||
| 523 | |||
| 524 | desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num)); | ||
| 525 | desc &= ~0x1f; | ||
| 526 | return desc; | ||
| 527 | } | ||
| 528 | |||
| 529 | static int cppi41_tear_down_chan(struct cppi41_channel *c) | 531 | static int cppi41_tear_down_chan(struct cppi41_channel *c) |
| 530 | { | 532 | { |
| 531 | struct cppi41_dd *cdd = c->cdd; | 533 | struct cppi41_dd *cdd = c->cdd; |
| @@ -561,36 +563,26 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c) | |||
| 561 | c->td_retry = 100; | 563 | c->td_retry = 100; |
| 562 | } | 564 | } |
| 563 | 565 | ||
| 564 | if (!c->td_seen) { | 566 | if (!c->td_seen || !c->td_desc_seen) { |
| 565 | unsigned td_comp_queue; | ||
| 566 | 567 | ||
| 567 | if (c->is_tx) | 568 | desc_phys = cppi41_pop_desc(cdd, cdd->td_queue.complete); |
| 568 | td_comp_queue = cdd->td_queue.complete; | 569 | if (!desc_phys) |
| 569 | else | 570 | desc_phys = cppi41_pop_desc(cdd, c->q_comp_num); |
| 570 | td_comp_queue = c->q_comp_num; | ||
| 571 | 571 | ||
| 572 | desc_phys = cppi41_pop_desc(cdd, td_comp_queue); | 572 | if (desc_phys == c->desc_phys) { |
| 573 | if (desc_phys) { | 573 | c->td_desc_seen = 1; |
| 574 | __iormb(); | 574 | |
| 575 | } else if (desc_phys == td_desc_phys) { | ||
| 576 | u32 pd0; | ||
| 575 | 577 | ||
| 576 | if (desc_phys == td_desc_phys) { | ||
| 577 | u32 pd0; | ||
| 578 | pd0 = td->pd0; | ||
| 579 | WARN_ON((pd0 >> DESC_TYPE) != DESC_TYPE_TEARD); | ||
| 580 | WARN_ON(!c->is_tx && !(pd0 & TD_DESC_IS_RX)); | ||
| 581 | WARN_ON((pd0 & 0x1f) != c->port_num); | ||
| 582 | } else { | ||
| 583 | WARN_ON_ONCE(1); | ||
| 584 | } | ||
| 585 | c->td_seen = 1; | ||
| 586 | } | ||
| 587 | } | ||
| 588 | if (!c->td_desc_seen) { | ||
| 589 | desc_phys = cppi41_pop_desc(cdd, c->q_comp_num); | ||
| 590 | if (desc_phys) { | ||
| 591 | __iormb(); | 578 | __iormb(); |
| 592 | WARN_ON(c->desc_phys != desc_phys); | 579 | pd0 = td->pd0; |
| 593 | c->td_desc_seen = 1; | 580 | WARN_ON((pd0 >> DESC_TYPE) != DESC_TYPE_TEARD); |
| 581 | WARN_ON(!c->is_tx && !(pd0 & TD_DESC_IS_RX)); | ||
| 582 | WARN_ON((pd0 & 0x1f) != c->port_num); | ||
| 583 | c->td_seen = 1; | ||
| 584 | } else if (desc_phys) { | ||
| 585 | WARN_ON_ONCE(1); | ||
| 594 | } | 586 | } |
| 595 | } | 587 | } |
| 596 | c->td_retry--; | 588 | c->td_retry--; |
| @@ -609,7 +601,7 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c) | |||
| 609 | 601 | ||
| 610 | WARN_ON(!c->td_retry); | 602 | WARN_ON(!c->td_retry); |
| 611 | if (!c->td_desc_seen) { | 603 | if (!c->td_desc_seen) { |
| 612 | desc_phys = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num)); | 604 | desc_phys = cppi41_pop_desc(cdd, c->q_num); |
| 613 | WARN_ON(!desc_phys); | 605 | WARN_ON(!desc_phys); |
| 614 | } | 606 | } |
| 615 | 607 | ||
| @@ -674,14 +666,14 @@ static void cleanup_chans(struct cppi41_dd *cdd) | |||
| 674 | } | 666 | } |
| 675 | } | 667 | } |
| 676 | 668 | ||
| 677 | static int cppi41_add_chans(struct platform_device *pdev, struct cppi41_dd *cdd) | 669 | static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd) |
| 678 | { | 670 | { |
| 679 | struct cppi41_channel *cchan; | 671 | struct cppi41_channel *cchan; |
| 680 | int i; | 672 | int i; |
| 681 | int ret; | 673 | int ret; |
| 682 | u32 n_chans; | 674 | u32 n_chans; |
| 683 | 675 | ||
| 684 | ret = of_property_read_u32(pdev->dev.of_node, "#dma-channels", | 676 | ret = of_property_read_u32(dev->of_node, "#dma-channels", |
| 685 | &n_chans); | 677 | &n_chans); |
| 686 | if (ret) | 678 | if (ret) |
| 687 | return ret; | 679 | return ret; |
| @@ -719,7 +711,7 @@ err: | |||
| 719 | return -ENOMEM; | 711 | return -ENOMEM; |
| 720 | } | 712 | } |
| 721 | 713 | ||
| 722 | static void purge_descs(struct platform_device *pdev, struct cppi41_dd *cdd) | 714 | static void purge_descs(struct device *dev, struct cppi41_dd *cdd) |
| 723 | { | 715 | { |
| 724 | unsigned int mem_decs; | 716 | unsigned int mem_decs; |
| 725 | int i; | 717 | int i; |
| @@ -731,7 +723,7 @@ static void purge_descs(struct platform_device *pdev, struct cppi41_dd *cdd) | |||
| 731 | cppi_writel(0, cdd->qmgr_mem + QMGR_MEMBASE(i)); | 723 | cppi_writel(0, cdd->qmgr_mem + QMGR_MEMBASE(i)); |
| 732 | cppi_writel(0, cdd->qmgr_mem + QMGR_MEMCTRL(i)); | 724 | cppi_writel(0, cdd->qmgr_mem + QMGR_MEMCTRL(i)); |
| 733 | 725 | ||
| 734 | dma_free_coherent(&pdev->dev, mem_decs, cdd->cd, | 726 | dma_free_coherent(dev, mem_decs, cdd->cd, |
| 735 | cdd->descs_phys); | 727 | cdd->descs_phys); |
| 736 | } | 728 | } |
| 737 | } | 729 | } |
| @@ -741,19 +733,19 @@ static void disable_sched(struct cppi41_dd *cdd) | |||
| 741 | cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL); | 733 | cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL); |
| 742 | } | 734 | } |
| 743 | 735 | ||
| 744 | static void deinit_cpii41(struct platform_device *pdev, struct cppi41_dd *cdd) | 736 | static void deinit_cppi41(struct device *dev, struct cppi41_dd *cdd) |
| 745 | { | 737 | { |
| 746 | disable_sched(cdd); | 738 | disable_sched(cdd); |
| 747 | 739 | ||
| 748 | purge_descs(pdev, cdd); | 740 | purge_descs(dev, cdd); |
| 749 | 741 | ||
| 750 | cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE); | 742 | cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE); |
| 751 | cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE); | 743 | cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE); |
| 752 | dma_free_coherent(&pdev->dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch, | 744 | dma_free_coherent(dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch, |
| 753 | cdd->scratch_phys); | 745 | cdd->scratch_phys); |
| 754 | } | 746 | } |
| 755 | 747 | ||
| 756 | static int init_descs(struct platform_device *pdev, struct cppi41_dd *cdd) | 748 | static int init_descs(struct device *dev, struct cppi41_dd *cdd) |
| 757 | { | 749 | { |
| 758 | unsigned int desc_size; | 750 | unsigned int desc_size; |
| 759 | unsigned int mem_decs; | 751 | unsigned int mem_decs; |
| @@ -777,7 +769,7 @@ static int init_descs(struct platform_device *pdev, struct cppi41_dd *cdd) | |||
| 777 | reg |= ilog2(ALLOC_DECS_NUM) - 5; | 769 | reg |= ilog2(ALLOC_DECS_NUM) - 5; |
| 778 | 770 | ||
| 779 | BUILD_BUG_ON(DESCS_AREAS != 1); | 771 | BUILD_BUG_ON(DESCS_AREAS != 1); |
| 780 | cdd->cd = dma_alloc_coherent(&pdev->dev, mem_decs, | 772 | cdd->cd = dma_alloc_coherent(dev, mem_decs, |
| 781 | &cdd->descs_phys, GFP_KERNEL); | 773 | &cdd->descs_phys, GFP_KERNEL); |
| 782 | if (!cdd->cd) | 774 | if (!cdd->cd) |
| 783 | return -ENOMEM; | 775 | return -ENOMEM; |
| @@ -813,12 +805,12 @@ static void init_sched(struct cppi41_dd *cdd) | |||
| 813 | cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL); | 805 | cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL); |
| 814 | } | 806 | } |
| 815 | 807 | ||
| 816 | static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd) | 808 | static int init_cppi41(struct device *dev, struct cppi41_dd *cdd) |
| 817 | { | 809 | { |
| 818 | int ret; | 810 | int ret; |
| 819 | 811 | ||
| 820 | BUILD_BUG_ON(QMGR_SCRATCH_SIZE > ((1 << 14) - 1)); | 812 | BUILD_BUG_ON(QMGR_SCRATCH_SIZE > ((1 << 14) - 1)); |
| 821 | cdd->qmgr_scratch = dma_alloc_coherent(&pdev->dev, QMGR_SCRATCH_SIZE, | 813 | cdd->qmgr_scratch = dma_alloc_coherent(dev, QMGR_SCRATCH_SIZE, |
| 822 | &cdd->scratch_phys, GFP_KERNEL); | 814 | &cdd->scratch_phys, GFP_KERNEL); |
| 823 | if (!cdd->qmgr_scratch) | 815 | if (!cdd->qmgr_scratch) |
| 824 | return -ENOMEM; | 816 | return -ENOMEM; |
| @@ -827,7 +819,7 @@ static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd) | |||
| 827 | cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE); | 819 | cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE); |
| 828 | cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE); | 820 | cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE); |
| 829 | 821 | ||
| 830 | ret = init_descs(pdev, cdd); | 822 | ret = init_descs(dev, cdd); |
| 831 | if (ret) | 823 | if (ret) |
| 832 | goto err_td; | 824 | goto err_td; |
| 833 | 825 | ||
| @@ -835,7 +827,7 @@ static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd) | |||
| 835 | init_sched(cdd); | 827 | init_sched(cdd); |
| 836 | return 0; | 828 | return 0; |
| 837 | err_td: | 829 | err_td: |
| 838 | deinit_cpii41(pdev, cdd); | 830 | deinit_cppi41(dev, cdd); |
| 839 | return ret; | 831 | return ret; |
| 840 | } | 832 | } |
| 841 | 833 | ||
| @@ -914,11 +906,11 @@ static const struct of_device_id cppi41_dma_ids[] = { | |||
| 914 | }; | 906 | }; |
| 915 | MODULE_DEVICE_TABLE(of, cppi41_dma_ids); | 907 | MODULE_DEVICE_TABLE(of, cppi41_dma_ids); |
| 916 | 908 | ||
| 917 | static const struct cppi_glue_infos *get_glue_info(struct platform_device *pdev) | 909 | static const struct cppi_glue_infos *get_glue_info(struct device *dev) |
| 918 | { | 910 | { |
| 919 | const struct of_device_id *of_id; | 911 | const struct of_device_id *of_id; |
| 920 | 912 | ||
| 921 | of_id = of_match_node(cppi41_dma_ids, pdev->dev.of_node); | 913 | of_id = of_match_node(cppi41_dma_ids, dev->of_node); |
| 922 | if (!of_id) | 914 | if (!of_id) |
| 923 | return NULL; | 915 | return NULL; |
| 924 | return of_id->data; | 916 | return of_id->data; |
| @@ -927,11 +919,12 @@ static const struct cppi_glue_infos *get_glue_info(struct platform_device *pdev) | |||
| 927 | static int cppi41_dma_probe(struct platform_device *pdev) | 919 | static int cppi41_dma_probe(struct platform_device *pdev) |
| 928 | { | 920 | { |
| 929 | struct cppi41_dd *cdd; | 921 | struct cppi41_dd *cdd; |
| 922 | struct device *dev = &pdev->dev; | ||
| 930 | const struct cppi_glue_infos *glue_info; | 923 | const struct cppi_glue_infos *glue_info; |
| 931 | int irq; | 924 | int irq; |
| 932 | int ret; | 925 | int ret; |
| 933 | 926 | ||
| 934 | glue_info = get_glue_info(pdev); | 927 | glue_info = get_glue_info(dev); |
| 935 | if (!glue_info) | 928 | if (!glue_info) |
| 936 | return -EINVAL; | 929 | return -EINVAL; |
| 937 | 930 | ||
| @@ -946,14 +939,14 @@ static int cppi41_dma_probe(struct platform_device *pdev) | |||
| 946 | cdd->ddev.device_issue_pending = cppi41_dma_issue_pending; | 939 | cdd->ddev.device_issue_pending = cppi41_dma_issue_pending; |
| 947 | cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg; | 940 | cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg; |
| 948 | cdd->ddev.device_control = cppi41_dma_control; | 941 | cdd->ddev.device_control = cppi41_dma_control; |
| 949 | cdd->ddev.dev = &pdev->dev; | 942 | cdd->ddev.dev = dev; |
| 950 | INIT_LIST_HEAD(&cdd->ddev.channels); | 943 | INIT_LIST_HEAD(&cdd->ddev.channels); |
| 951 | cpp41_dma_info.dma_cap = cdd->ddev.cap_mask; | 944 | cpp41_dma_info.dma_cap = cdd->ddev.cap_mask; |
| 952 | 945 | ||
| 953 | cdd->usbss_mem = of_iomap(pdev->dev.of_node, 0); | 946 | cdd->usbss_mem = of_iomap(dev->of_node, 0); |
| 954 | cdd->ctrl_mem = of_iomap(pdev->dev.of_node, 1); | 947 | cdd->ctrl_mem = of_iomap(dev->of_node, 1); |
| 955 | cdd->sched_mem = of_iomap(pdev->dev.of_node, 2); | 948 | cdd->sched_mem = of_iomap(dev->of_node, 2); |
| 956 | cdd->qmgr_mem = of_iomap(pdev->dev.of_node, 3); | 949 | cdd->qmgr_mem = of_iomap(dev->of_node, 3); |
| 957 | 950 | ||
| 958 | if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem || | 951 | if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem || |
| 959 | !cdd->qmgr_mem) { | 952 | !cdd->qmgr_mem) { |
| @@ -961,31 +954,31 @@ static int cppi41_dma_probe(struct platform_device *pdev) | |||
| 961 | goto err_remap; | 954 | goto err_remap; |
| 962 | } | 955 | } |
| 963 | 956 | ||
| 964 | pm_runtime_enable(&pdev->dev); | 957 | pm_runtime_enable(dev); |
| 965 | ret = pm_runtime_get_sync(&pdev->dev); | 958 | ret = pm_runtime_get_sync(dev); |
| 966 | if (ret) | 959 | if (ret < 0) |
| 967 | goto err_get_sync; | 960 | goto err_get_sync; |
| 968 | 961 | ||
| 969 | cdd->queues_rx = glue_info->queues_rx; | 962 | cdd->queues_rx = glue_info->queues_rx; |
| 970 | cdd->queues_tx = glue_info->queues_tx; | 963 | cdd->queues_tx = glue_info->queues_tx; |
| 971 | cdd->td_queue = glue_info->td_queue; | 964 | cdd->td_queue = glue_info->td_queue; |
| 972 | 965 | ||
| 973 | ret = init_cppi41(pdev, cdd); | 966 | ret = init_cppi41(dev, cdd); |
| 974 | if (ret) | 967 | if (ret) |
| 975 | goto err_init_cppi; | 968 | goto err_init_cppi; |
| 976 | 969 | ||
| 977 | ret = cppi41_add_chans(pdev, cdd); | 970 | ret = cppi41_add_chans(dev, cdd); |
| 978 | if (ret) | 971 | if (ret) |
| 979 | goto err_chans; | 972 | goto err_chans; |
| 980 | 973 | ||
| 981 | irq = irq_of_parse_and_map(pdev->dev.of_node, 0); | 974 | irq = irq_of_parse_and_map(dev->of_node, 0); |
| 982 | if (!irq) | 975 | if (!irq) |
| 983 | goto err_irq; | 976 | goto err_irq; |
| 984 | 977 | ||
| 985 | cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER); | 978 | cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER); |
| 986 | 979 | ||
| 987 | ret = request_irq(irq, glue_info->isr, IRQF_SHARED, | 980 | ret = request_irq(irq, glue_info->isr, IRQF_SHARED, |
| 988 | dev_name(&pdev->dev), cdd); | 981 | dev_name(dev), cdd); |
| 989 | if (ret) | 982 | if (ret) |
| 990 | goto err_irq; | 983 | goto err_irq; |
| 991 | cdd->irq = irq; | 984 | cdd->irq = irq; |
| @@ -994,7 +987,7 @@ static int cppi41_dma_probe(struct platform_device *pdev) | |||
| 994 | if (ret) | 987 | if (ret) |
| 995 | goto err_dma_reg; | 988 | goto err_dma_reg; |
| 996 | 989 | ||
| 997 | ret = of_dma_controller_register(pdev->dev.of_node, | 990 | ret = of_dma_controller_register(dev->of_node, |
| 998 | cppi41_dma_xlate, &cpp41_dma_info); | 991 | cppi41_dma_xlate, &cpp41_dma_info); |
| 999 | if (ret) | 992 | if (ret) |
| 1000 | goto err_of; | 993 | goto err_of; |
| @@ -1009,11 +1002,11 @@ err_irq: | |||
| 1009 | cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR); | 1002 | cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR); |
| 1010 | cleanup_chans(cdd); | 1003 | cleanup_chans(cdd); |
| 1011 | err_chans: | 1004 | err_chans: |
| 1012 | deinit_cpii41(pdev, cdd); | 1005 | deinit_cppi41(dev, cdd); |
| 1013 | err_init_cppi: | 1006 | err_init_cppi: |
| 1014 | pm_runtime_put(&pdev->dev); | 1007 | pm_runtime_put(dev); |
| 1015 | err_get_sync: | 1008 | err_get_sync: |
| 1016 | pm_runtime_disable(&pdev->dev); | 1009 | pm_runtime_disable(dev); |
| 1017 | iounmap(cdd->usbss_mem); | 1010 | iounmap(cdd->usbss_mem); |
| 1018 | iounmap(cdd->ctrl_mem); | 1011 | iounmap(cdd->ctrl_mem); |
| 1019 | iounmap(cdd->sched_mem); | 1012 | iounmap(cdd->sched_mem); |
| @@ -1033,7 +1026,7 @@ static int cppi41_dma_remove(struct platform_device *pdev) | |||
| 1033 | cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR); | 1026 | cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR); |
| 1034 | free_irq(cdd->irq, cdd); | 1027 | free_irq(cdd->irq, cdd); |
| 1035 | cleanup_chans(cdd); | 1028 | cleanup_chans(cdd); |
| 1036 | deinit_cpii41(pdev, cdd); | 1029 | deinit_cppi41(&pdev->dev, cdd); |
| 1037 | iounmap(cdd->usbss_mem); | 1030 | iounmap(cdd->usbss_mem); |
| 1038 | iounmap(cdd->ctrl_mem); | 1031 | iounmap(cdd->ctrl_mem); |
| 1039 | iounmap(cdd->sched_mem); | 1032 | iounmap(cdd->sched_mem); |
| @@ -1044,12 +1037,53 @@ static int cppi41_dma_remove(struct platform_device *pdev) | |||
| 1044 | return 0; | 1037 | return 0; |
| 1045 | } | 1038 | } |
| 1046 | 1039 | ||
| 1040 | #ifdef CONFIG_PM_SLEEP | ||
| 1041 | static int cppi41_suspend(struct device *dev) | ||
| 1042 | { | ||
| 1043 | struct cppi41_dd *cdd = dev_get_drvdata(dev); | ||
| 1044 | |||
| 1045 | cdd->dma_tdfdq = cppi_readl(cdd->ctrl_mem + DMA_TDFDQ); | ||
| 1046 | cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR); | ||
| 1047 | disable_sched(cdd); | ||
| 1048 | |||
| 1049 | return 0; | ||
| 1050 | } | ||
| 1051 | |||
| 1052 | static int cppi41_resume(struct device *dev) | ||
| 1053 | { | ||
| 1054 | struct cppi41_dd *cdd = dev_get_drvdata(dev); | ||
| 1055 | struct cppi41_channel *c; | ||
| 1056 | int i; | ||
| 1057 | |||
| 1058 | for (i = 0; i < DESCS_AREAS; i++) | ||
| 1059 | cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i)); | ||
| 1060 | |||
| 1061 | list_for_each_entry(c, &cdd->ddev.channels, chan.device_node) | ||
| 1062 | if (!c->is_tx) | ||
| 1063 | cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0); | ||
| 1064 | |||
| 1065 | init_sched(cdd); | ||
| 1066 | |||
| 1067 | cppi_writel(cdd->dma_tdfdq, cdd->ctrl_mem + DMA_TDFDQ); | ||
| 1068 | cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE); | ||
| 1069 | cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE); | ||
| 1070 | cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE); | ||
| 1071 | |||
| 1072 | cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER); | ||
| 1073 | |||
| 1074 | return 0; | ||
| 1075 | } | ||
| 1076 | #endif | ||
| 1077 | |||
| 1078 | static SIMPLE_DEV_PM_OPS(cppi41_pm_ops, cppi41_suspend, cppi41_resume); | ||
| 1079 | |||
| 1047 | static struct platform_driver cpp41_dma_driver = { | 1080 | static struct platform_driver cpp41_dma_driver = { |
| 1048 | .probe = cppi41_dma_probe, | 1081 | .probe = cppi41_dma_probe, |
| 1049 | .remove = cppi41_dma_remove, | 1082 | .remove = cppi41_dma_remove, |
| 1050 | .driver = { | 1083 | .driver = { |
| 1051 | .name = "cppi41-dma-engine", | 1084 | .name = "cppi41-dma-engine", |
| 1052 | .owner = THIS_MODULE, | 1085 | .owner = THIS_MODULE, |
| 1086 | .pm = &cppi41_pm_ops, | ||
| 1053 | .of_match_table = of_match_ptr(cppi41_dma_ids), | 1087 | .of_match_table = of_match_ptr(cppi41_dma_ids), |
| 1054 | }, | 1088 | }, |
| 1055 | }; | 1089 | }; |
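Two details in the cppi41 hunks are easy to miss. pm_runtime_get_sync() returns a positive value when the device was already active, so the probe check is relaxed to ret < 0; and cppi41_pop_desc() masks off the low five bits of the popped queue register, which apparently carry descriptor-size information rather than address bits (descriptors being 32-byte aligned). The new suspend/resume pair is wired up with SIMPLE_DEV_PM_OPS, which expands to roughly the following system-sleep-only dev_pm_ops (no runtime-PM hooks):

	static const struct dev_pm_ops cppi41_pm_ops = {
		.suspend	= cppi41_suspend,
		.resume		= cppi41_resume,
		.freeze		= cppi41_suspend,
		.thaw		= cppi41_resume,
		.poweroff	= cppi41_suspend,
		.restore	= cppi41_resume,
	};

Leaving the macro itself outside the #ifdef CONFIG_PM_SLEEP guard works because the sleep callback slots compile away to an empty initializer when sleep support is disabled.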
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c index b0c0c8268d42..94c380f07538 100644 --- a/drivers/dma/dma-jz4740.c +++ b/drivers/dma/dma-jz4740.c | |||
| @@ -491,7 +491,7 @@ static enum dma_status jz4740_dma_tx_status(struct dma_chan *c, | |||
| 491 | unsigned long flags; | 491 | unsigned long flags; |
| 492 | 492 | ||
| 493 | status = dma_cookie_status(c, cookie, state); | 493 | status = dma_cookie_status(c, cookie, state); |
| 494 | if (status == DMA_SUCCESS || !state) | 494 | if (status == DMA_COMPLETE || !state) |
| 495 | return status; | 495 | return status; |
| 496 | 496 | ||
| 497 | spin_lock_irqsave(&chan->vchan.lock, flags); | 497 | spin_lock_irqsave(&chan->vchan.lock, flags); |
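Every one of these tx_status conversions calls dma_cookie_status() first and returns early on DMA_COMPLETE, falling through only to compute the residue. For reference, that helper (from drivers/dma/dmaengine.h of this era) is essentially the sketch below, so the early return merely skips the residue bookkeeping for finished cookies:

	static inline enum dma_status dma_cookie_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *state)
	{
		dma_cookie_t used, complete;

		used = chan->cookie;			/* last cookie issued */
		complete = chan->completed_cookie;	/* last cookie retired */
		barrier();
		if (state) {
			state->last = complete;
			state->used = used;
			state->residue = 0;		/* caller fills this in */
		}
		return dma_async_is_complete(cookie, complete, used);
	}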
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 9162ac80c18f..ea806bdc12ef 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c | |||
| @@ -65,6 +65,7 @@ | |||
| 65 | #include <linux/acpi.h> | 65 | #include <linux/acpi.h> |
| 66 | #include <linux/acpi_dma.h> | 66 | #include <linux/acpi_dma.h> |
| 67 | #include <linux/of_dma.h> | 67 | #include <linux/of_dma.h> |
| 68 | #include <linux/mempool.h> | ||
| 68 | 69 | ||
| 69 | static DEFINE_MUTEX(dma_list_mutex); | 70 | static DEFINE_MUTEX(dma_list_mutex); |
| 70 | static DEFINE_IDR(dma_idr); | 71 | static DEFINE_IDR(dma_idr); |
| @@ -901,98 +902,132 @@ void dma_async_device_unregister(struct dma_device *device) | |||
| 901 | } | 902 | } |
| 902 | EXPORT_SYMBOL(dma_async_device_unregister); | 903 | EXPORT_SYMBOL(dma_async_device_unregister); |
| 903 | 904 | ||
| 904 | /** | 905 | struct dmaengine_unmap_pool { |
| 905 | * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses | 906 | struct kmem_cache *cache; |
| 906 | * @chan: DMA channel to offload copy to | 907 | const char *name; |
| 907 | * @dest: destination address (virtual) | 908 | mempool_t *pool; |
| 908 | * @src: source address (virtual) | 909 | size_t size; |
| 909 | * @len: length | 910 | }; |
| 910 | * | ||
| 911 | * Both @dest and @src must be mappable to a bus address according to the | ||
| 912 | * DMA mapping API rules for streaming mappings. | ||
| 913 | * Both @dest and @src must stay memory resident (kernel memory or locked | ||
| 914 | * user space pages). | ||
| 915 | */ | ||
| 916 | dma_cookie_t | ||
| 917 | dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest, | ||
| 918 | void *src, size_t len) | ||
| 919 | { | ||
| 920 | struct dma_device *dev = chan->device; | ||
| 921 | struct dma_async_tx_descriptor *tx; | ||
| 922 | dma_addr_t dma_dest, dma_src; | ||
| 923 | dma_cookie_t cookie; | ||
| 924 | unsigned long flags; | ||
| 925 | 911 | ||
| 926 | dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE); | 912 | #define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) } |
| 927 | dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE); | 913 | static struct dmaengine_unmap_pool unmap_pool[] = { |
| 928 | flags = DMA_CTRL_ACK | | 914 | __UNMAP_POOL(2), |
| 929 | DMA_COMPL_SRC_UNMAP_SINGLE | | 915 | #if IS_ENABLED(CONFIG_ASYNC_TX_DMA) |
| 930 | DMA_COMPL_DEST_UNMAP_SINGLE; | 916 | __UNMAP_POOL(16), |
| 931 | tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags); | 917 | __UNMAP_POOL(128), |
| 918 | __UNMAP_POOL(256), | ||
| 919 | #endif | ||
| 920 | }; | ||
| 932 | 921 | ||
| 933 | if (!tx) { | 922 | static struct dmaengine_unmap_pool *__get_unmap_pool(int nr) |
| 934 | dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE); | 923 | { |
| 935 | dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE); | 924 | int order = get_count_order(nr); |
| 936 | return -ENOMEM; | 925 | |
| 926 | switch (order) { | ||
| 927 | case 0 ... 1: | ||
| 928 | return &unmap_pool[0]; | ||
| 929 | case 2 ... 4: | ||
| 930 | return &unmap_pool[1]; | ||
| 931 | case 5 ... 7: | ||
| 932 | return &unmap_pool[2]; | ||
| 933 | case 8: | ||
| 934 | return &unmap_pool[3]; | ||
| 935 | default: | ||
| 936 | BUG(); | ||
| 937 | return NULL; | ||
| 937 | } | 938 | } |
| 939 | } | ||
| 938 | 940 | ||
| 939 | tx->callback = NULL; | 941 | static void dmaengine_unmap(struct kref *kref) |
| 940 | cookie = tx->tx_submit(tx); | 942 | { |
| 943 | struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref); | ||
| 944 | struct device *dev = unmap->dev; | ||
| 945 | int cnt, i; | ||
| 946 | |||
| 947 | cnt = unmap->to_cnt; | ||
| 948 | for (i = 0; i < cnt; i++) | ||
| 949 | dma_unmap_page(dev, unmap->addr[i], unmap->len, | ||
| 950 | DMA_TO_DEVICE); | ||
| 951 | cnt += unmap->from_cnt; | ||
| 952 | for (; i < cnt; i++) | ||
| 953 | dma_unmap_page(dev, unmap->addr[i], unmap->len, | ||
| 954 | DMA_FROM_DEVICE); | ||
| 955 | cnt += unmap->bidi_cnt; | ||
| 956 | for (; i < cnt; i++) { | ||
| 957 | if (unmap->addr[i] == 0) | ||
| 958 | continue; | ||
| 959 | dma_unmap_page(dev, unmap->addr[i], unmap->len, | ||
| 960 | DMA_BIDIRECTIONAL); | ||
| 961 | } | ||
| 962 | mempool_free(unmap, __get_unmap_pool(cnt)->pool); | ||
| 963 | } | ||
| 941 | 964 | ||
| 942 | preempt_disable(); | 965 | void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap) |
| 943 | __this_cpu_add(chan->local->bytes_transferred, len); | 966 | { |
| 944 | __this_cpu_inc(chan->local->memcpy_count); | 967 | if (unmap) |
| 945 | preempt_enable(); | 968 | kref_put(&unmap->kref, dmaengine_unmap); |
| 969 | } | ||
| 970 | EXPORT_SYMBOL_GPL(dmaengine_unmap_put); | ||
| 946 | 971 | ||
| 947 | return cookie; | 972 | static void dmaengine_destroy_unmap_pool(void) |
| 973 | { | ||
| 974 | int i; | ||
| 975 | |||
| 976 | for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) { | ||
| 977 | struct dmaengine_unmap_pool *p = &unmap_pool[i]; | ||
| 978 | |||
| 979 | if (p->pool) | ||
| 980 | mempool_destroy(p->pool); | ||
| 981 | p->pool = NULL; | ||
| 982 | if (p->cache) | ||
| 983 | kmem_cache_destroy(p->cache); | ||
| 984 | p->cache = NULL; | ||
| 985 | } | ||
| 948 | } | 986 | } |
| 949 | EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf); | ||
| 950 | 987 | ||
| 951 | /** | 988 | static int __init dmaengine_init_unmap_pool(void) |
| 952 | * dma_async_memcpy_buf_to_pg - offloaded copy from address to page | ||
| 953 | * @chan: DMA channel to offload copy to | ||
| 954 | * @page: destination page | ||
| 955 | * @offset: offset in page to copy to | ||
| 956 | * @kdata: source address (virtual) | ||
| 957 | * @len: length | ||
| 958 | * | ||
| 959 | * Both @page/@offset and @kdata must be mappable to a bus address according | ||
| 960 | * to the DMA mapping API rules for streaming mappings. | ||
| 961 | * Both @page/@offset and @kdata must stay memory resident (kernel memory or | ||
| 962 | * locked user space pages) | ||
| 963 | */ | ||
| 964 | dma_cookie_t | ||
| 965 | dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page, | ||
| 966 | unsigned int offset, void *kdata, size_t len) | ||
| 967 | { | 989 | { |
| 968 | struct dma_device *dev = chan->device; | 990 | int i; |
| 969 | struct dma_async_tx_descriptor *tx; | ||
| 970 | dma_addr_t dma_dest, dma_src; | ||
| 971 | dma_cookie_t cookie; | ||
| 972 | unsigned long flags; | ||
| 973 | 991 | ||
| 974 | dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE); | 992 | for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) { |
| 975 | dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE); | 993 | struct dmaengine_unmap_pool *p = &unmap_pool[i]; |
| 976 | flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE; | 994 | size_t size; |
| 977 | tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags); | ||
| 978 | 995 | ||
| 979 | if (!tx) { | 996 | size = sizeof(struct dmaengine_unmap_data) + |
| 980 | dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE); | 997 | sizeof(dma_addr_t) * p->size; |
| 981 | dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE); | 998 | |
| 982 | return -ENOMEM; | 999 | p->cache = kmem_cache_create(p->name, size, 0, |
| 1000 | SLAB_HWCACHE_ALIGN, NULL); | ||
| 1001 | if (!p->cache) | ||
| 1002 | break; | ||
| 1003 | p->pool = mempool_create_slab_pool(1, p->cache); | ||
| 1004 | if (!p->pool) | ||
| 1005 | break; | ||
| 983 | } | 1006 | } |
| 984 | 1007 | ||
| 985 | tx->callback = NULL; | 1008 | if (i == ARRAY_SIZE(unmap_pool)) |
| 986 | cookie = tx->tx_submit(tx); | 1009 | return 0; |
| 987 | 1010 | ||
| 988 | preempt_disable(); | 1011 | dmaengine_destroy_unmap_pool(); |
| 989 | __this_cpu_add(chan->local->bytes_transferred, len); | 1012 | return -ENOMEM; |
| 990 | __this_cpu_inc(chan->local->memcpy_count); | 1013 | } |
| 991 | preempt_enable(); | ||
| 992 | 1014 | ||
| 993 | return cookie; | 1015 | struct dmaengine_unmap_data * |
| 1016 | dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags) | ||
| 1017 | { | ||
| 1018 | struct dmaengine_unmap_data *unmap; | ||
| 1019 | |||
| 1020 | unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags); | ||
| 1021 | if (!unmap) | ||
| 1022 | return NULL; | ||
| 1023 | |||
| 1024 | memset(unmap, 0, sizeof(*unmap)); | ||
| 1025 | kref_init(&unmap->kref); | ||
| 1026 | unmap->dev = dev; | ||
| 1027 | |||
| 1028 | return unmap; | ||
| 994 | } | 1029 | } |
| 995 | EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg); | 1030 | EXPORT_SYMBOL(dmaengine_get_unmap_data); |
| 996 | 1031 | ||
| 997 | /** | 1032 | /** |
| 998 | * dma_async_memcpy_pg_to_pg - offloaded copy from page to page | 1033 | * dma_async_memcpy_pg_to_pg - offloaded copy from page to page |
| @@ -1015,24 +1050,33 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg, | |||
| 1015 | { | 1050 | { |
| 1016 | struct dma_device *dev = chan->device; | 1051 | struct dma_device *dev = chan->device; |
| 1017 | struct dma_async_tx_descriptor *tx; | 1052 | struct dma_async_tx_descriptor *tx; |
| 1018 | dma_addr_t dma_dest, dma_src; | 1053 | struct dmaengine_unmap_data *unmap; |
| 1019 | dma_cookie_t cookie; | 1054 | dma_cookie_t cookie; |
| 1020 | unsigned long flags; | 1055 | unsigned long flags; |
| 1021 | 1056 | ||
| 1022 | dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE); | 1057 | unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO); |
| 1023 | dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len, | 1058 | if (!unmap) |
| 1024 | DMA_FROM_DEVICE); | 1059 | return -ENOMEM; |
| 1060 | |||
| 1061 | unmap->to_cnt = 1; | ||
| 1062 | unmap->from_cnt = 1; | ||
| 1063 | unmap->addr[0] = dma_map_page(dev->dev, src_pg, src_off, len, | ||
| 1064 | DMA_TO_DEVICE); | ||
| 1065 | unmap->addr[1] = dma_map_page(dev->dev, dest_pg, dest_off, len, | ||
| 1066 | DMA_FROM_DEVICE); | ||
| 1067 | unmap->len = len; | ||
| 1025 | flags = DMA_CTRL_ACK; | 1068 | flags = DMA_CTRL_ACK; |
| 1026 | tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags); | 1069 | tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0], |
| 1070 | len, flags); | ||
| 1027 | 1071 | ||
| 1028 | if (!tx) { | 1072 | if (!tx) { |
| 1029 | dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE); | 1073 | dmaengine_unmap_put(unmap); |
| 1030 | dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE); | ||
| 1031 | return -ENOMEM; | 1074 | return -ENOMEM; |
| 1032 | } | 1075 | } |
| 1033 | 1076 | ||
| 1034 | tx->callback = NULL; | 1077 | dma_set_unmap(tx, unmap); |
| 1035 | cookie = tx->tx_submit(tx); | 1078 | cookie = tx->tx_submit(tx); |
| 1079 | dmaengine_unmap_put(unmap); | ||
| 1036 | 1080 | ||
| 1037 | preempt_disable(); | 1081 | preempt_disable(); |
| 1038 | __this_cpu_add(chan->local->bytes_transferred, len); | 1082 | __this_cpu_add(chan->local->bytes_transferred, len); |
| @@ -1043,6 +1087,52 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg, | |||
| 1043 | } | 1087 | } |
| 1044 | EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg); | 1088 | EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg); |
| 1045 | 1089 | ||
| 1090 | /** | ||
| 1091 | * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses | ||
| 1092 | * @chan: DMA channel to offload copy to | ||
| 1093 | * @dest: destination address (virtual) | ||
| 1094 | * @src: source address (virtual) | ||
| 1095 | * @len: length | ||
| 1096 | * | ||
| 1097 | * Both @dest and @src must be mappable to a bus address according to the | ||
| 1098 | * DMA mapping API rules for streaming mappings. | ||
| 1099 | * Both @dest and @src must stay memory resident (kernel memory or locked | ||
| 1100 | * user space pages). | ||
| 1101 | */ | ||
| 1102 | dma_cookie_t | ||
| 1103 | dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest, | ||
| 1104 | void *src, size_t len) | ||
| 1105 | { | ||
| 1106 | return dma_async_memcpy_pg_to_pg(chan, virt_to_page(dest), | ||
| 1107 | (unsigned long) dest & ~PAGE_MASK, | ||
| 1108 | virt_to_page(src), | ||
| 1109 | (unsigned long) src & ~PAGE_MASK, len); | ||
| 1110 | } | ||
| 1111 | EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf); | ||
| 1112 | |||
| 1113 | /** | ||
| 1114 | * dma_async_memcpy_buf_to_pg - offloaded copy from address to page | ||
| 1115 | * @chan: DMA channel to offload copy to | ||
| 1116 | * @page: destination page | ||
| 1117 | * @offset: offset in page to copy to | ||
| 1118 | * @kdata: source address (virtual) | ||
| 1119 | * @len: length | ||
| 1120 | * | ||
| 1121 | * Both @page/@offset and @kdata must be mappable to a bus address according | ||
| 1122 | * to the DMA mapping API rules for streaming mappings. | ||
| 1123 | * Both @page/@offset and @kdata must stay memory resident (kernel memory or | ||
| 1124 | * locked user space pages) | ||
| 1125 | */ | ||
| 1126 | dma_cookie_t | ||
| 1127 | dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page, | ||
| 1128 | unsigned int offset, void *kdata, size_t len) | ||
| 1129 | { | ||
| 1130 | return dma_async_memcpy_pg_to_pg(chan, page, offset, | ||
| 1131 | virt_to_page(kdata), | ||
| 1132 | (unsigned long) kdata & ~PAGE_MASK, len); | ||
| 1133 | } | ||
| 1134 | EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg); | ||
| 1135 | |||
| 1046 | void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, | 1136 | void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, |
| 1047 | struct dma_chan *chan) | 1137 | struct dma_chan *chan) |
| 1048 | { | 1138 | { |
| @@ -1062,7 +1152,7 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) | |||
| 1062 | unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000); | 1152 | unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000); |
| 1063 | 1153 | ||
| 1064 | if (!tx) | 1154 | if (!tx) |
| 1065 | return DMA_SUCCESS; | 1155 | return DMA_COMPLETE; |
| 1066 | 1156 | ||
| 1067 | while (tx->cookie == -EBUSY) { | 1157 | while (tx->cookie == -EBUSY) { |
| 1068 | if (time_after_eq(jiffies, dma_sync_wait_timeout)) { | 1158 | if (time_after_eq(jiffies, dma_sync_wait_timeout)) { |
| @@ -1116,6 +1206,10 @@ EXPORT_SYMBOL_GPL(dma_run_dependencies); | |||
| 1116 | 1206 | ||
| 1117 | static int __init dma_bus_init(void) | 1207 | static int __init dma_bus_init(void) |
| 1118 | { | 1208 | { |
| 1209 | int err = dmaengine_init_unmap_pool(); | ||
| 1210 | |||
| 1211 | if (err) | ||
| 1212 | return err; | ||
| 1119 | return class_register(&dma_devclass); | 1213 | return class_register(&dma_devclass); |
| 1120 | } | 1214 | } |
| 1121 | arch_initcall(dma_bus_init); | 1215 | arch_initcall(dma_bus_init); |
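The unmap machinery added above is reference counted: dmaengine_get_unmap_data() hands out a zeroed dmaengine_unmap_data holding one reference, dma_set_unmap() gives the descriptor its own reference, and the final dmaengine_unmap_put() performs the deferred dma_unmap_page() calls in dmaengine_unmap(). A condensed usage sketch, following the converted dma_async_memcpy_pg_to_pg() above (names as in that hunk; error handling trimmed):

	struct dmaengine_unmap_data *unmap;
	struct dma_async_tx_descriptor *tx;

	unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO);
	if (!unmap)
		return -ENOMEM;

	unmap->to_cnt = 1;	/* addr[0] is a DMA_TO_DEVICE mapping */
	unmap->from_cnt = 1;	/* addr[1] is a DMA_FROM_DEVICE mapping */
	unmap->len = len;
	unmap->addr[0] = dma_map_page(dev->dev, src_pg, src_off, len,
				      DMA_TO_DEVICE);
	unmap->addr[1] = dma_map_page(dev->dev, dest_pg, dest_off, len,
				      DMA_FROM_DEVICE);

	tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
					 len, DMA_CTRL_ACK);
	if (!tx) {
		dmaengine_unmap_put(unmap);	/* last ref: unmaps both pages */
		return -ENOMEM;
	}

	dma_set_unmap(tx, unmap);	/* descriptor takes its own reference */
	cookie = tx->tx_submit(tx);
	dmaengine_unmap_put(unmap);	/* drop ours; the descriptor's ref keeps
					 * the pages mapped until completion */

Pool sizing in __get_unmap_pool() buckets by get_count_order(nr): one or two addresses always come from the small pool, while the 16-, 128- and 256-entry pools exist only under CONFIG_ASYNC_TX_DMA, matching the fan-in of xor/pq operations.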
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 92f796cdc6ab..20f9a3aaf926 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c | |||
| @@ -8,6 +8,8 @@ | |||
| 8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
| 9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
| 10 | */ | 10 | */ |
| 11 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
| 12 | |||
| 11 | #include <linux/delay.h> | 13 | #include <linux/delay.h> |
| 12 | #include <linux/dma-mapping.h> | 14 | #include <linux/dma-mapping.h> |
| 13 | #include <linux/dmaengine.h> | 15 | #include <linux/dmaengine.h> |
| @@ -19,10 +21,6 @@ | |||
| 19 | #include <linux/random.h> | 21 | #include <linux/random.h> |
| 20 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
| 21 | #include <linux/wait.h> | 23 | #include <linux/wait.h> |
| 22 | #include <linux/ctype.h> | ||
| 23 | #include <linux/debugfs.h> | ||
| 24 | #include <linux/uaccess.h> | ||
| 25 | #include <linux/seq_file.h> | ||
| 26 | 24 | ||
| 27 | static unsigned int test_buf_size = 16384; | 25 | static unsigned int test_buf_size = 16384; |
| 28 | module_param(test_buf_size, uint, S_IRUGO | S_IWUSR); | 26 | module_param(test_buf_size, uint, S_IRUGO | S_IWUSR); |
| @@ -68,92 +66,13 @@ module_param(timeout, uint, S_IRUGO | S_IWUSR); | |||
| 68 | MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), " | 66 | MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), " |
| 69 | "Pass -1 for infinite timeout"); | 67 | "Pass -1 for infinite timeout"); |
| 70 | 68 | ||
| 71 | /* Maximum amount of mismatched bytes in buffer to print */ | 69 | static bool noverify; |
| 72 | #define MAX_ERROR_COUNT 32 | 70 | module_param(noverify, bool, S_IRUGO | S_IWUSR); |
| 73 | 71 | MODULE_PARM_DESC(noverify, "Disable random data setup and verification"); | |
| 74 | /* | ||
| 75 | * Initialization patterns. All bytes in the source buffer have bit 7 | ||
| 76 | * set, all bytes in the destination buffer have bit 7 cleared. | ||
| 77 | * | ||
| 78 | * Bit 6 is set for all bytes which are to be copied by the DMA | ||
| 79 | * engine. Bit 5 is set for all bytes which are to be overwritten by | ||
| 80 | * the DMA engine. | ||
| 81 | * | ||
| 82 | * The remaining bits are the inverse of a counter which increments by | ||
| 83 | * one for each byte address. | ||
| 84 | */ | ||
| 85 | #define PATTERN_SRC 0x80 | ||
| 86 | #define PATTERN_DST 0x00 | ||
| 87 | #define PATTERN_COPY 0x40 | ||
| 88 | #define PATTERN_OVERWRITE 0x20 | ||
| 89 | #define PATTERN_COUNT_MASK 0x1f | ||
| 90 | |||
| 91 | enum dmatest_error_type { | ||
| 92 | DMATEST_ET_OK, | ||
| 93 | DMATEST_ET_MAP_SRC, | ||
| 94 | DMATEST_ET_MAP_DST, | ||
| 95 | DMATEST_ET_PREP, | ||
| 96 | DMATEST_ET_SUBMIT, | ||
| 97 | DMATEST_ET_TIMEOUT, | ||
| 98 | DMATEST_ET_DMA_ERROR, | ||
| 99 | DMATEST_ET_DMA_IN_PROGRESS, | ||
| 100 | DMATEST_ET_VERIFY, | ||
| 101 | DMATEST_ET_VERIFY_BUF, | ||
| 102 | }; | ||
| 103 | |||
| 104 | struct dmatest_verify_buffer { | ||
| 105 | unsigned int index; | ||
| 106 | u8 expected; | ||
| 107 | u8 actual; | ||
| 108 | }; | ||
| 109 | |||
| 110 | struct dmatest_verify_result { | ||
| 111 | unsigned int error_count; | ||
| 112 | struct dmatest_verify_buffer data[MAX_ERROR_COUNT]; | ||
| 113 | u8 pattern; | ||
| 114 | bool is_srcbuf; | ||
| 115 | }; | ||
| 116 | |||
| 117 | struct dmatest_thread_result { | ||
| 118 | struct list_head node; | ||
| 119 | unsigned int n; | ||
| 120 | unsigned int src_off; | ||
| 121 | unsigned int dst_off; | ||
| 122 | unsigned int len; | ||
| 123 | enum dmatest_error_type type; | ||
| 124 | union { | ||
| 125 | unsigned long data; | ||
| 126 | dma_cookie_t cookie; | ||
| 127 | enum dma_status status; | ||
| 128 | int error; | ||
| 129 | struct dmatest_verify_result *vr; | ||
| 130 | }; | ||
| 131 | }; | ||
| 132 | |||
| 133 | struct dmatest_result { | ||
| 134 | struct list_head node; | ||
| 135 | char *name; | ||
| 136 | struct list_head results; | ||
| 137 | }; | ||
| 138 | |||
| 139 | struct dmatest_info; | ||
| 140 | |||
| 141 | struct dmatest_thread { | ||
| 142 | struct list_head node; | ||
| 143 | struct dmatest_info *info; | ||
| 144 | struct task_struct *task; | ||
| 145 | struct dma_chan *chan; | ||
| 146 | u8 **srcs; | ||
| 147 | u8 **dsts; | ||
| 148 | enum dma_transaction_type type; | ||
| 149 | bool done; | ||
| 150 | }; | ||
| 151 | 72 | ||
| 152 | struct dmatest_chan { | 73 | static bool verbose; |
| 153 | struct list_head node; | 74 | module_param(verbose, bool, S_IRUGO | S_IWUSR); |
| 154 | struct dma_chan *chan; | 75 | MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)"); |
| 155 | struct list_head threads; | ||
| 156 | }; | ||
| 157 | 76 | ||
| 158 | /** | 77 | /** |
| 159 | * struct dmatest_params - test parameters. | 78 | * struct dmatest_params - test parameters. |
| @@ -177,6 +96,7 @@ struct dmatest_params { | |||
| 177 | unsigned int xor_sources; | 96 | unsigned int xor_sources; |
| 178 | unsigned int pq_sources; | 97 | unsigned int pq_sources; |
| 179 | int timeout; | 98 | int timeout; |
| 99 | bool noverify; | ||
| 180 | }; | 100 | }; |
| 181 | 101 | ||
| 182 | /** | 102 | /** |
| @@ -184,7 +104,7 @@ struct dmatest_params { | |||
| 184 | * @params: test parameters | 104 | * @params: test parameters |
| 185 | * @lock: access protection to the fields of this structure | 105 | * @lock: access protection to the fields of this structure |
| 186 | */ | 106 | */ |
| 187 | struct dmatest_info { | 107 | static struct dmatest_info { |
| 188 | /* Test parameters */ | 108 | /* Test parameters */ |
| 189 | struct dmatest_params params; | 109 | struct dmatest_params params; |
| 190 | 110 | ||
| @@ -192,16 +112,95 @@ struct dmatest_info { | |||
| 192 | struct list_head channels; | 112 | struct list_head channels; |
| 193 | unsigned int nr_channels; | 113 | unsigned int nr_channels; |
| 194 | struct mutex lock; | 114 | struct mutex lock; |
| 115 | bool did_init; | ||
| 116 | } test_info = { | ||
| 117 | .channels = LIST_HEAD_INIT(test_info.channels), | ||
| 118 | .lock = __MUTEX_INITIALIZER(test_info.lock), | ||
| 119 | }; | ||
| 120 | |||
| 121 | static int dmatest_run_set(const char *val, const struct kernel_param *kp); | ||
| 122 | static int dmatest_run_get(char *val, const struct kernel_param *kp); | ||
| 123 | static struct kernel_param_ops run_ops = { | ||
| 124 | .set = dmatest_run_set, | ||
| 125 | .get = dmatest_run_get, | ||
| 126 | }; | ||
| 127 | static bool dmatest_run; | ||
| 128 | module_param_cb(run, &run_ops, &dmatest_run, S_IRUGO | S_IWUSR); | ||
| 129 | MODULE_PARM_DESC(run, "Run the test (default: false)"); | ||
| 130 | |||
| 131 | /* Maximum amount of mismatched bytes in buffer to print */ | ||
| 132 | #define MAX_ERROR_COUNT 32 | ||
| 133 | |||
| 134 | /* | ||
| 135 | * Initialization patterns. All bytes in the source buffer have bit 7 | ||
| 136 | * set, all bytes in the destination buffer have bit 7 cleared. | ||
| 137 | * | ||
| 138 | * Bit 6 is set for all bytes which are to be copied by the DMA | ||
| 139 | * engine. Bit 5 is set for all bytes which are to be overwritten by | ||
| 140 | * the DMA engine. | ||
| 141 | * | ||
| 142 | * The remaining bits are the inverse of a counter which increments by | ||
| 143 | * one for each byte address. | ||
| 144 | */ | ||
| 145 | #define PATTERN_SRC 0x80 | ||
| 146 | #define PATTERN_DST 0x00 | ||
| 147 | #define PATTERN_COPY 0x40 | ||
| 148 | #define PATTERN_OVERWRITE 0x20 | ||
| 149 | #define PATTERN_COUNT_MASK 0x1f | ||
| 195 | 150 | ||
| 196 | /* debugfs related stuff */ | 151 | struct dmatest_thread { |
| 197 | struct dentry *root; | 152 | struct list_head node; |
| 153 | struct dmatest_info *info; | ||
| 154 | struct task_struct *task; | ||
| 155 | struct dma_chan *chan; | ||
| 156 | u8 **srcs; | ||
| 157 | u8 **dsts; | ||
| 158 | enum dma_transaction_type type; | ||
| 159 | bool done; | ||
| 160 | }; | ||
| 198 | 161 | ||
| 199 | /* Test results */ | 162 | struct dmatest_chan { |
| 200 | struct list_head results; | 163 | struct list_head node; |
| 201 | struct mutex results_lock; | 164 | struct dma_chan *chan; |
| 165 | struct list_head threads; | ||
| 202 | }; | 166 | }; |
| 203 | 167 | ||
| 204 | static struct dmatest_info test_info; | 168 | static DECLARE_WAIT_QUEUE_HEAD(thread_wait); |
| 169 | static bool wait; | ||
| 170 | |||
| 171 | static bool is_threaded_test_run(struct dmatest_info *info) | ||
| 172 | { | ||
| 173 | struct dmatest_chan *dtc; | ||
| 174 | |||
| 175 | list_for_each_entry(dtc, &info->channels, node) { | ||
| 176 | struct dmatest_thread *thread; | ||
| 177 | |||
| 178 | list_for_each_entry(thread, &dtc->threads, node) { | ||
| 179 | if (!thread->done) | ||
| 180 | return true; | ||
| 181 | } | ||
| 182 | } | ||
| 183 | |||
| 184 | return false; | ||
| 185 | } | ||
| 186 | |||
| 187 | static int dmatest_wait_get(char *val, const struct kernel_param *kp) | ||
| 188 | { | ||
| 189 | struct dmatest_info *info = &test_info; | ||
| 190 | struct dmatest_params *params = &info->params; | ||
| 191 | |||
| 192 | if (params->iterations) | ||
| 193 | wait_event(thread_wait, !is_threaded_test_run(info)); | ||
| 194 | wait = true; | ||
| 195 | return param_get_bool(val, kp); | ||
| 196 | } | ||
| 197 | |||
| 198 | static struct kernel_param_ops wait_ops = { | ||
| 199 | .get = dmatest_wait_get, | ||
| 200 | .set = param_set_bool, | ||
| 201 | }; | ||
| 202 | module_param_cb(wait, &wait_ops, &wait, S_IRUGO); | ||
| 203 | MODULE_PARM_DESC(wait, "Wait for tests to complete (default: false)"); | ||
| 205 | 204 | ||
| 206 | static bool dmatest_match_channel(struct dmatest_params *params, | 205 | static bool dmatest_match_channel(struct dmatest_params *params, |
| 207 | struct dma_chan *chan) | 206 | struct dma_chan *chan) |
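With the debugfs interface gone, dmatest is driven entirely through module parameters: run starts a test and wait lets scripts block until all iterations finish. The run setter is only declared above; a plausible sketch of it follows, where restart_threaded_test() is an assumed helper (not shown in this diff) that tears down and relaunches the test threads:

	static int dmatest_run_set(const char *val, const struct kernel_param *kp)
	{
		struct dmatest_info *info = &test_info;
		int ret;

		mutex_lock(&info->lock);
		ret = param_set_bool(val, kp);	/* parse "Y"/"N" into dmatest_run */
		if (ret) {
			mutex_unlock(&info->lock);
			return ret;
		}

		if (is_threaded_test_run(info))
			ret = -EBUSY;		/* refuse to restart a live test */
		else if (dmatest_run)
			restart_threaded_test(info, dmatest_run);	/* assumed */

		mutex_unlock(&info->lock);
		return ret;
	}

In use, a run reduces to writing values under /sys/module/dmatest/parameters/ (e.g. iterations, noverify) and then setting run to 1; reading wait afterwards blocks until the threads report done.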
| @@ -223,7 +222,7 @@ static unsigned long dmatest_random(void) | |||
| 223 | { | 222 | { |
| 224 | unsigned long buf; | 223 | unsigned long buf; |
| 225 | 224 | ||
| 226 | get_random_bytes(&buf, sizeof(buf)); | 225 | prandom_bytes(&buf, sizeof(buf)); |
| 227 | return buf; | 226 | return buf; |
| 228 | } | 227 | } |
| 229 | 228 | ||
| @@ -262,9 +261,31 @@ static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len, | |||
| 262 | } | 261 | } |
| 263 | } | 262 | } |
| 264 | 263 | ||
| 265 | static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs, | 264 | static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index, |
| 266 | unsigned int start, unsigned int end, unsigned int counter, | 265 | unsigned int counter, bool is_srcbuf) |
| 267 | u8 pattern, bool is_srcbuf) | 266 | { |
| 267 | u8 diff = actual ^ pattern; | ||
| 268 | u8 expected = pattern | (~counter & PATTERN_COUNT_MASK); | ||
| 269 | const char *thread_name = current->comm; | ||
| 270 | |||
| 271 | if (is_srcbuf) | ||
| 272 | pr_warn("%s: srcbuf[0x%x] overwritten! Expected %02x, got %02x\n", | ||
| 273 | thread_name, index, expected, actual); | ||
| 274 | else if ((pattern & PATTERN_COPY) | ||
| 275 | && (diff & (PATTERN_COPY | PATTERN_OVERWRITE))) | ||
| 276 | pr_warn("%s: dstbuf[0x%x] not copied! Expected %02x, got %02x\n", | ||
| 277 | thread_name, index, expected, actual); | ||
| 278 | else if (diff & PATTERN_SRC) | ||
| 279 | pr_warn("%s: dstbuf[0x%x] was copied! Expected %02x, got %02x\n", | ||
| 280 | thread_name, index, expected, actual); | ||
| 281 | else | ||
| 282 | pr_warn("%s: dstbuf[0x%x] mismatch! Expected %02x, got %02x\n", | ||
| 283 | thread_name, index, expected, actual); | ||
| 284 | } | ||
| 285 | |||
| 286 | static unsigned int dmatest_verify(u8 **bufs, unsigned int start, | ||
| 287 | unsigned int end, unsigned int counter, u8 pattern, | ||
| 288 | bool is_srcbuf) | ||
| 268 | { | 289 | { |
| 269 | unsigned int i; | 290 | unsigned int i; |
| 270 | unsigned int error_count = 0; | 291 | unsigned int error_count = 0; |
| @@ -272,7 +293,6 @@ static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs, | |||
| 272 | u8 expected; | 293 | u8 expected; |
| 273 | u8 *buf; | 294 | u8 *buf; |
| 274 | unsigned int counter_orig = counter; | 295 | unsigned int counter_orig = counter; |
| 275 | struct dmatest_verify_buffer *vb; | ||
| 276 | 296 | ||
| 277 | for (; (buf = *bufs); bufs++) { | 297 | for (; (buf = *bufs); bufs++) { |
| 278 | counter = counter_orig; | 298 | counter = counter_orig; |
| @@ -280,12 +300,9 @@ static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs, | |||
| 280 | actual = buf[i]; | 300 | actual = buf[i]; |
| 281 | expected = pattern | (~counter & PATTERN_COUNT_MASK); | 301 | expected = pattern | (~counter & PATTERN_COUNT_MASK); |
| 282 | if (actual != expected) { | 302 | if (actual != expected) { |
| 283 | if (error_count < MAX_ERROR_COUNT && vr) { | 303 | if (error_count < MAX_ERROR_COUNT) |
| 284 | vb = &vr->data[error_count]; | 304 | dmatest_mismatch(actual, pattern, i, |
| 285 | vb->index = i; | 305 | counter, is_srcbuf); |
| 286 | vb->expected = expected; | ||
| 287 | vb->actual = actual; | ||
| 288 | } | ||
| 289 | error_count++; | 306 | error_count++; |
| 290 | } | 307 | } |
| 291 | counter++; | 308 | counter++; |
| @@ -293,7 +310,7 @@ static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs, | |||
| 293 | } | 310 | } |
| 294 | 311 | ||
| 295 | if (error_count > MAX_ERROR_COUNT) | 312 | if (error_count > MAX_ERROR_COUNT) |
| 296 | pr_warning("%s: %u errors suppressed\n", | 313 | pr_warn("%s: %u errors suppressed\n", |
| 297 | current->comm, error_count - MAX_ERROR_COUNT); | 314 | current->comm, error_count - MAX_ERROR_COUNT); |
| 298 | 315 | ||
| 299 | return error_count; | 316 | return error_count; |
| @@ -313,20 +330,6 @@ static void dmatest_callback(void *arg) | |||
| 313 | wake_up_all(done->wait); | 330 | wake_up_all(done->wait); |
| 314 | } | 331 | } |
| 315 | 332 | ||
| 316 | static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len, | ||
| 317 | unsigned int count) | ||
| 318 | { | ||
| 319 | while (count--) | ||
| 320 | dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE); | ||
| 321 | } | ||
| 322 | |||
| 323 | static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len, | ||
| 324 | unsigned int count) | ||
| 325 | { | ||
| 326 | while (count--) | ||
| 327 | dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL); | ||
| 328 | } | ||
| 329 | |||
| 330 | static unsigned int min_odd(unsigned int x, unsigned int y) | 333 | static unsigned int min_odd(unsigned int x, unsigned int y) |
| 331 | { | 334 | { |
| 332 | unsigned int val = min(x, y); | 335 | unsigned int val = min(x, y); |
| @@ -334,172 +337,49 @@ static unsigned int min_odd(unsigned int x, unsigned int y) | |||
| 334 | return val % 2 ? val : val - 1; | 337 | return val % 2 ? val : val - 1; |
| 335 | } | 338 | } |
| 336 | 339 | ||
| 337 | static char *verify_result_get_one(struct dmatest_verify_result *vr, | 340 | static void result(const char *err, unsigned int n, unsigned int src_off, |
| 338 | unsigned int i) | 341 | unsigned int dst_off, unsigned int len, unsigned long data) |
| 339 | { | 342 | { |
| 340 | struct dmatest_verify_buffer *vb = &vr->data[i]; | 343 | pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
| 341 | u8 diff = vb->actual ^ vr->pattern; | 344 | current->comm, n, err, src_off, dst_off, len, data); |
| 342 | static char buf[512]; | ||
| 343 | char *msg; | ||
| 344 | |||
| 345 | if (vr->is_srcbuf) | ||
| 346 | msg = "srcbuf overwritten!"; | ||
| 347 | else if ((vr->pattern & PATTERN_COPY) | ||
| 348 | && (diff & (PATTERN_COPY | PATTERN_OVERWRITE))) | ||
| 349 | msg = "dstbuf not copied!"; | ||
| 350 | else if (diff & PATTERN_SRC) | ||
| 351 | msg = "dstbuf was copied!"; | ||
| 352 | else | ||
| 353 | msg = "dstbuf mismatch!"; | ||
| 354 | |||
| 355 | snprintf(buf, sizeof(buf) - 1, "%s [0x%x] Expected %02x, got %02x", msg, | ||
| 356 | vb->index, vb->expected, vb->actual); | ||
| 357 | |||
| 358 | return buf; | ||
| 359 | } | 345 | } |
| 360 | 346 | ||
| 361 | static char *thread_result_get(const char *name, | 347 | static void dbg_result(const char *err, unsigned int n, unsigned int src_off, |
| 362 | struct dmatest_thread_result *tr) | 348 | unsigned int dst_off, unsigned int len, |
| 349 | unsigned long data) | ||
| 363 | { | 350 | { |
| 364 | static const char * const messages[] = { | 351 | pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
| 365 | [DMATEST_ET_OK] = "No errors", | 352 | current->comm, n, err, src_off, dst_off, len, data); |
| 366 | [DMATEST_ET_MAP_SRC] = "src mapping error", | ||
| 367 | [DMATEST_ET_MAP_DST] = "dst mapping error", | ||
| 368 | [DMATEST_ET_PREP] = "prep error", | ||
| 369 | [DMATEST_ET_SUBMIT] = "submit error", | ||
| 370 | [DMATEST_ET_TIMEOUT] = "test timed out", | ||
| 371 | [DMATEST_ET_DMA_ERROR] = | ||
| 372 | "got completion callback (DMA_ERROR)", | ||
| 373 | [DMATEST_ET_DMA_IN_PROGRESS] = | ||
| 374 | "got completion callback (DMA_IN_PROGRESS)", | ||
| 375 | [DMATEST_ET_VERIFY] = "errors", | ||
| 376 | [DMATEST_ET_VERIFY_BUF] = "verify errors", | ||
| 377 | }; | ||
| 378 | static char buf[512]; | ||
| 379 | |||
| 380 | snprintf(buf, sizeof(buf) - 1, | ||
| 381 | "%s: #%u: %s with src_off=0x%x ""dst_off=0x%x len=0x%x (%lu)", | ||
| 382 | name, tr->n, messages[tr->type], tr->src_off, tr->dst_off, | ||
| 383 | tr->len, tr->data); | ||
| 384 | |||
| 385 | return buf; | ||
| 386 | } | 353 | } |
| 387 | 354 | ||
| 388 | static int thread_result_add(struct dmatest_info *info, | 355 | #define verbose_result(err, n, src_off, dst_off, len, data) ({ \ |
| 389 | struct dmatest_result *r, enum dmatest_error_type type, | 356 | if (verbose) \ |
| 390 | unsigned int n, unsigned int src_off, unsigned int dst_off, | 357 | result(err, n, src_off, dst_off, len, data); \ |
| 391 | unsigned int len, unsigned long data) | 358 | else \ |
| 392 | { | 359 | dbg_result(err, n, src_off, dst_off, len, data); \ |
| 393 | struct dmatest_thread_result *tr; | 360 | }) |
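
The verbose_result() macro only picks a log level at run time: pr_info() when the (presumed) bool module parameter 'verbose' is set, otherwise pr_debug(), which is compiled out unless DEBUG or dynamic debug is enabled, keeping passing tests quiet by default. An equivalent helper function, as a sketch:

	/* Behaviorally equivalent to the macro, assuming 'static bool verbose'
	 * is declared as a module parameter elsewhere in this file. */
	static inline void log_result(const char *err, unsigned int n,
				      unsigned int src_off, unsigned int dst_off,
				      unsigned int len, unsigned long data)
	{
		if (verbose)
			result(err, n, src_off, dst_off, len, data);     /* pr_info */
		else
			dbg_result(err, n, src_off, dst_off, len, data); /* pr_debug */
	}
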
| 394 | |||
| 395 | tr = kzalloc(sizeof(*tr), GFP_KERNEL); | ||
| 396 | if (!tr) | ||
| 397 | return -ENOMEM; | ||
| 398 | |||
| 399 | tr->type = type; | ||
| 400 | tr->n = n; | ||
| 401 | tr->src_off = src_off; | ||
| 402 | tr->dst_off = dst_off; | ||
| 403 | tr->len = len; | ||
| 404 | tr->data = data; | ||
| 405 | 361 | ||
| 406 | mutex_lock(&info->results_lock); | 362 | static unsigned long long dmatest_persec(s64 runtime, unsigned int val) |
| 407 | list_add_tail(&tr->node, &r->results); | ||
| 408 | mutex_unlock(&info->results_lock); | ||
| 409 | |||
| 410 | if (tr->type == DMATEST_ET_OK) | ||
| 411 | pr_debug("%s\n", thread_result_get(r->name, tr)); | ||
| 412 | else | ||
| 413 | pr_warn("%s\n", thread_result_get(r->name, tr)); | ||
| 414 | |||
| 415 | return 0; | ||
| 416 | } | ||
| 417 | |||
| 418 | static unsigned int verify_result_add(struct dmatest_info *info, | ||
| 419 | struct dmatest_result *r, unsigned int n, | ||
| 420 | unsigned int src_off, unsigned int dst_off, unsigned int len, | ||
| 421 | u8 **bufs, int whence, unsigned int counter, u8 pattern, | ||
| 422 | bool is_srcbuf) | ||
| 423 | { | 363 | { |
| 424 | struct dmatest_verify_result *vr; | 364 | unsigned long long per_sec = 1000000; |
| 425 | unsigned int error_count; | ||
| 426 | unsigned int buf_off = is_srcbuf ? src_off : dst_off; | ||
| 427 | unsigned int start, end; | ||
| 428 | |||
| 429 | if (whence < 0) { | ||
| 430 | start = 0; | ||
| 431 | end = buf_off; | ||
| 432 | } else if (whence > 0) { | ||
| 433 | start = buf_off + len; | ||
| 434 | end = info->params.buf_size; | ||
| 435 | } else { | ||
| 436 | start = buf_off; | ||
| 437 | end = buf_off + len; | ||
| 438 | } | ||
| 439 | 365 | ||
| 440 | vr = kmalloc(sizeof(*vr), GFP_KERNEL); | 366 | if (runtime <= 0) |
| 441 | if (!vr) { | 367 | return 0; |
| 442 | pr_warn("dmatest: No memory to store verify result\n"); | ||
| 443 | return dmatest_verify(NULL, bufs, start, end, counter, pattern, | ||
| 444 | is_srcbuf); | ||
| 445 | } | ||
| 446 | |||
| 447 | vr->pattern = pattern; | ||
| 448 | vr->is_srcbuf = is_srcbuf; | ||
| 449 | |||
| 450 | error_count = dmatest_verify(vr, bufs, start, end, counter, pattern, | ||
| 451 | is_srcbuf); | ||
| 452 | if (error_count) { | ||
| 453 | vr->error_count = error_count; | ||
| 454 | thread_result_add(info, r, DMATEST_ET_VERIFY_BUF, n, src_off, | ||
| 455 | dst_off, len, (unsigned long)vr); | ||
| 456 | return error_count; | ||
| 457 | } | ||
| 458 | |||
| 459 | kfree(vr); | ||
| 460 | return 0; | ||
| 461 | } | ||
| 462 | |||
| 463 | static void result_free(struct dmatest_info *info, const char *name) | ||
| 464 | { | ||
| 465 | struct dmatest_result *r, *_r; | ||
| 466 | |||
| 467 | mutex_lock(&info->results_lock); | ||
| 468 | list_for_each_entry_safe(r, _r, &info->results, node) { | ||
| 469 | struct dmatest_thread_result *tr, *_tr; | ||
| 470 | |||
| 471 | if (name && strcmp(r->name, name)) | ||
| 472 | continue; | ||
| 473 | |||
| 474 | list_for_each_entry_safe(tr, _tr, &r->results, node) { | ||
| 475 | if (tr->type == DMATEST_ET_VERIFY_BUF) | ||
| 476 | kfree(tr->vr); | ||
| 477 | list_del(&tr->node); | ||
| 478 | kfree(tr); | ||
| 479 | } | ||
| 480 | 368 | ||
| 481 | kfree(r->name); | 369 | /* drop precision until runtime fits in 32 bits */
| 482 | list_del(&r->node); | 370 | while (runtime > UINT_MAX) { |
| 483 | kfree(r); | 371 | runtime >>= 1; |
| 372 | per_sec <<= 1; | ||
| 484 | } | 373 | } |
| 485 | 374 | ||
| 486 | mutex_unlock(&info->results_lock); | 375 | per_sec *= val; |
| 376 | do_div(per_sec, runtime); | ||
| 377 | return per_sec; | ||
| 487 | } | 378 | } |
| 488 | 379 | ||
| 489 | static struct dmatest_result *result_init(struct dmatest_info *info, | 380 | static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len) |
| 490 | const char *name) | ||
| 491 | { | 381 | { |
| 492 | struct dmatest_result *r; | 382 | return dmatest_persec(runtime, len >> 10); |
| 493 | |||
| 494 | r = kzalloc(sizeof(*r), GFP_KERNEL); | ||
| 495 | if (r) { | ||
| 496 | r->name = kstrdup(name, GFP_KERNEL); | ||
| 497 | INIT_LIST_HEAD(&r->results); | ||
| 498 | mutex_lock(&info->results_lock); | ||
| 499 | list_add_tail(&r->node, &info->results); | ||
| 500 | mutex_unlock(&info->results_lock); | ||
| 501 | } | ||
| 502 | return r; | ||
| 503 | } | 383 | } |
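
dmatest_persec() computes val / (runtime in seconds) without floating point: it seeds the scale with 10^6 (runtime is in microseconds) and trades low bits of runtime for scale until the divisor fits do_div()'s 32-bit limit. A self-contained userspace sketch with sample numbers:

	#include <stdio.h>
	#include <stdint.h>
	#include <limits.h>

	/* Userspace model of dmatest_persec(); do_div() is replaced by plain
	 * 64-bit division, which is what it computes once runtime fits 32 bits. */
	static unsigned long long persec(int64_t runtime_us, unsigned int val)
	{
		unsigned long long per_sec = 1000000;	/* us -> s conversion */

		if (runtime_us <= 0)
			return 0;
		/* Drop precision until runtime fits in 32 bits; the ratio
		 * per_sec/runtime is preserved, only low bits are lost. */
		while (runtime_us > UINT_MAX) {
			runtime_us >>= 1;
			per_sec <<= 1;
		}
		return per_sec * val / (unsigned long long)runtime_us;
	}

	int main(void)
	{
		/* 2500 tests moving 1 GiB total in 2 s (2000000 us): */
		printf("%llu iops\n", persec(2000000, 2500));	 /* 1250 */
		printf("%llu KB/s\n", persec(2000000, 1 << 20)); /* 524288 */
		return 0;
	}
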
| 504 | 384 | ||
| 505 | /* | 385 | /* |
| @@ -525,7 +405,6 @@ static int dmatest_func(void *data) | |||
| 525 | struct dmatest_params *params; | 405 | struct dmatest_params *params; |
| 526 | struct dma_chan *chan; | 406 | struct dma_chan *chan; |
| 527 | struct dma_device *dev; | 407 | struct dma_device *dev; |
| 528 | const char *thread_name; | ||
| 529 | unsigned int src_off, dst_off, len; | 408 | unsigned int src_off, dst_off, len; |
| 530 | unsigned int error_count; | 409 | unsigned int error_count; |
| 531 | unsigned int failed_tests = 0; | 410 | unsigned int failed_tests = 0; |
| @@ -538,9 +417,10 @@ static int dmatest_func(void *data) | |||
| 538 | int src_cnt; | 417 | int src_cnt; |
| 539 | int dst_cnt; | 418 | int dst_cnt; |
| 540 | int i; | 419 | int i; |
| 541 | struct dmatest_result *result; | 420 | ktime_t ktime; |
| 421 | s64 runtime = 0; | ||
| 422 | unsigned long long total_len = 0; | ||
| 542 | 423 | ||
| 543 | thread_name = current->comm; | ||
| 544 | set_freezable(); | 424 | set_freezable(); |
| 545 | 425 | ||
| 546 | ret = -ENOMEM; | 426 | ret = -ENOMEM; |
| @@ -570,10 +450,6 @@ static int dmatest_func(void *data) | |||
| 570 | } else | 450 | } else |
| 571 | goto err_thread_type; | 451 | goto err_thread_type; |
| 572 | 452 | ||
| 573 | result = result_init(info, thread_name); | ||
| 574 | if (!result) | ||
| 575 | goto err_srcs; | ||
| 576 | |||
| 577 | thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL); | 453 | thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL); |
| 578 | if (!thread->srcs) | 454 | if (!thread->srcs) |
| 579 | goto err_srcs; | 455 | goto err_srcs; |
| @@ -597,17 +473,17 @@ static int dmatest_func(void *data) | |||
| 597 | set_user_nice(current, 10); | 473 | set_user_nice(current, 10); |
| 598 | 474 | ||
| 599 | /* | 475 | /* |
| 600 | * src buffers are freed by the DMAEngine code with dma_unmap_single() | 476 | * src and dst buffers are freed by ourselves below |
| 601 | * dst buffers are freed by ourselves below | ||
| 602 | */ | 477 | */ |
| 603 | flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | 478 | flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; |
| 604 | | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE; | ||
| 605 | 479 | ||
| 480 | ktime = ktime_get(); | ||
| 606 | while (!kthread_should_stop() | 481 | while (!kthread_should_stop() |
| 607 | && !(params->iterations && total_tests >= params->iterations)) { | 482 | && !(params->iterations && total_tests >= params->iterations)) { |
| 608 | struct dma_async_tx_descriptor *tx = NULL; | 483 | struct dma_async_tx_descriptor *tx = NULL; |
| 609 | dma_addr_t dma_srcs[src_cnt]; | 484 | struct dmaengine_unmap_data *um; |
| 610 | dma_addr_t dma_dsts[dst_cnt]; | 485 | dma_addr_t srcs[src_cnt]; |
| 486 | dma_addr_t *dsts; | ||
| 611 | u8 align = 0; | 487 | u8 align = 0; |
| 612 | 488 | ||
| 613 | total_tests++; | 489 | total_tests++; |
| @@ -626,81 +502,103 @@ static int dmatest_func(void *data) | |||
| 626 | break; | 502 | break; |
| 627 | } | 503 | } |
| 628 | 504 | ||
| 629 | len = dmatest_random() % params->buf_size + 1; | 505 | if (params->noverify) { |
| 506 | len = params->buf_size; | ||
| 507 | src_off = 0; | ||
| 508 | dst_off = 0; | ||
| 509 | } else { | ||
| 510 | len = dmatest_random() % params->buf_size + 1; | ||
| 511 | len = (len >> align) << align; | ||
| 512 | if (!len) | ||
| 513 | len = 1 << align; | ||
| 514 | src_off = dmatest_random() % (params->buf_size - len + 1); | ||
| 515 | dst_off = dmatest_random() % (params->buf_size - len + 1); | ||
| 516 | |||
| 517 | src_off = (src_off >> align) << align; | ||
| 518 | dst_off = (dst_off >> align) << align; | ||
| 519 | |||
| 520 | dmatest_init_srcs(thread->srcs, src_off, len, | ||
| 521 | params->buf_size); | ||
| 522 | dmatest_init_dsts(thread->dsts, dst_off, len, | ||
| 523 | params->buf_size); | ||
| 524 | } | ||
| 525 | |||
| 630 | len = (len >> align) << align; | 526 | len = (len >> align) << align; |
| 631 | if (!len) | 527 | if (!len) |
| 632 | len = 1 << align; | 528 | len = 1 << align; |
| 633 | src_off = dmatest_random() % (params->buf_size - len + 1); | 529 | total_len += len; |
| 634 | dst_off = dmatest_random() % (params->buf_size - len + 1); | ||
| 635 | 530 | ||
| 636 | src_off = (src_off >> align) << align; | 531 | um = dmaengine_get_unmap_data(dev->dev, src_cnt+dst_cnt, |
| 637 | dst_off = (dst_off >> align) << align; | 532 | GFP_KERNEL); |
| 638 | 533 | if (!um) { | |
| 639 | dmatest_init_srcs(thread->srcs, src_off, len, params->buf_size); | 534 | failed_tests++; |
| 640 | dmatest_init_dsts(thread->dsts, dst_off, len, params->buf_size); | 535 | result("unmap data NULL", total_tests, |
| 536 | src_off, dst_off, len, ret); | ||
| 537 | continue; | ||
| 538 | } | ||
| 641 | 539 | ||
| 540 | um->len = params->buf_size; | ||
| 642 | for (i = 0; i < src_cnt; i++) { | 541 | for (i = 0; i < src_cnt; i++) { |
| 643 | u8 *buf = thread->srcs[i] + src_off; | 542 | unsigned long buf = (unsigned long) thread->srcs[i]; |
| 644 | 543 | struct page *pg = virt_to_page(buf); | |
| 645 | dma_srcs[i] = dma_map_single(dev->dev, buf, len, | 544 | unsigned pg_off = buf & ~PAGE_MASK; |
| 646 | DMA_TO_DEVICE); | 545 | |
| 647 | ret = dma_mapping_error(dev->dev, dma_srcs[i]); | 546 | um->addr[i] = dma_map_page(dev->dev, pg, pg_off, |
| 547 | um->len, DMA_TO_DEVICE); | ||
| 548 | srcs[i] = um->addr[i] + src_off; | ||
| 549 | ret = dma_mapping_error(dev->dev, um->addr[i]); | ||
| 648 | if (ret) { | 550 | if (ret) { |
| 649 | unmap_src(dev->dev, dma_srcs, len, i); | 551 | dmaengine_unmap_put(um); |
| 650 | thread_result_add(info, result, | 552 | result("src mapping error", total_tests, |
| 651 | DMATEST_ET_MAP_SRC, | 553 | src_off, dst_off, len, ret); |
| 652 | total_tests, src_off, dst_off, | ||
| 653 | len, ret); | ||
| 654 | failed_tests++; | 554 | failed_tests++; |
| 655 | continue; | 555 | continue; |
| 656 | } | 556 | } |
| 557 | um->to_cnt++; | ||
| 657 | } | 558 | } |
| 658 | /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ | 559 | /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ |
| 560 | dsts = &um->addr[src_cnt]; | ||
| 659 | for (i = 0; i < dst_cnt; i++) { | 561 | for (i = 0; i < dst_cnt; i++) { |
| 660 | dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i], | 562 | unsigned long buf = (unsigned long) thread->dsts[i]; |
| 661 | params->buf_size, | 563 | struct page *pg = virt_to_page(buf); |
| 662 | DMA_BIDIRECTIONAL); | 564 | unsigned pg_off = buf & ~PAGE_MASK; |
| 663 | ret = dma_mapping_error(dev->dev, dma_dsts[i]); | 565 | |
| 566 | dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len, | ||
| 567 | DMA_BIDIRECTIONAL); | ||
| 568 | ret = dma_mapping_error(dev->dev, dsts[i]); | ||
| 664 | if (ret) { | 569 | if (ret) { |
| 665 | unmap_src(dev->dev, dma_srcs, len, src_cnt); | 570 | dmaengine_unmap_put(um); |
| 666 | unmap_dst(dev->dev, dma_dsts, params->buf_size, | 571 | result("dst mapping error", total_tests, |
| 667 | i); | 572 | src_off, dst_off, len, ret); |
| 668 | thread_result_add(info, result, | ||
| 669 | DMATEST_ET_MAP_DST, | ||
| 670 | total_tests, src_off, dst_off, | ||
| 671 | len, ret); | ||
| 672 | failed_tests++; | 573 | failed_tests++; |
| 673 | continue; | 574 | continue; |
| 674 | } | 575 | } |
| 576 | um->bidi_cnt++; | ||
| 675 | } | 577 | } |
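
Both loops above record what they mapped in 'um' (to_cnt DMA_TO_DEVICE entries followed by bidi_cnt DMA_BIDIRECTIONAL entries in addr[]), so every error path can simply call dmaengine_unmap_put(). When the last reference drops, the dmaengine core walks the array and undoes the mappings, roughly like this sketch (the real teardown lives in drivers/dma/dmaengine.c):

	/* Sketch of the teardown the final dmaengine_unmap_put(um) triggers:
	 * entries are unmapped in to/from/bidi order, all with length um->len,
	 * which is why dmatest maps whole buffers rather than just 'len' bytes. */
	static void unmap_all(struct dmaengine_unmap_data *um)
	{
		unsigned int i = 0, cnt;

		for (cnt = um->to_cnt; i < cnt; i++)
			dma_unmap_page(um->dev, um->addr[i], um->len, DMA_TO_DEVICE);
		for (cnt += um->from_cnt; i < cnt; i++)
			dma_unmap_page(um->dev, um->addr[i], um->len, DMA_FROM_DEVICE);
		for (cnt += um->bidi_cnt; i < cnt; i++)
			dma_unmap_page(um->dev, um->addr[i], um->len,
				       DMA_BIDIRECTIONAL);
	}
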
| 676 | 578 | ||
| 677 | if (thread->type == DMA_MEMCPY) | 579 | if (thread->type == DMA_MEMCPY) |
| 678 | tx = dev->device_prep_dma_memcpy(chan, | 580 | tx = dev->device_prep_dma_memcpy(chan, |
| 679 | dma_dsts[0] + dst_off, | 581 | dsts[0] + dst_off, |
| 680 | dma_srcs[0], len, | 582 | srcs[0], len, flags); |
| 681 | flags); | ||
| 682 | else if (thread->type == DMA_XOR) | 583 | else if (thread->type == DMA_XOR) |
| 683 | tx = dev->device_prep_dma_xor(chan, | 584 | tx = dev->device_prep_dma_xor(chan, |
| 684 | dma_dsts[0] + dst_off, | 585 | dsts[0] + dst_off, |
| 685 | dma_srcs, src_cnt, | 586 | srcs, src_cnt, |
| 686 | len, flags); | 587 | len, flags); |
| 687 | else if (thread->type == DMA_PQ) { | 588 | else if (thread->type == DMA_PQ) { |
| 688 | dma_addr_t dma_pq[dst_cnt]; | 589 | dma_addr_t dma_pq[dst_cnt]; |
| 689 | 590 | ||
| 690 | for (i = 0; i < dst_cnt; i++) | 591 | for (i = 0; i < dst_cnt; i++) |
| 691 | dma_pq[i] = dma_dsts[i] + dst_off; | 592 | dma_pq[i] = dsts[i] + dst_off; |
| 692 | tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs, | 593 | tx = dev->device_prep_dma_pq(chan, dma_pq, srcs, |
| 693 | src_cnt, pq_coefs, | 594 | src_cnt, pq_coefs, |
| 694 | len, flags); | 595 | len, flags); |
| 695 | } | 596 | } |
| 696 | 597 | ||
| 697 | if (!tx) { | 598 | if (!tx) { |
| 698 | unmap_src(dev->dev, dma_srcs, len, src_cnt); | 599 | dmaengine_unmap_put(um); |
| 699 | unmap_dst(dev->dev, dma_dsts, params->buf_size, | 600 | result("prep error", total_tests, src_off, |
| 700 | dst_cnt); | 601 | dst_off, len, ret); |
| 701 | thread_result_add(info, result, DMATEST_ET_PREP, | ||
| 702 | total_tests, src_off, dst_off, | ||
| 703 | len, 0); | ||
| 704 | msleep(100); | 602 | msleep(100); |
| 705 | failed_tests++; | 603 | failed_tests++; |
| 706 | continue; | 604 | continue; |
| @@ -712,9 +610,9 @@ static int dmatest_func(void *data) | |||
| 712 | cookie = tx->tx_submit(tx); | 610 | cookie = tx->tx_submit(tx); |
| 713 | 611 | ||
| 714 | if (dma_submit_error(cookie)) { | 612 | if (dma_submit_error(cookie)) { |
| 715 | thread_result_add(info, result, DMATEST_ET_SUBMIT, | 613 | dmaengine_unmap_put(um); |
| 716 | total_tests, src_off, dst_off, | 614 | result("submit error", total_tests, src_off, |
| 717 | len, cookie); | 615 | dst_off, len, ret); |
| 718 | msleep(100); | 616 | msleep(100); |
| 719 | failed_tests++; | 617 | failed_tests++; |
| 720 | continue; | 618 | continue; |
| @@ -735,59 +633,59 @@ static int dmatest_func(void *data) | |||
| 735 | * free it this time?" dancing. For now, just | 633 | * free it this time?" dancing. For now, just |
| 736 | * leave it dangling. | 634 | * leave it dangling. |
| 737 | */ | 635 | */ |
| 738 | thread_result_add(info, result, DMATEST_ET_TIMEOUT, | 636 | dmaengine_unmap_put(um); |
| 739 | total_tests, src_off, dst_off, | 637 | result("test timed out", total_tests, src_off, dst_off, |
| 740 | len, 0); | 638 | len, 0); |
| 741 | failed_tests++; | 639 | failed_tests++; |
| 742 | continue; | 640 | continue; |
| 743 | } else if (status != DMA_SUCCESS) { | 641 | } else if (status != DMA_COMPLETE) { |
| 744 | enum dmatest_error_type type = (status == DMA_ERROR) ? | 642 | dmaengine_unmap_put(um); |
| 745 | DMATEST_ET_DMA_ERROR : DMATEST_ET_DMA_IN_PROGRESS; | 643 | result(status == DMA_ERROR ? |
| 746 | thread_result_add(info, result, type, | 644 | "completion error status" : |
| 747 | total_tests, src_off, dst_off, | 645 | "completion busy status", total_tests, src_off, |
| 748 | len, status); | 646 | dst_off, len, ret); |
| 749 | failed_tests++; | 647 | failed_tests++; |
| 750 | continue; | 648 | continue; |
| 751 | } | 649 | } |
| 752 | 650 | ||
| 753 | /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */ | 651 | dmaengine_unmap_put(um); |
| 754 | unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt); | ||
| 755 | 652 | ||
| 756 | error_count = 0; | 653 | if (params->noverify) { |
| 654 | verbose_result("test passed", total_tests, src_off, | ||
| 655 | dst_off, len, 0); | ||
| 656 | continue; | ||
| 657 | } | ||
| 757 | 658 | ||
| 758 | pr_debug("%s: verifying source buffer...\n", thread_name); | 659 | pr_debug("%s: verifying source buffer...\n", current->comm); |
| 759 | error_count += verify_result_add(info, result, total_tests, | 660 | error_count = dmatest_verify(thread->srcs, 0, src_off, |
| 760 | src_off, dst_off, len, thread->srcs, -1, | ||
| 761 | 0, PATTERN_SRC, true); | 661 | 0, PATTERN_SRC, true); |
| 762 | error_count += verify_result_add(info, result, total_tests, | 662 | error_count += dmatest_verify(thread->srcs, src_off, |
| 763 | src_off, dst_off, len, thread->srcs, 0, | 663 | src_off + len, src_off, |
| 764 | src_off, PATTERN_SRC | PATTERN_COPY, true); | 664 | PATTERN_SRC | PATTERN_COPY, true); |
| 765 | error_count += verify_result_add(info, result, total_tests, | 665 | error_count += dmatest_verify(thread->srcs, src_off + len, |
| 766 | src_off, dst_off, len, thread->srcs, 1, | 666 | params->buf_size, src_off + len, |
| 767 | src_off + len, PATTERN_SRC, true); | 667 | PATTERN_SRC, true); |
| 768 | 668 | ||
| 769 | pr_debug("%s: verifying dest buffer...\n", thread_name); | 669 | pr_debug("%s: verifying dest buffer...\n", current->comm); |
| 770 | error_count += verify_result_add(info, result, total_tests, | 670 | error_count += dmatest_verify(thread->dsts, 0, dst_off, |
| 771 | src_off, dst_off, len, thread->dsts, -1, | ||
| 772 | 0, PATTERN_DST, false); | 671 | 0, PATTERN_DST, false); |
| 773 | error_count += verify_result_add(info, result, total_tests, | 672 | error_count += dmatest_verify(thread->dsts, dst_off, |
| 774 | src_off, dst_off, len, thread->dsts, 0, | 673 | dst_off + len, src_off, |
| 775 | src_off, PATTERN_SRC | PATTERN_COPY, false); | 674 | PATTERN_SRC | PATTERN_COPY, false); |
| 776 | error_count += verify_result_add(info, result, total_tests, | 675 | error_count += dmatest_verify(thread->dsts, dst_off + len, |
| 777 | src_off, dst_off, len, thread->dsts, 1, | 676 | params->buf_size, dst_off + len, |
| 778 | dst_off + len, PATTERN_DST, false); | 677 | PATTERN_DST, false); |
| 779 | 678 | ||
| 780 | if (error_count) { | 679 | if (error_count) { |
| 781 | thread_result_add(info, result, DMATEST_ET_VERIFY, | 680 | result("data error", total_tests, src_off, dst_off, |
| 782 | total_tests, src_off, dst_off, | 681 | len, error_count); |
| 783 | len, error_count); | ||
| 784 | failed_tests++; | 682 | failed_tests++; |
| 785 | } else { | 683 | } else { |
| 786 | thread_result_add(info, result, DMATEST_ET_OK, | 684 | verbose_result("test passed", total_tests, src_off, |
| 787 | total_tests, src_off, dst_off, | 685 | dst_off, len, 0); |
| 788 | len, 0); | ||
| 789 | } | 686 | } |
| 790 | } | 687 | } |
| 688 | runtime = ktime_us_delta(ktime_get(), ktime); | ||
| 791 | 689 | ||
| 792 | ret = 0; | 690 | ret = 0; |
| 793 | for (i = 0; thread->dsts[i]; i++) | 691 | for (i = 0; thread->dsts[i]; i++) |
| @@ -802,20 +700,17 @@ err_srcbuf: | |||
| 802 | err_srcs: | 700 | err_srcs: |
| 803 | kfree(pq_coefs); | 701 | kfree(pq_coefs); |
| 804 | err_thread_type: | 702 | err_thread_type: |
| 805 | pr_notice("%s: terminating after %u tests, %u failures (status %d)\n", | 703 | pr_info("%s: summary %u tests, %u failures %llu iops %llu KB/s (%d)\n", |
| 806 | thread_name, total_tests, failed_tests, ret); | 704 | current->comm, total_tests, failed_tests, |
| 705 | dmatest_persec(runtime, total_tests), | ||
| 706 | dmatest_KBs(runtime, total_len), ret); | ||
| 807 | 707 | ||
| 808 | /* terminate all transfers on specified channels */ | 708 | /* terminate all transfers on specified channels */ |
| 809 | if (ret) | 709 | if (ret) |
| 810 | dmaengine_terminate_all(chan); | 710 | dmaengine_terminate_all(chan); |
| 811 | 711 | ||
| 812 | thread->done = true; | 712 | thread->done = true; |
| 813 | 713 | wake_up(&thread_wait); | |
| 814 | if (params->iterations > 0) | ||
| 815 | while (!kthread_should_stop()) { | ||
| 816 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit); | ||
| 817 | interruptible_sleep_on(&wait_dmatest_exit); | ||
| 818 | } | ||
| 819 | 714 | ||
| 820 | return ret; | 715 | return ret; |
| 821 | } | 716 | } |
| @@ -828,9 +723,10 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc) | |||
| 828 | 723 | ||
| 829 | list_for_each_entry_safe(thread, _thread, &dtc->threads, node) { | 724 | list_for_each_entry_safe(thread, _thread, &dtc->threads, node) { |
| 830 | ret = kthread_stop(thread->task); | 725 | ret = kthread_stop(thread->task); |
| 831 | pr_debug("dmatest: thread %s exited with status %d\n", | 726 | pr_debug("thread %s exited with status %d\n", |
| 832 | thread->task->comm, ret); | 727 | thread->task->comm, ret); |
| 833 | list_del(&thread->node); | 728 | list_del(&thread->node); |
| 729 | put_task_struct(thread->task); | ||
| 834 | kfree(thread); | 730 | kfree(thread); |
| 835 | } | 731 | } |
| 836 | 732 | ||
| @@ -861,27 +757,27 @@ static int dmatest_add_threads(struct dmatest_info *info, | |||
| 861 | for (i = 0; i < params->threads_per_chan; i++) { | 757 | for (i = 0; i < params->threads_per_chan; i++) { |
| 862 | thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL); | 758 | thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL); |
| 863 | if (!thread) { | 759 | if (!thread) { |
| 864 | pr_warning("dmatest: No memory for %s-%s%u\n", | 760 | pr_warn("No memory for %s-%s%u\n", |
| 865 | dma_chan_name(chan), op, i); | 761 | dma_chan_name(chan), op, i); |
| 866 | |||
| 867 | break; | 762 | break; |
| 868 | } | 763 | } |
| 869 | thread->info = info; | 764 | thread->info = info; |
| 870 | thread->chan = dtc->chan; | 765 | thread->chan = dtc->chan; |
| 871 | thread->type = type; | 766 | thread->type = type; |
| 872 | smp_wmb(); | 767 | smp_wmb(); |
| 873 | thread->task = kthread_run(dmatest_func, thread, "%s-%s%u", | 768 | thread->task = kthread_create(dmatest_func, thread, "%s-%s%u", |
| 874 | dma_chan_name(chan), op, i); | 769 | dma_chan_name(chan), op, i); |
| 875 | if (IS_ERR(thread->task)) { | 770 | if (IS_ERR(thread->task)) { |
| 876 | pr_warning("dmatest: Failed to run thread %s-%s%u\n", | 771 | pr_warn("Failed to create thread %s-%s%u\n", |
| 877 | dma_chan_name(chan), op, i); | 772 | dma_chan_name(chan), op, i); |
| 878 | kfree(thread); | 773 | kfree(thread); |
| 879 | break; | 774 | break; |
| 880 | } | 775 | } |
| 881 | 776 | ||
| 882 | /* srcbuf and dstbuf are allocated by the thread itself */ | 777 | /* srcbuf and dstbuf are allocated by the thread itself */ |
| 883 | 778 | get_task_struct(thread->task); | |
| 884 | list_add_tail(&thread->node, &dtc->threads); | 779 | list_add_tail(&thread->node, &dtc->threads); |
| 780 | wake_up_process(thread->task); | ||
| 885 | } | 781 | } |
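
The switch from kthread_run() to kthread_create() + wake_up_process(), with a get_task_struct() in between, closes a use-after-free window: with a finite 'iterations' count a test thread can finish and exit on its own, and the later kthread_stop() in dmatest_cleanup_channel() would otherwise act on a task_struct that may already be freed. The pattern in isolation (a sketch; worker_fn and data are placeholders):

	/* Sketch: pin a kthread's task_struct across a possible self-exit. */
	struct task_struct *task;

	task = kthread_create(worker_fn, data, "worker"); /* created sleeping */
	if (!IS_ERR(task)) {
		get_task_struct(task);  /* hold a ref past the thread's own exit */
		wake_up_process(task);  /* only now does it start running */
		/* ... later, even if worker_fn has already returned: */
		kthread_stop(task);     /* safe, we still hold a reference */
		put_task_struct(task);  /* matches dmatest_cleanup_channel() */
	}
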
| 886 | 782 | ||
| 887 | return i; | 783 | return i; |
| @@ -897,7 +793,7 @@ static int dmatest_add_channel(struct dmatest_info *info, | |||
| 897 | 793 | ||
| 898 | dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL); | 794 | dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL); |
| 899 | if (!dtc) { | 795 | if (!dtc) { |
| 900 | pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan)); | 796 | pr_warn("No memory for %s\n", dma_chan_name(chan)); |
| 901 | return -ENOMEM; | 797 | return -ENOMEM; |
| 902 | } | 798 | } |
| 903 | 799 | ||
| @@ -917,7 +813,7 @@ static int dmatest_add_channel(struct dmatest_info *info, | |||
| 917 | thread_count += cnt > 0 ? cnt : 0; | 813 | thread_count += cnt > 0 ? cnt : 0; |
| 918 | } | 814 | } |
| 919 | 815 | ||
| 920 | pr_info("dmatest: Started %u threads using %s\n", | 816 | pr_info("Started %u threads using %s\n", |
| 921 | thread_count, dma_chan_name(chan)); | 817 | thread_count, dma_chan_name(chan)); |
| 922 | 818 | ||
| 923 | list_add_tail(&dtc->node, &info->channels); | 819 | list_add_tail(&dtc->node, &info->channels); |
| @@ -937,20 +833,20 @@ static bool filter(struct dma_chan *chan, void *param) | |||
| 937 | return true; | 833 | return true; |
| 938 | } | 834 | } |
| 939 | 835 | ||
| 940 | static int __run_threaded_test(struct dmatest_info *info) | 836 | static void request_channels(struct dmatest_info *info, |
| 837 | enum dma_transaction_type type) | ||
| 941 | { | 838 | { |
| 942 | dma_cap_mask_t mask; | 839 | dma_cap_mask_t mask; |
| 943 | struct dma_chan *chan; | ||
| 944 | struct dmatest_params *params = &info->params; | ||
| 945 | int err = 0; | ||
| 946 | 840 | ||
| 947 | dma_cap_zero(mask); | 841 | dma_cap_zero(mask); |
| 948 | dma_cap_set(DMA_MEMCPY, mask); | 842 | dma_cap_set(type, mask); |
| 949 | for (;;) { | 843 | for (;;) { |
| 844 | struct dmatest_params *params = &info->params; | ||
| 845 | struct dma_chan *chan; | ||
| 846 | |||
| 950 | chan = dma_request_channel(mask, filter, params); | 847 | chan = dma_request_channel(mask, filter, params); |
| 951 | if (chan) { | 848 | if (chan) { |
| 952 | err = dmatest_add_channel(info, chan); | 849 | if (dmatest_add_channel(info, chan)) { |
| 953 | if (err) { | ||
| 954 | dma_release_channel(chan); | 850 | dma_release_channel(chan); |
| 955 | break; /* add_channel failed, punt */ | 851 | break; /* add_channel failed, punt */ |
| 956 | } | 852 | } |
| @@ -960,22 +856,30 @@ static int __run_threaded_test(struct dmatest_info *info) | |||
| 960 | info->nr_channels >= params->max_channels) | 856 | info->nr_channels >= params->max_channels) |
| 961 | break; /* we have all we need */ | 857 | break; /* we have all we need */ |
| 962 | } | 858 | } |
| 963 | return err; | ||
| 964 | } | 859 | } |
| 965 | 860 | ||
| 966 | #ifndef MODULE | 861 | static void run_threaded_test(struct dmatest_info *info) |
| 967 | static int run_threaded_test(struct dmatest_info *info) | ||
| 968 | { | 862 | { |
| 969 | int ret; | 863 | struct dmatest_params *params = &info->params; |
| 970 | 864 | ||
| 971 | mutex_lock(&info->lock); | 865 | /* Copy test parameters */ |
| 972 | ret = __run_threaded_test(info); | 866 | params->buf_size = test_buf_size; |
| 973 | mutex_unlock(&info->lock); | 867 | strlcpy(params->channel, strim(test_channel), sizeof(params->channel)); |
| 974 | return ret; | 868 | strlcpy(params->device, strim(test_device), sizeof(params->device)); |
| 869 | params->threads_per_chan = threads_per_chan; | ||
| 870 | params->max_channels = max_channels; | ||
| 871 | params->iterations = iterations; | ||
| 872 | params->xor_sources = xor_sources; | ||
| 873 | params->pq_sources = pq_sources; | ||
| 874 | params->timeout = timeout; | ||
| 875 | params->noverify = noverify; | ||
| 876 | |||
| 877 | request_channels(info, DMA_MEMCPY); | ||
| 878 | request_channels(info, DMA_XOR); | ||
| 879 | request_channels(info, DMA_PQ); | ||
| 975 | } | 880 | } |
| 976 | #endif | ||
| 977 | 881 | ||
| 978 | static void __stop_threaded_test(struct dmatest_info *info) | 882 | static void stop_threaded_test(struct dmatest_info *info) |
| 979 | { | 883 | { |
| 980 | struct dmatest_chan *dtc, *_dtc; | 884 | struct dmatest_chan *dtc, *_dtc; |
| 981 | struct dma_chan *chan; | 885 | struct dma_chan *chan; |
| @@ -984,203 +888,86 @@ static void __stop_threaded_test(struct dmatest_info *info) | |||
| 984 | list_del(&dtc->node); | 888 | list_del(&dtc->node); |
| 985 | chan = dtc->chan; | 889 | chan = dtc->chan; |
| 986 | dmatest_cleanup_channel(dtc); | 890 | dmatest_cleanup_channel(dtc); |
| 987 | pr_debug("dmatest: dropped channel %s\n", dma_chan_name(chan)); | 891 | pr_debug("dropped channel %s\n", dma_chan_name(chan)); |
| 988 | dma_release_channel(chan); | 892 | dma_release_channel(chan); |
| 989 | } | 893 | } |
| 990 | 894 | ||
| 991 | info->nr_channels = 0; | 895 | info->nr_channels = 0; |
| 992 | } | 896 | } |
| 993 | 897 | ||
| 994 | static void stop_threaded_test(struct dmatest_info *info) | 898 | static void restart_threaded_test(struct dmatest_info *info, bool run) |
| 995 | { | 899 | { |
| 996 | mutex_lock(&info->lock); | 900 | /* we might be called early to set run=; defer running until all
| 997 | __stop_threaded_test(info); | 901 | * parameters have been evaluated |
| 998 | mutex_unlock(&info->lock); | 902 | */ |
| 999 | } | 903 | if (!info->did_init) |
| 1000 | 904 | return; | |
| 1001 | static int __restart_threaded_test(struct dmatest_info *info, bool run) | ||
| 1002 | { | ||
| 1003 | struct dmatest_params *params = &info->params; | ||
| 1004 | 905 | ||
| 1005 | /* Stop any running test first */ | 906 | /* Stop any running test first */ |
| 1006 | __stop_threaded_test(info); | 907 | stop_threaded_test(info); |
| 1007 | |||
| 1008 | if (run == false) | ||
| 1009 | return 0; | ||
| 1010 | |||
| 1011 | /* Clear results from previous run */ | ||
| 1012 | result_free(info, NULL); | ||
| 1013 | |||
| 1014 | /* Copy test parameters */ | ||
| 1015 | params->buf_size = test_buf_size; | ||
| 1016 | strlcpy(params->channel, strim(test_channel), sizeof(params->channel)); | ||
| 1017 | strlcpy(params->device, strim(test_device), sizeof(params->device)); | ||
| 1018 | params->threads_per_chan = threads_per_chan; | ||
| 1019 | params->max_channels = max_channels; | ||
| 1020 | params->iterations = iterations; | ||
| 1021 | params->xor_sources = xor_sources; | ||
| 1022 | params->pq_sources = pq_sources; | ||
| 1023 | params->timeout = timeout; | ||
| 1024 | 908 | ||
| 1025 | /* Run test with new parameters */ | 909 | /* Run test with new parameters */ |
| 1026 | return __run_threaded_test(info); | 910 | run_threaded_test(info); |
| 1027 | } | ||
| 1028 | |||
| 1029 | static bool __is_threaded_test_run(struct dmatest_info *info) | ||
| 1030 | { | ||
| 1031 | struct dmatest_chan *dtc; | ||
| 1032 | |||
| 1033 | list_for_each_entry(dtc, &info->channels, node) { | ||
| 1034 | struct dmatest_thread *thread; | ||
| 1035 | |||
| 1036 | list_for_each_entry(thread, &dtc->threads, node) { | ||
| 1037 | if (!thread->done) | ||
| 1038 | return true; | ||
| 1039 | } | ||
| 1040 | } | ||
| 1041 | |||
| 1042 | return false; | ||
| 1043 | } | 911 | } |
| 1044 | 912 | ||
| 1045 | static ssize_t dtf_read_run(struct file *file, char __user *user_buf, | 913 | static int dmatest_run_get(char *val, const struct kernel_param *kp) |
| 1046 | size_t count, loff_t *ppos) | ||
| 1047 | { | 914 | { |
| 1048 | struct dmatest_info *info = file->private_data; | 915 | struct dmatest_info *info = &test_info; |
| 1049 | char buf[3]; | ||
| 1050 | 916 | ||
| 1051 | mutex_lock(&info->lock); | 917 | mutex_lock(&info->lock); |
| 1052 | 918 | if (is_threaded_test_run(info)) { | |
| 1053 | if (__is_threaded_test_run(info)) { | 919 | dmatest_run = true; |
| 1054 | buf[0] = 'Y'; | ||
| 1055 | } else { | 920 | } else { |
| 1056 | __stop_threaded_test(info); | 921 | stop_threaded_test(info); |
| 1057 | buf[0] = 'N'; | 922 | dmatest_run = false; |
| 1058 | } | 923 | } |
| 1059 | |||
| 1060 | mutex_unlock(&info->lock); | 924 | mutex_unlock(&info->lock); |
| 1061 | buf[1] = '\n'; | ||
| 1062 | buf[2] = 0x00; | ||
| 1063 | return simple_read_from_buffer(user_buf, count, ppos, buf, 2); | ||
| 1064 | } | ||
| 1065 | |||
| 1066 | static ssize_t dtf_write_run(struct file *file, const char __user *user_buf, | ||
| 1067 | size_t count, loff_t *ppos) | ||
| 1068 | { | ||
| 1069 | struct dmatest_info *info = file->private_data; | ||
| 1070 | char buf[16]; | ||
| 1071 | bool bv; | ||
| 1072 | int ret = 0; | ||
| 1073 | 925 | ||
| 1074 | if (copy_from_user(buf, user_buf, min(count, (sizeof(buf) - 1)))) | 926 | return param_get_bool(val, kp); |
| 1075 | return -EFAULT; | ||
| 1076 | |||
| 1077 | if (strtobool(buf, &bv) == 0) { | ||
| 1078 | mutex_lock(&info->lock); | ||
| 1079 | |||
| 1080 | if (__is_threaded_test_run(info)) | ||
| 1081 | ret = -EBUSY; | ||
| 1082 | else | ||
| 1083 | ret = __restart_threaded_test(info, bv); | ||
| 1084 | |||
| 1085 | mutex_unlock(&info->lock); | ||
| 1086 | } | ||
| 1087 | |||
| 1088 | return ret ? ret : count; | ||
| 1089 | } | 927 | } |
| 1090 | 928 | ||
| 1091 | static const struct file_operations dtf_run_fops = { | 929 | static int dmatest_run_set(const char *val, const struct kernel_param *kp) |
| 1092 | .read = dtf_read_run, | ||
| 1093 | .write = dtf_write_run, | ||
| 1094 | .open = simple_open, | ||
| 1095 | .llseek = default_llseek, | ||
| 1096 | }; | ||
| 1097 | |||
| 1098 | static int dtf_results_show(struct seq_file *sf, void *data) | ||
| 1099 | { | 930 | { |
| 1100 | struct dmatest_info *info = sf->private; | 931 | struct dmatest_info *info = &test_info; |
| 1101 | struct dmatest_result *result; | 932 | int ret; |
| 1102 | struct dmatest_thread_result *tr; | ||
| 1103 | unsigned int i; | ||
| 1104 | 933 | ||
| 1105 | mutex_lock(&info->results_lock); | 934 | mutex_lock(&info->lock); |
| 1106 | list_for_each_entry(result, &info->results, node) { | 935 | ret = param_set_bool(val, kp); |
| 1107 | list_for_each_entry(tr, &result->results, node) { | 936 | if (ret) { |
| 1108 | seq_printf(sf, "%s\n", | 937 | mutex_unlock(&info->lock); |
| 1109 | thread_result_get(result->name, tr)); | 938 | return ret; |
| 1110 | if (tr->type == DMATEST_ET_VERIFY_BUF) { | ||
| 1111 | for (i = 0; i < tr->vr->error_count; i++) { | ||
| 1112 | seq_printf(sf, "\t%s\n", | ||
| 1113 | verify_result_get_one(tr->vr, i)); | ||
| 1114 | } | ||
| 1115 | } | ||
| 1116 | } | ||
| 1117 | } | 939 | } |
| 1118 | 940 | ||
| 1119 | mutex_unlock(&info->results_lock); | 941 | if (is_threaded_test_run(info)) |
| 1120 | return 0; | 942 | ret = -EBUSY; |
| 1121 | } | 943 | else if (dmatest_run) |
| 1122 | 944 | restart_threaded_test(info, dmatest_run); | |
| 1123 | static int dtf_results_open(struct inode *inode, struct file *file) | ||
| 1124 | { | ||
| 1125 | return single_open(file, dtf_results_show, inode->i_private); | ||
| 1126 | } | ||
| 1127 | |||
| 1128 | static const struct file_operations dtf_results_fops = { | ||
| 1129 | .open = dtf_results_open, | ||
| 1130 | .read = seq_read, | ||
| 1131 | .llseek = seq_lseek, | ||
| 1132 | .release = single_release, | ||
| 1133 | }; | ||
| 1134 | |||
| 1135 | static int dmatest_register_dbgfs(struct dmatest_info *info) | ||
| 1136 | { | ||
| 1137 | struct dentry *d; | ||
| 1138 | |||
| 1139 | d = debugfs_create_dir("dmatest", NULL); | ||
| 1140 | if (IS_ERR(d)) | ||
| 1141 | return PTR_ERR(d); | ||
| 1142 | if (!d) | ||
| 1143 | goto err_root; | ||
| 1144 | 945 | ||
| 1145 | info->root = d; | 946 | mutex_unlock(&info->lock); |
| 1146 | |||
| 1147 | /* Run or stop threaded test */ | ||
| 1148 | debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root, info, | ||
| 1149 | &dtf_run_fops); | ||
| 1150 | |||
| 1151 | /* Results of test in progress */ | ||
| 1152 | debugfs_create_file("results", S_IRUGO, info->root, info, | ||
| 1153 | &dtf_results_fops); | ||
| 1154 | |||
| 1155 | return 0; | ||
| 1156 | 947 | ||
| 1157 | err_root: | 948 | return ret; |
| 1158 | pr_err("dmatest: Failed to initialize debugfs\n"); | ||
| 1159 | return -ENOMEM; | ||
| 1160 | } | 949 | } |
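
dmatest_run_get() and dmatest_run_set() replace the old debugfs 'run' file with custom kernel_param_ops, so the knob now lives in sysfs: writing 1 to /sys/module/dmatest/parameters/run (re)starts a test and reading it reports whether threads are still going. The assumed wiring, declared elsewhere in the file, looks like:

	/* Assumed module_param_cb() hookup for the get/set ops above. */
	static const struct kernel_param_ops run_ops = {
		.set = dmatest_run_set,
		.get = dmatest_run_get,
	};
	module_param_cb(run, &run_ops, &dmatest_run, S_IRUGO | S_IWUSR);
	MODULE_PARM_DESC(run, "Run the test (default: false)");
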
| 1161 | 950 | ||
| 1162 | static int __init dmatest_init(void) | 951 | static int __init dmatest_init(void) |
| 1163 | { | 952 | { |
| 1164 | struct dmatest_info *info = &test_info; | 953 | struct dmatest_info *info = &test_info; |
| 1165 | int ret; | 954 | struct dmatest_params *params = &info->params; |
| 1166 | |||
| 1167 | memset(info, 0, sizeof(*info)); | ||
| 1168 | 955 | ||
| 1169 | mutex_init(&info->lock); | 956 | if (dmatest_run) { |
| 1170 | INIT_LIST_HEAD(&info->channels); | 957 | mutex_lock(&info->lock); |
| 958 | run_threaded_test(info); | ||
| 959 | mutex_unlock(&info->lock); | ||
| 960 | } | ||
| 1171 | 961 | ||
| 1172 | mutex_init(&info->results_lock); | 962 | if (params->iterations && wait) |
| 1173 | INIT_LIST_HEAD(&info->results); | 963 | wait_event(thread_wait, !is_threaded_test_run(info)); |
| 1174 | 964 | ||
| 1175 | ret = dmatest_register_dbgfs(info); | 965 | /* module parameters are stable, init-time tests are started,
| 1176 | if (ret) | 966 | * let userspace take over 'run' control |
| 1177 | return ret; | 967 | */ |
| 968 | info->did_init = true; | ||
| 1178 | 969 | ||
| 1179 | #ifdef MODULE | ||
| 1180 | return 0; | 970 | return 0; |
| 1181 | #else | ||
| 1182 | return run_threaded_test(info); | ||
| 1183 | #endif | ||
| 1184 | } | 971 | } |
| 1185 | /* when compiled-in, wait for drivers to load first */ | 972 | /* when compiled-in, wait for drivers to load first */
| 1186 | late_initcall(dmatest_init); | 973 | late_initcall(dmatest_init); |
| @@ -1189,9 +976,9 @@ static void __exit dmatest_exit(void) | |||
| 1189 | { | 976 | { |
| 1190 | struct dmatest_info *info = &test_info; | 977 | struct dmatest_info *info = &test_info; |
| 1191 | 978 | ||
| 1192 | debugfs_remove_recursive(info->root); | 979 | mutex_lock(&info->lock); |
| 1193 | stop_threaded_test(info); | 980 | stop_threaded_test(info); |
| 1194 | result_free(info, NULL); | 981 | mutex_unlock(&info->lock); |
| 1195 | } | 982 | } |
| 1196 | module_exit(dmatest_exit); | 983 | module_exit(dmatest_exit); |
| 1197 | 984 | ||
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index 89eb89f22284..7516be4677cf 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c | |||
| @@ -85,10 +85,6 @@ static struct device *chan2dev(struct dma_chan *chan) | |||
| 85 | { | 85 | { |
| 86 | return &chan->dev->device; | 86 | return &chan->dev->device; |
| 87 | } | 87 | } |
| 88 | static struct device *chan2parent(struct dma_chan *chan) | ||
| 89 | { | ||
| 90 | return chan->dev->device.parent; | ||
| 91 | } | ||
| 92 | 88 | ||
| 93 | static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) | 89 | static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) |
| 94 | { | 90 | { |
| @@ -311,26 +307,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, | |||
| 311 | list_splice_init(&desc->tx_list, &dwc->free_list); | 307 | list_splice_init(&desc->tx_list, &dwc->free_list); |
| 312 | list_move(&desc->desc_node, &dwc->free_list); | 308 | list_move(&desc->desc_node, &dwc->free_list); |
| 313 | 309 | ||
| 314 | if (!is_slave_direction(dwc->direction)) { | 310 | dma_descriptor_unmap(txd); |
| 315 | struct device *parent = chan2parent(&dwc->chan); | ||
| 316 | if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
| 317 | if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) | ||
| 318 | dma_unmap_single(parent, desc->lli.dar, | ||
| 319 | desc->total_len, DMA_FROM_DEVICE); | ||
| 320 | else | ||
| 321 | dma_unmap_page(parent, desc->lli.dar, | ||
| 322 | desc->total_len, DMA_FROM_DEVICE); | ||
| 323 | } | ||
| 324 | if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
| 325 | if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) | ||
| 326 | dma_unmap_single(parent, desc->lli.sar, | ||
| 327 | desc->total_len, DMA_TO_DEVICE); | ||
| 328 | else | ||
| 329 | dma_unmap_page(parent, desc->lli.sar, | ||
| 330 | desc->total_len, DMA_TO_DEVICE); | ||
| 331 | } | ||
| 332 | } | ||
| 333 | |||
| 334 | spin_unlock_irqrestore(&dwc->lock, flags); | 311 | spin_unlock_irqrestore(&dwc->lock, flags); |
| 335 | 312 | ||
| 336 | if (callback) | 313 | if (callback) |
| @@ -1098,13 +1075,13 @@ dwc_tx_status(struct dma_chan *chan, | |||
| 1098 | enum dma_status ret; | 1075 | enum dma_status ret; |
| 1099 | 1076 | ||
| 1100 | ret = dma_cookie_status(chan, cookie, txstate); | 1077 | ret = dma_cookie_status(chan, cookie, txstate); |
| 1101 | if (ret == DMA_SUCCESS) | 1078 | if (ret == DMA_COMPLETE) |
| 1102 | return ret; | 1079 | return ret; |
| 1103 | 1080 | ||
| 1104 | dwc_scan_descriptors(to_dw_dma(chan->device), dwc); | 1081 | dwc_scan_descriptors(to_dw_dma(chan->device), dwc); |
| 1105 | 1082 | ||
| 1106 | ret = dma_cookie_status(chan, cookie, txstate); | 1083 | ret = dma_cookie_status(chan, cookie, txstate); |
| 1107 | if (ret != DMA_SUCCESS) | 1084 | if (ret != DMA_COMPLETE) |
| 1108 | dma_set_residue(txstate, dwc_get_residue(dwc)); | 1085 | dma_set_residue(txstate, dwc_get_residue(dwc)); |
| 1109 | 1086 | ||
| 1110 | if (dwc->paused && ret == DMA_IN_PROGRESS) | 1087 | if (dwc->paused && ret == DMA_IN_PROGRESS) |
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index bef8a368c8dd..2539ea0cbc63 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c | |||
| @@ -46,14 +46,21 @@ | |||
| 46 | #define EDMA_CHANS 64 | 46 | #define EDMA_CHANS 64 |
| 47 | #endif /* CONFIG_ARCH_DAVINCI_DA8XX */ | 47 | #endif /* CONFIG_ARCH_DAVINCI_DA8XX */ |
| 48 | 48 | ||
| 49 | /* Max of 16 segments per channel to conserve PaRAM slots */ | 49 | /* |
| 50 | #define MAX_NR_SG 16 | 50 | * Max of 20 segments per channel to conserve PaRAM slots |
| 51 | * Also note that MAX_NR_SG should be at least the number of periods | ||
| 52 | * that are required for ASoC, otherwise DMA prep calls will | ||
| 53 | * fail. Today davinci-pcm is the only user of this driver and | ||
| 54 | * requires at least 17 slots, so we set the default to 20. | ||
| 55 | */ | ||
| 56 | #define MAX_NR_SG 20 | ||
| 51 | #define EDMA_MAX_SLOTS MAX_NR_SG | 57 | #define EDMA_MAX_SLOTS MAX_NR_SG |
| 52 | #define EDMA_DESCRIPTORS 16 | 58 | #define EDMA_DESCRIPTORS 16 |
| 53 | 59 | ||
| 54 | struct edma_desc { | 60 | struct edma_desc { |
| 55 | struct virt_dma_desc vdesc; | 61 | struct virt_dma_desc vdesc; |
| 56 | struct list_head node; | 62 | struct list_head node; |
| 63 | int cyclic; | ||
| 57 | int absync; | 64 | int absync; |
| 58 | int pset_nr; | 65 | int pset_nr; |
| 59 | int processed; | 66 | int processed; |
| @@ -167,8 +174,13 @@ static void edma_execute(struct edma_chan *echan) | |||
| 167 | * then setup a link to the dummy slot, this results in all future | 174 | * then setup a link to the dummy slot, this results in all future |
| 168 | * events being absorbed and that's OK because we're done | 175 | * events being absorbed and that's OK because we're done |
| 169 | */ | 176 | */ |
| 170 | if (edesc->processed == edesc->pset_nr) | 177 | if (edesc->processed == edesc->pset_nr) { |
| 171 | edma_link(echan->slot[nslots-1], echan->ecc->dummy_slot); | 178 | if (edesc->cyclic) |
| 179 | edma_link(echan->slot[nslots-1], echan->slot[1]); | ||
| 180 | else | ||
| 181 | edma_link(echan->slot[nslots-1], | ||
| 182 | echan->ecc->dummy_slot); | ||
| 183 | } | ||
| 172 | 184 | ||
| 173 | edma_resume(echan->ch_num); | 185 | edma_resume(echan->ch_num); |
| 174 | 186 | ||
| @@ -250,6 +262,117 @@ static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
| 250 | return ret; | 262 | return ret; |
| 251 | } | 263 | } |
| 252 | 264 | ||
| 265 | /* | ||
| 266 | * A PaRAM set configuration abstraction used by other modes | ||
| 267 | * @chan: Channel whose PaRAM set we're configuring | ||
| 268 | * @pset: PaRAM set to initialize and set up | ||
| 269 | * @src_addr: Source address of the DMA | ||
| 270 | * @dst_addr: Destination address of the DMA | ||
| 271 | * @burst: How much to send in each transfer, in units of dev_width | ||
| 272 | * @dev_width: Bus width of the device (size of one element) | ||
| 273 | * @dma_length: Total length of the DMA transfer | ||
| 274 | * @direction: Direction of the transfer | ||
| 275 | */ | ||
| 276 | static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset, | ||
| 277 | dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst, | ||
| 278 | enum dma_slave_buswidth dev_width, unsigned int dma_length, | ||
| 279 | enum dma_transfer_direction direction) | ||
| 280 | { | ||
| 281 | struct edma_chan *echan = to_edma_chan(chan); | ||
| 282 | struct device *dev = chan->device->dev; | ||
| 283 | int acnt, bcnt, ccnt, cidx; | ||
| 284 | int src_bidx, dst_bidx, src_cidx, dst_cidx; | ||
| 285 | int absync; | ||
| 286 | |||
| 287 | acnt = dev_width; | ||
| 288 | /* | ||
| 289 | * If the maxburst is equal to the fifo width, use | ||
| 290 | * A-synced transfers. This allows for large contiguous | ||
| 291 | * buffer transfers using only one PaRAM set. | ||
| 292 | */ | ||
| 293 | if (burst == 1) { | ||
| 294 | /* | ||
| 295 | * For the A-sync case, bcnt and ccnt are the remainder | ||
| 296 | * and quotient respectively of the division of | ||
| 297 | * (dma_length / acnt) by (SZ_64K - 1). This is so | ||
| 298 | * that in case bcnt overflows, we have ccnt to use. | ||
| 299 | * Note: bcntrld is used in A-sync transfers only, and it | ||
| 300 | * only matters when sg_dma_len(sg) >= SZ_64K. | ||
| 301 | * In that case, bcnt for the first frame will be the | ||
| 302 | * remainder computed below, and every successive frame | ||
| 303 | * will use bcnt = SZ_64K - 1. This is assured by setting | ||
| 304 | * bcntrld = 0xffff at the end of this function. | ||
| 305 | */ | ||
| 306 | absync = false; | ||
| 307 | ccnt = dma_length / acnt / (SZ_64K - 1); | ||
| 308 | bcnt = dma_length / acnt - ccnt * (SZ_64K - 1); | ||
| 309 | /* | ||
| 310 | * If bcnt is non-zero, we have a remainder and hence an | ||
| 311 | * extra frame to transfer, so increment ccnt. | ||
| 312 | */ | ||
| 313 | if (bcnt) | ||
| 314 | ccnt++; | ||
| 315 | else | ||
| 316 | bcnt = SZ_64K - 1; | ||
| 317 | cidx = acnt; | ||
| 318 | } else { | ||
| 319 | /* | ||
| 320 | * If maxburst is greater than the fifo address_width, | ||
| 321 | * use AB-synced transfers where A count is the fifo | ||
| 322 | * address_width and B count is the maxburst. In this | ||
| 323 | * case, we are limited to transfers of C count frames | ||
| 324 | * of (address_width * maxburst) where C count is limited | ||
| 325 | * to SZ_64K-1. This places an upper bound on the length | ||
| 326 | * of an SG segment that can be handled. | ||
| 327 | */ | ||
| 328 | absync = true; | ||
| 329 | bcnt = burst; | ||
| 330 | ccnt = dma_length / (acnt * bcnt); | ||
| 331 | if (ccnt > (SZ_64K - 1)) { | ||
| 332 | dev_err(dev, "Exceeded max SG segment size\n"); | ||
| 333 | return -EINVAL; | ||
| 334 | } | ||
| 335 | cidx = acnt * bcnt; | ||
| 336 | } | ||
| 337 | |||
| 338 | if (direction == DMA_MEM_TO_DEV) { | ||
| 339 | src_bidx = acnt; | ||
| 340 | src_cidx = cidx; | ||
| 341 | dst_bidx = 0; | ||
| 342 | dst_cidx = 0; | ||
| 343 | } else if (direction == DMA_DEV_TO_MEM) { | ||
| 344 | src_bidx = 0; | ||
| 345 | src_cidx = 0; | ||
| 346 | dst_bidx = acnt; | ||
| 347 | dst_cidx = cidx; | ||
| 348 | } else { | ||
| 349 | dev_err(dev, "%s: direction not implemented yet\n", __func__); | ||
| 350 | return -EINVAL; | ||
| 351 | } | ||
| 352 | |||
| 353 | pset->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num)); | ||
| 354 | /* Configure A or AB synchronized transfers */ | ||
| 355 | if (absync) | ||
| 356 | pset->opt |= SYNCDIM; | ||
| 357 | |||
| 358 | pset->src = src_addr; | ||
| 359 | pset->dst = dst_addr; | ||
| 360 | |||
| 361 | pset->src_dst_bidx = (dst_bidx << 16) | src_bidx; | ||
| 362 | pset->src_dst_cidx = (dst_cidx << 16) | src_cidx; | ||
| 363 | |||
| 364 | pset->a_b_cnt = bcnt << 16 | acnt; | ||
| 365 | pset->ccnt = ccnt; | ||
| 366 | /* | ||
| 367 | * Auto-reload (bcntrld) is only required in the A-sync | ||
| 368 | * case, and there the only reload value ever needed is | ||
| 369 | * SZ_64K-1. 'link' is initially set to NULL and is | ||
| 370 | * populated later by edma_execute. | ||
| 371 | */ | ||
| 372 | pset->link_bcntrld = 0xffffffff; | ||
| 373 | return absync; | ||
| 374 | } | ||
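
A worked example of the A-sync split above, with assumed inputs of a 4-byte dev_width, burst of 1, and a 300000-byte segment (75000 elements):

	#include <stdio.h>

	int main(void)
	{
		/* Assumed: acnt = 4 (32-bit dev_width), dma_length = 300000. */
		unsigned int acnt = 4, dma_length = 300000;
		unsigned int elems = dma_length / acnt;   /* 75000 */
		unsigned int ccnt = elems / 65535;        /* SZ_64K - 1 -> 1 */
		unsigned int bcnt = elems - ccnt * 65535; /* 9465 */

		if (bcnt)
			ccnt++;        /* remainder -> one extra frame, ccnt = 2 */
		else
			bcnt = 65535;

		/* First frame moves bcnt = 9465 elements; bcntrld then reloads
		 * 65535, so 9465 + 65535 = 75000 elements move in total. */
		printf("bcnt=%u ccnt=%u\n", bcnt, ccnt);
		return 0;
	}
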
| 375 | |||
| 253 | static struct dma_async_tx_descriptor *edma_prep_slave_sg( | 376 | static struct dma_async_tx_descriptor *edma_prep_slave_sg( |
| 254 | struct dma_chan *chan, struct scatterlist *sgl, | 377 | struct dma_chan *chan, struct scatterlist *sgl, |
| 255 | unsigned int sg_len, enum dma_transfer_direction direction, | 378 | unsigned int sg_len, enum dma_transfer_direction direction, |
| @@ -258,23 +381,21 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( | |||
| 258 | struct edma_chan *echan = to_edma_chan(chan); | 381 | struct edma_chan *echan = to_edma_chan(chan); |
| 259 | struct device *dev = chan->device->dev; | 382 | struct device *dev = chan->device->dev; |
| 260 | struct edma_desc *edesc; | 383 | struct edma_desc *edesc; |
| 261 | dma_addr_t dev_addr; | 384 | dma_addr_t src_addr = 0, dst_addr = 0; |
| 262 | enum dma_slave_buswidth dev_width; | 385 | enum dma_slave_buswidth dev_width; |
| 263 | u32 burst; | 386 | u32 burst; |
| 264 | struct scatterlist *sg; | 387 | struct scatterlist *sg; |
| 265 | int acnt, bcnt, ccnt, src, dst, cidx; | 388 | int i, nslots, ret; |
| 266 | int src_bidx, dst_bidx, src_cidx, dst_cidx; | ||
| 267 | int i, nslots; | ||
| 268 | 389 | ||
| 269 | if (unlikely(!echan || !sgl || !sg_len)) | 390 | if (unlikely(!echan || !sgl || !sg_len)) |
| 270 | return NULL; | 391 | return NULL; |
| 271 | 392 | ||
| 272 | if (direction == DMA_DEV_TO_MEM) { | 393 | if (direction == DMA_DEV_TO_MEM) { |
| 273 | dev_addr = echan->cfg.src_addr; | 394 | src_addr = echan->cfg.src_addr; |
| 274 | dev_width = echan->cfg.src_addr_width; | 395 | dev_width = echan->cfg.src_addr_width; |
| 275 | burst = echan->cfg.src_maxburst; | 396 | burst = echan->cfg.src_maxburst; |
| 276 | } else if (direction == DMA_MEM_TO_DEV) { | 397 | } else if (direction == DMA_MEM_TO_DEV) { |
| 277 | dev_addr = echan->cfg.dst_addr; | 398 | dst_addr = echan->cfg.dst_addr; |
| 278 | dev_width = echan->cfg.dst_addr_width; | 399 | dev_width = echan->cfg.dst_addr_width; |
| 279 | burst = echan->cfg.dst_maxburst; | 400 | burst = echan->cfg.dst_maxburst; |
| 280 | } else { | 401 | } else { |
| @@ -307,7 +428,6 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( | |||
| 307 | if (echan->slot[i] < 0) { | 428 | if (echan->slot[i] < 0) { |
| 308 | kfree(edesc); | 429 | kfree(edesc); |
| 309 | dev_err(dev, "Failed to allocate slot\n"); | 430 | dev_err(dev, "Failed to allocate slot\n"); |
| 310 | kfree(edesc); | ||
| 311 | return NULL; | 431 | return NULL; |
| 312 | } | 432 | } |
| 313 | } | 433 | } |
| @@ -315,64 +435,21 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( | |||
| 315 | 435 | ||
| 316 | /* Configure PaRAM sets for each SG */ | 436 | /* Configure PaRAM sets for each SG */ |
| 317 | for_each_sg(sgl, sg, sg_len, i) { | 437 | for_each_sg(sgl, sg, sg_len, i) { |
| 318 | 438 | /* Get address for each SG */ | |
| 319 | acnt = dev_width; | 439 | if (direction == DMA_DEV_TO_MEM) |
| 320 | 440 | dst_addr = sg_dma_address(sg); | |
| 321 | /* | 441 | else |
| 322 | * If the maxburst is equal to the fifo width, use | 442 | src_addr = sg_dma_address(sg); |
| 323 | * A-synced transfers. This allows for large contiguous | 443 | |
| 324 | * buffer transfers using only one PaRAM set. | 444 | ret = edma_config_pset(chan, &edesc->pset[i], src_addr, |
| 325 | */ | 445 | dst_addr, burst, dev_width, |
| 326 | if (burst == 1) { | 446 | sg_dma_len(sg), direction); |
| 327 | edesc->absync = false; | 447 | if (ret < 0) { |
| 328 | ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1); | 448 | kfree(edesc); |
| 329 | bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1); | 449 | return NULL; |
| 330 | if (bcnt) | ||
| 331 | ccnt++; | ||
| 332 | else | ||
| 333 | bcnt = SZ_64K - 1; | ||
| 334 | cidx = acnt; | ||
| 335 | /* | ||
| 336 | * If maxburst is greater than the fifo address_width, | ||
| 337 | * use AB-synced transfers where A count is the fifo | ||
| 338 | * address_width and B count is the maxburst. In this | ||
| 339 | * case, we are limited to transfers of C count frames | ||
| 340 | * of (address_width * maxburst) where C count is limited | ||
| 341 | * to SZ_64K-1. This places an upper bound on the length | ||
| 342 | * of an SG segment that can be handled. | ||
| 343 | */ | ||
| 344 | } else { | ||
| 345 | edesc->absync = true; | ||
| 346 | bcnt = burst; | ||
| 347 | ccnt = sg_dma_len(sg) / (acnt * bcnt); | ||
| 348 | if (ccnt > (SZ_64K - 1)) { | ||
| 349 | dev_err(dev, "Exceeded max SG segment size\n"); | ||
| 350 | kfree(edesc); | ||
| 351 | return NULL; | ||
| 352 | } | ||
| 353 | cidx = acnt * bcnt; | ||
| 354 | } | 450 | } |
| 355 | 451 | ||
| 356 | if (direction == DMA_MEM_TO_DEV) { | 452 | edesc->absync = ret; |
| 357 | src = sg_dma_address(sg); | ||
| 358 | dst = dev_addr; | ||
| 359 | src_bidx = acnt; | ||
| 360 | src_cidx = cidx; | ||
| 361 | dst_bidx = 0; | ||
| 362 | dst_cidx = 0; | ||
| 363 | } else { | ||
| 364 | src = dev_addr; | ||
| 365 | dst = sg_dma_address(sg); | ||
| 366 | src_bidx = 0; | ||
| 367 | src_cidx = 0; | ||
| 368 | dst_bidx = acnt; | ||
| 369 | dst_cidx = cidx; | ||
| 370 | } | ||
| 371 | |||
| 372 | edesc->pset[i].opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num)); | ||
| 373 | /* Configure A or AB synchronized transfers */ | ||
| 374 | if (edesc->absync) | ||
| 375 | edesc->pset[i].opt |= SYNCDIM; | ||
| 376 | 453 | ||
| 377 | /* If this is the last in a current SG set of transactions, | 454 | /* If this is the last in a current SG set of transactions, |
| 378 | enable interrupts so that next set is processed */ | 455 | enable interrupts so that next set is processed */ |
| @@ -382,17 +459,138 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( | |||
| 382 | /* If this is the last set, enable completion interrupt flag */ | 459 | /* If this is the last set, enable completion interrupt flag */ |
| 383 | if (i == sg_len - 1) | 460 | if (i == sg_len - 1) |
| 384 | edesc->pset[i].opt |= TCINTEN; | 461 | edesc->pset[i].opt |= TCINTEN; |
| 462 | } | ||
| 385 | 463 | ||
| 386 | edesc->pset[i].src = src; | 464 | return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); |
| 387 | edesc->pset[i].dst = dst; | 465 | } |
| 388 | 466 | ||
| 389 | edesc->pset[i].src_dst_bidx = (dst_bidx << 16) | src_bidx; | 467 | static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( |
| 390 | edesc->pset[i].src_dst_cidx = (dst_cidx << 16) | src_cidx; | 468 | struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, |
| 469 | size_t period_len, enum dma_transfer_direction direction, | ||
| 470 | unsigned long tx_flags, void *context) | ||
| 471 | { | ||
| 472 | struct edma_chan *echan = to_edma_chan(chan); | ||
| 473 | struct device *dev = chan->device->dev; | ||
| 474 | struct edma_desc *edesc; | ||
| 475 | dma_addr_t src_addr, dst_addr; | ||
| 476 | enum dma_slave_buswidth dev_width; | ||
| 477 | u32 burst; | ||
| 478 | int i, ret, nslots; | ||
| 479 | |||
| 480 | if (unlikely(!echan || !buf_len || !period_len)) | ||
| 481 | return NULL; | ||
| 482 | |||
| 483 | if (direction == DMA_DEV_TO_MEM) { | ||
| 484 | src_addr = echan->cfg.src_addr; | ||
| 485 | dst_addr = buf_addr; | ||
| 486 | dev_width = echan->cfg.src_addr_width; | ||
| 487 | burst = echan->cfg.src_maxburst; | ||
| 488 | } else if (direction == DMA_MEM_TO_DEV) { | ||
| 489 | src_addr = buf_addr; | ||
| 490 | dst_addr = echan->cfg.dst_addr; | ||
| 491 | dev_width = echan->cfg.dst_addr_width; | ||
| 492 | burst = echan->cfg.dst_maxburst; | ||
| 493 | } else { | ||
| 494 | dev_err(dev, "%s: bad direction?\n", __func__); | ||
| 495 | return NULL; | ||
| 496 | } | ||
| 497 | |||
| 498 | if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) { | ||
| 499 | dev_err(dev, "Undefined slave buswidth\n"); | ||
| 500 | return NULL; | ||
| 501 | } | ||
| 502 | |||
| 503 | if (unlikely(buf_len % period_len)) { | ||
| 504 | dev_err(dev, "Buffer length must be a multiple of the period length\n"); | ||
| 505 | return NULL; | ||
| 506 | } | ||
| 507 | |||
| 508 | nslots = (buf_len / period_len) + 1; | ||
| 509 | |||
| 510 | /* | ||
| 511 | * Cyclic DMA users such as audio cannot tolerate delays introduced | ||
| 512 | * when the number of periods exceeds the maximum number of SGs the | ||
| 513 | * EDMA driver can handle at a time. For slave SG transfers such | ||
| 514 | * delays are tolerable and can be kept synchronized, but that | ||
| 515 | * synchronization is hard to achieve and cannot be guaranteed for | ||
| 516 | * cyclic transfers, so error out early. | ||
| 517 | */ | ||
| 518 | if (nslots > MAX_NR_SG) | ||
| 519 | return NULL; | ||
| 520 | |||
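Worked example of the slot math above: buf_len must be an integer multiple of period_len, so a 64 KiB audio ring split into 8 KiB periods gives nslots = 64/8 + 1 = 9. The extra slot holds the copy of pset[0] made at the end of the allocation loop below, which serves as the wrap-around entry for the endless transfer; consequently at most MAX_NR_SG - 1 periods fit in one cyclic descriptor.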
| 521 | edesc = kzalloc(sizeof(*edesc) + nslots * | ||
| 522 | sizeof(edesc->pset[0]), GFP_ATOMIC); | ||
| 523 | if (!edesc) { | ||
| 524 | dev_dbg(dev, "Failed to allocate a descriptor\n"); | ||
| 525 | return NULL; | ||
| 526 | } | ||
| 527 | |||
| 528 | edesc->cyclic = 1; | ||
| 529 | edesc->pset_nr = nslots; | ||
| 530 | |||
| 531 | dev_dbg(dev, "%s: nslots=%d\n", __func__, nslots); | ||
| 532 | dev_dbg(dev, "%s: period_len=%zu\n", __func__, period_len); | ||
| 533 | dev_dbg(dev, "%s: buf_len=%zu\n", __func__, buf_len); | ||
| 534 | |||
| 535 | for (i = 0; i < nslots; i++) { | ||
| 536 | /* Allocate a PaRAM slot, if needed */ | ||
| 537 | if (echan->slot[i] < 0) { | ||
| 538 | echan->slot[i] = | ||
| 539 | edma_alloc_slot(EDMA_CTLR(echan->ch_num), | ||
| 540 | EDMA_SLOT_ANY); | ||
| 541 | if (echan->slot[i] < 0) { | ||
| 542 | dev_err(dev, "Failed to allocate slot\n"); | ||
| 543 | return NULL; | ||
| 544 | } | ||
| 545 | } | ||
| 546 | |||
| 547 | if (i == nslots - 1) { | ||
| 548 | memcpy(&edesc->pset[i], &edesc->pset[0], | ||
| 549 | sizeof(edesc->pset[0])); | ||
| 550 | break; | ||
| 551 | } | ||
| 552 | |||
| 553 | ret = edma_config_pset(chan, &edesc->pset[i], src_addr, | ||
| 554 | dst_addr, burst, dev_width, period_len, | ||
| 555 | direction); | ||
| 556 | if (ret < 0) | ||
| 557 | return NULL; | ||
| 391 | 558 | ||
| 392 | edesc->pset[i].a_b_cnt = bcnt << 16 | acnt; | 559 | if (direction == DMA_DEV_TO_MEM) |
| 393 | edesc->pset[i].ccnt = ccnt; | 560 | dst_addr += period_len; |
| 394 | edesc->pset[i].link_bcntrld = 0xffffffff; | 561 | else |
| 562 | src_addr += period_len; | ||
| 395 | 563 | ||
| 564 | dev_dbg(dev, "%s: Configure period %d of buf:\n", __func__, i); | ||
| 565 | dev_dbg(dev, | ||
| 566 | "\n pset[%d]:\n" | ||
| 567 | " chnum\t%d\n" | ||
| 568 | " slot\t%d\n" | ||
| 569 | " opt\t%08x\n" | ||
| 570 | " src\t%08x\n" | ||
| 571 | " dst\t%08x\n" | ||
| 572 | " abcnt\t%08x\n" | ||
| 573 | " ccnt\t%08x\n" | ||
| 574 | " bidx\t%08x\n" | ||
| 575 | " cidx\t%08x\n" | ||
| 576 | " lkrld\t%08x\n", | ||
| 577 | i, echan->ch_num, echan->slot[i], | ||
| 578 | edesc->pset[i].opt, | ||
| 579 | edesc->pset[i].src, | ||
| 580 | edesc->pset[i].dst, | ||
| 581 | edesc->pset[i].a_b_cnt, | ||
| 582 | edesc->pset[i].ccnt, | ||
| 583 | edesc->pset[i].src_dst_bidx, | ||
| 584 | edesc->pset[i].src_dst_cidx, | ||
| 585 | edesc->pset[i].link_bcntrld); | ||
| 586 | |||
| 587 | edesc->absync = ret; | ||
| 588 | |||
| 589 | /* | ||
| 590 | * Enable interrupts for every period because callback | ||
| 591 | * has to be called for every period. | ||
| 592 | */ | ||
| 593 | edesc->pset[i].opt |= TCINTEN; | ||
| 396 | } | 594 | } |
| 397 | 595 | ||
| 398 | return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); | 596 | return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); |
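For context, a minimal consumer sketch of the new callback through the generic dmaengine API; the names are stand-ins and error handling is trimmed, so treat it as an assumption-laden illustration rather than a client from this series. Note in passing that the two error returns inside the loop above (slot allocation and edma_config_pset failure) return NULL without the kfree(edesc) unwind the slave_sg path performs.

	#include <linux/dmaengine.h>
	#include <linux/errno.h>

	/* Hypothetical client helper; chan, the DMA handle, sizes and the
	 * callback are all assumed inputs. */
	static int start_cyclic(struct dma_chan *chan, dma_addr_t buf_dma,
				size_t buf_len, size_t period_len,
				dma_async_tx_callback period_cb, void *arg)
	{
		struct dma_async_tx_descriptor *desc;

		desc = dmaengine_prep_dma_cyclic(chan, buf_dma, buf_len,
						 period_len, DMA_MEM_TO_DEV,
						 DMA_PREP_INTERRUPT);
		if (!desc)
			return -EIO;

		desc->callback = period_cb;	/* invoked once per elapsed period */
		desc->callback_param = arg;
		dmaengine_submit(desc);
		dma_async_issue_pending(chan);
		return 0;
	}

The per-period callback is why the loop above sets TCINTEN on every PaRAM set rather than only on the last one.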
| @@ -406,30 +604,34 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data) | |||
| 406 | unsigned long flags; | 604 | unsigned long flags; |
| 407 | struct edmacc_param p; | 605 | struct edmacc_param p; |
| 408 | 606 | ||
| 409 | /* Pause the channel */ | 607 | edesc = echan->edesc; |
| 410 | edma_pause(echan->ch_num); | 608 | |
| 609 | /* Pause the channel for non-cyclic */ | ||
| 610 | if (!edesc || (edesc && !edesc->cyclic)) | ||
| 611 | edma_pause(echan->ch_num); | ||
| 411 | 612 | ||
| 412 | switch (ch_status) { | 613 | switch (ch_status) { |
| 413 | case DMA_COMPLETE: | 614 | case EDMA_DMA_COMPLETE: |
| 414 | spin_lock_irqsave(&echan->vchan.lock, flags); | 615 | spin_lock_irqsave(&echan->vchan.lock, flags); |
| 415 | 616 | ||
| 416 | edesc = echan->edesc; | ||
| 417 | if (edesc) { | 617 | if (edesc) { |
| 418 | if (edesc->processed == edesc->pset_nr) { | 618 | if (edesc->cyclic) { |
| 619 | vchan_cyclic_callback(&edesc->vdesc); | ||
| 620 | } else if (edesc->processed == edesc->pset_nr) { | ||
| 419 | dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num); | 621 | dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num); |
| 420 | edma_stop(echan->ch_num); | 622 | edma_stop(echan->ch_num); |
| 421 | vchan_cookie_complete(&edesc->vdesc); | 623 | vchan_cookie_complete(&edesc->vdesc); |
| 624 | edma_execute(echan); | ||
| 422 | } else { | 625 | } else { |
| 423 | dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num); | 626 | dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num); |
| 627 | edma_execute(echan); | ||
| 424 | } | 628 | } |
| 425 | |||
| 426 | edma_execute(echan); | ||
| 427 | } | 629 | } |
| 428 | 630 | ||
| 429 | spin_unlock_irqrestore(&echan->vchan.lock, flags); | 631 | spin_unlock_irqrestore(&echan->vchan.lock, flags); |
| 430 | 632 | ||
| 431 | break; | 633 | break; |
| 432 | case DMA_CC_ERROR: | 634 | case EDMA_DMA_CC_ERROR: |
| 433 | spin_lock_irqsave(&echan->vchan.lock, flags); | 635 | spin_lock_irqsave(&echan->vchan.lock, flags); |
| 434 | 636 | ||
| 435 | edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p); | 637 | edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p); |
| @@ -579,7 +781,7 @@ static enum dma_status edma_tx_status(struct dma_chan *chan, | |||
| 579 | unsigned long flags; | 781 | unsigned long flags; |
| 580 | 782 | ||
| 581 | ret = dma_cookie_status(chan, cookie, txstate); | 783 | ret = dma_cookie_status(chan, cookie, txstate); |
| 582 | if (ret == DMA_SUCCESS || !txstate) | 784 | if (ret == DMA_COMPLETE || !txstate) |
| 583 | return ret; | 785 | return ret; |
| 584 | 786 | ||
| 585 | spin_lock_irqsave(&echan->vchan.lock, flags); | 787 | spin_lock_irqsave(&echan->vchan.lock, flags); |
| @@ -619,6 +821,7 @@ static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma, | |||
| 619 | struct device *dev) | 821 | struct device *dev) |
| 620 | { | 822 | { |
| 621 | dma->device_prep_slave_sg = edma_prep_slave_sg; | 823 | dma->device_prep_slave_sg = edma_prep_slave_sg; |
| 824 | dma->device_prep_dma_cyclic = edma_prep_dma_cyclic; | ||
| 622 | dma->device_alloc_chan_resources = edma_alloc_chan_resources; | 825 | dma->device_alloc_chan_resources = edma_alloc_chan_resources; |
| 623 | dma->device_free_chan_resources = edma_free_chan_resources; | 826 | dma->device_free_chan_resources = edma_free_chan_resources; |
| 624 | dma->device_issue_pending = edma_issue_pending; | 827 | dma->device_issue_pending = edma_issue_pending; |
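Registering device_prep_dma_cyclic is only half the contract; the dmaengine core also expects the capability bit to be advertised so clients can find cyclic-capable channels. A one-line sketch of what the probe path is assumed to do (not shown in this hunk):

	dma_cap_set(DMA_CYCLIC, dma->cap_mask);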
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index 591cd8c63abb..cb4bf682a708 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c | |||
| @@ -733,28 +733,6 @@ static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac) | |||
| 733 | spin_unlock_irqrestore(&edmac->lock, flags); | 733 | spin_unlock_irqrestore(&edmac->lock, flags); |
| 734 | } | 734 | } |
| 735 | 735 | ||
| 736 | static void ep93xx_dma_unmap_buffers(struct ep93xx_dma_desc *desc) | ||
| 737 | { | ||
| 738 | struct device *dev = desc->txd.chan->device->dev; | ||
| 739 | |||
| 740 | if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
| 741 | if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE) | ||
| 742 | dma_unmap_single(dev, desc->src_addr, desc->size, | ||
| 743 | DMA_TO_DEVICE); | ||
| 744 | else | ||
| 745 | dma_unmap_page(dev, desc->src_addr, desc->size, | ||
| 746 | DMA_TO_DEVICE); | ||
| 747 | } | ||
| 748 | if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
| 749 | if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE) | ||
| 750 | dma_unmap_single(dev, desc->dst_addr, desc->size, | ||
| 751 | DMA_FROM_DEVICE); | ||
| 752 | else | ||
| 753 | dma_unmap_page(dev, desc->dst_addr, desc->size, | ||
| 754 | DMA_FROM_DEVICE); | ||
| 755 | } | ||
| 756 | } | ||
| 757 | |||
| 758 | static void ep93xx_dma_tasklet(unsigned long data) | 736 | static void ep93xx_dma_tasklet(unsigned long data) |
| 759 | { | 737 | { |
| 760 | struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data; | 738 | struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data; |
| @@ -787,13 +765,7 @@ static void ep93xx_dma_tasklet(unsigned long data) | |||
| 787 | 765 | ||
| 788 | /* Now we can release all the chained descriptors */ | 766 | /* Now we can release all the chained descriptors */ |
| 789 | list_for_each_entry_safe(desc, d, &list, node) { | 767 | list_for_each_entry_safe(desc, d, &list, node) { |
| 790 | /* | 768 | dma_descriptor_unmap(&desc->txd); |
| 791 | * For the memcpy channels the API requires us to unmap the | ||
| 792 | * buffers unless requested otherwise. | ||
| 793 | */ | ||
| 794 | if (!edmac->chan.private) | ||
| 795 | ep93xx_dma_unmap_buffers(desc); | ||
| 796 | |||
| 797 | ep93xx_dma_desc_put(edmac, desc); | 769 | ep93xx_dma_desc_put(edmac, desc); |
| 798 | } | 770 | } |
| 799 | 771 | ||
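This is the pattern repeated across the drivers below (fsldma, ioat): the per-driver, flag-driven unmap helpers go away and dma_descriptor_unmap() takes over. Under the new model, a client that wants automatic unmapping attaches a reference-counted unmap descriptor to the transaction. A hedged sketch of that client side, with inputs assumed and error handling elided:

	#include <linux/dmaengine.h>
	#include <linux/dma-mapping.h>

	/* Illustrative only; dma_dev, chan and the pages are assumed inputs. */
	static void sketch_memcpy_with_unmap(struct dma_device *dma_dev,
					     struct dma_chan *chan,
					     struct page *src_pg,
					     struct page *dst_pg, size_t len)
	{
		struct dmaengine_unmap_data *unmap;
		struct dma_async_tx_descriptor *tx;

		unmap = dmaengine_get_unmap_data(dma_dev->dev, 2, GFP_NOWAIT);
		unmap->len = len;
		unmap->to_cnt = 1;	/* addr[0] is unmapped DMA_TO_DEVICE */
		unmap->from_cnt = 1;	/* addr[1] is unmapped DMA_FROM_DEVICE */
		unmap->addr[0] = dma_map_page(dma_dev->dev, src_pg, 0, len,
					      DMA_TO_DEVICE);
		unmap->addr[1] = dma_map_page(dma_dev->dev, dst_pg, 0, len,
					      DMA_FROM_DEVICE);

		tx = dma_dev->device_prep_dma_memcpy(chan, unmap->addr[1],
						     unmap->addr[0], len,
						     DMA_PREP_INTERRUPT);
		dma_set_unmap(tx, unmap);	/* released via dma_descriptor_unmap() */
		dmaengine_unmap_put(unmap);	/* drop our local reference */

		dmaengine_submit(tx);
		dma_async_issue_pending(chan);
	}

dmaengine_unmap_put() drops only the caller's reference; the core keeps its own through tx->unmap until the descriptor completes, which is the point at which the drivers' new dma_descriptor_unmap() calls fire.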
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 61517dd0d0b7..7086a16a55f2 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c | |||
| @@ -870,22 +870,7 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan, | |||
| 870 | /* Run any dependencies */ | 870 | /* Run any dependencies */ |
| 871 | dma_run_dependencies(txd); | 871 | dma_run_dependencies(txd); |
| 872 | 872 | ||
| 873 | /* Unmap the dst buffer, if requested */ | 873 | dma_descriptor_unmap(txd); |
| 874 | if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
| 875 | if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) | ||
| 876 | dma_unmap_single(dev, dst, len, DMA_FROM_DEVICE); | ||
| 877 | else | ||
| 878 | dma_unmap_page(dev, dst, len, DMA_FROM_DEVICE); | ||
| 879 | } | ||
| 880 | |||
| 881 | /* Unmap the src buffer, if requested */ | ||
| 882 | if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
| 883 | if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) | ||
| 884 | dma_unmap_single(dev, src, len, DMA_TO_DEVICE); | ||
| 885 | else | ||
| 886 | dma_unmap_page(dev, src, len, DMA_TO_DEVICE); | ||
| 887 | } | ||
| 888 | |||
| 889 | #ifdef FSL_DMA_LD_DEBUG | 874 | #ifdef FSL_DMA_LD_DEBUG |
| 890 | chan_dbg(chan, "LD %p free\n", desc); | 875 | chan_dbg(chan, "LD %p free\n", desc); |
| 891 | #endif | 876 | #endif |
| @@ -1255,7 +1240,9 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev, | |||
| 1255 | WARN_ON(fdev->feature != chan->feature); | 1240 | WARN_ON(fdev->feature != chan->feature); |
| 1256 | 1241 | ||
| 1257 | chan->dev = fdev->dev; | 1242 | chan->dev = fdev->dev; |
| 1258 | chan->id = ((res.start - 0x100) & 0xfff) >> 7; | 1243 | chan->id = (res.start & 0xfff) < 0x300 ? |
| 1244 | ((res.start - 0x100) & 0xfff) >> 7 : | ||
| 1245 | ((res.start - 0x200) & 0xfff) >> 7; | ||
| 1259 | if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) { | 1246 | if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) { |
| 1260 | dev_err(fdev->dev, "too many channels for device\n"); | 1247 | dev_err(fdev->dev, "too many channels for device\n"); |
| 1261 | err = -EINVAL; | 1248 | err = -EINVAL; |
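Worked through, the new id computation keeps the numbering contiguous across the elo3 layout, where the second bank of channel register blocks apparently starts at offset 0x400 (above the 0x300 status register) rather than following the first bank directly: a channel at offset 0x180 yields (0x180 - 0x100) >> 7 = 1, while one at 0x480 yields (0x480 - 0x200) >> 7 = 5, so the banks at 0x100-0x280 and 0x400-0x580 map to ids 0-3 and 4-7. Each channel block is 0x80 bytes wide, hence the shift by 7; the bumped FSL_DMA_MAX_CHANS_PER_DEVICE in fsldma.h below accommodates the full set of 8.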
| @@ -1428,6 +1415,7 @@ static int fsldma_of_remove(struct platform_device *op) | |||
| 1428 | } | 1415 | } |
| 1429 | 1416 | ||
| 1430 | static const struct of_device_id fsldma_of_ids[] = { | 1417 | static const struct of_device_id fsldma_of_ids[] = { |
| 1418 | { .compatible = "fsl,elo3-dma", }, | ||
| 1431 | { .compatible = "fsl,eloplus-dma", }, | 1419 | { .compatible = "fsl,eloplus-dma", }, |
| 1432 | { .compatible = "fsl,elo-dma", }, | 1420 | { .compatible = "fsl,elo-dma", }, |
| 1433 | {} | 1421 | {} |
| @@ -1449,7 +1437,7 @@ static struct platform_driver fsldma_of_driver = { | |||
| 1449 | 1437 | ||
| 1450 | static __init int fsldma_init(void) | 1438 | static __init int fsldma_init(void) |
| 1451 | { | 1439 | { |
| 1452 | pr_info("Freescale Elo / Elo Plus DMA driver\n"); | 1440 | pr_info("Freescale Elo series DMA driver\n"); |
| 1453 | return platform_driver_register(&fsldma_of_driver); | 1441 | return platform_driver_register(&fsldma_of_driver); |
| 1454 | } | 1442 | } |
| 1455 | 1443 | ||
| @@ -1461,5 +1449,5 @@ static void __exit fsldma_exit(void) | |||
| 1461 | subsys_initcall(fsldma_init); | 1449 | subsys_initcall(fsldma_init); |
| 1462 | module_exit(fsldma_exit); | 1450 | module_exit(fsldma_exit); |
| 1463 | 1451 | ||
| 1464 | MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver"); | 1452 | MODULE_DESCRIPTION("Freescale Elo series DMA driver"); |
| 1465 | MODULE_LICENSE("GPL"); | 1453 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index f5c38791fc74..1ffc24484d23 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h | |||
| @@ -112,7 +112,7 @@ struct fsldma_chan_regs { | |||
| 112 | }; | 112 | }; |
| 113 | 113 | ||
| 114 | struct fsldma_chan; | 114 | struct fsldma_chan; |
| 115 | #define FSL_DMA_MAX_CHANS_PER_DEVICE 4 | 115 | #define FSL_DMA_MAX_CHANS_PER_DEVICE 8 |
| 116 | 116 | ||
| 117 | struct fsldma_device { | 117 | struct fsldma_device { |
| 118 | void __iomem *regs; /* DGSR register base */ | 118 | void __iomem *regs; /* DGSR register base */ |
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 55852c026791..6f9ac2022abd 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c | |||
| @@ -572,9 +572,11 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) | |||
| 572 | 572 | ||
| 573 | imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel)); | 573 | imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel)); |
| 574 | 574 | ||
| 575 | dev_dbg(imxdma->dev, "%s channel: %d dest=0x%08x src=0x%08x " | 575 | dev_dbg(imxdma->dev, |
| 576 | "dma_length=%d\n", __func__, imxdmac->channel, | 576 | "%s channel: %d dest=0x%08llx src=0x%08llx dma_length=%zu\n", |
| 577 | d->dest, d->src, d->len); | 577 | __func__, imxdmac->channel, |
| 578 | (unsigned long long)d->dest, | ||
| 579 | (unsigned long long)d->src, d->len); | ||
| 578 | 580 | ||
| 579 | break; | 581 | break; |
| 580 | /* Cyclic transfer is the same as slave_sg with special sg configuration. */ | 582 | /* Cyclic transfer is the same as slave_sg with special sg configuration. */ |
| @@ -586,20 +588,22 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) | |||
| 586 | imx_dmav1_writel(imxdma, imxdmac->ccr_from_device, | 588 | imx_dmav1_writel(imxdma, imxdmac->ccr_from_device, |
| 587 | DMA_CCR(imxdmac->channel)); | 589 | DMA_CCR(imxdmac->channel)); |
| 588 | 590 | ||
| 589 | dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d " | 591 | dev_dbg(imxdma->dev, |
| 590 | "total length=%d dev_addr=0x%08x (dev2mem)\n", | 592 | "%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (dev2mem)\n", |
| 591 | __func__, imxdmac->channel, d->sg, d->sgcount, | 593 | __func__, imxdmac->channel, |
| 592 | d->len, imxdmac->per_address); | 594 | d->sg, d->sgcount, d->len, |
| 595 | (unsigned long long)imxdmac->per_address); | ||
| 593 | } else if (d->direction == DMA_MEM_TO_DEV) { | 596 | } else if (d->direction == DMA_MEM_TO_DEV) { |
| 594 | imx_dmav1_writel(imxdma, imxdmac->per_address, | 597 | imx_dmav1_writel(imxdma, imxdmac->per_address, |
| 595 | DMA_DAR(imxdmac->channel)); | 598 | DMA_DAR(imxdmac->channel)); |
| 596 | imx_dmav1_writel(imxdma, imxdmac->ccr_to_device, | 599 | imx_dmav1_writel(imxdma, imxdmac->ccr_to_device, |
| 597 | DMA_CCR(imxdmac->channel)); | 600 | DMA_CCR(imxdmac->channel)); |
| 598 | 601 | ||
| 599 | dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d " | 602 | dev_dbg(imxdma->dev, |
| 600 | "total length=%d dev_addr=0x%08x (mem2dev)\n", | 603 | "%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (mem2dev)\n", |
| 601 | __func__, imxdmac->channel, d->sg, d->sgcount, | 604 | __func__, imxdmac->channel, |
| 602 | d->len, imxdmac->per_address); | 605 | d->sg, d->sgcount, d->len, |
| 606 | (unsigned long long)imxdmac->per_address); | ||
| 603 | } else { | 607 | } else { |
| 604 | dev_err(imxdma->dev, "%s channel: %d bad dma mode\n", | 608 | dev_err(imxdma->dev, "%s channel: %d bad dma mode\n", |
| 605 | __func__, imxdmac->channel); | 609 | __func__, imxdmac->channel); |
| @@ -771,7 +775,7 @@ static int imxdma_alloc_chan_resources(struct dma_chan *chan) | |||
| 771 | desc->desc.tx_submit = imxdma_tx_submit; | 775 | desc->desc.tx_submit = imxdma_tx_submit; |
| 772 | /* txd.flags will be overwritten in prep funcs */ | 776 | /* txd.flags will be overwritten in prep funcs */ |
| 773 | desc->desc.flags = DMA_CTRL_ACK; | 777 | desc->desc.flags = DMA_CTRL_ACK; |
| 774 | desc->status = DMA_SUCCESS; | 778 | desc->status = DMA_COMPLETE; |
| 775 | 779 | ||
| 776 | list_add_tail(&desc->node, &imxdmac->ld_free); | 780 | list_add_tail(&desc->node, &imxdmac->ld_free); |
| 777 | imxdmac->descs_allocated++; | 781 | imxdmac->descs_allocated++; |
| @@ -870,7 +874,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( | |||
| 870 | int i; | 874 | int i; |
| 871 | unsigned int periods = buf_len / period_len; | 875 | unsigned int periods = buf_len / period_len; |
| 872 | 876 | ||
| 873 | dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n", | 877 | dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n", |
| 874 | __func__, imxdmac->channel, buf_len, period_len); | 878 | __func__, imxdmac->channel, buf_len, period_len); |
| 875 | 879 | ||
| 876 | if (list_empty(&imxdmac->ld_free) || | 880 | if (list_empty(&imxdmac->ld_free) || |
| @@ -926,8 +930,9 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy( | |||
| 926 | struct imxdma_engine *imxdma = imxdmac->imxdma; | 930 | struct imxdma_engine *imxdma = imxdmac->imxdma; |
| 927 | struct imxdma_desc *desc; | 931 | struct imxdma_desc *desc; |
| 928 | 932 | ||
| 929 | dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%d\n", | 933 | dev_dbg(imxdma->dev, "%s channel: %d src=0x%llx dst=0x%llx len=%zu\n", |
| 930 | __func__, imxdmac->channel, src, dest, len); | 934 | __func__, imxdmac->channel, (unsigned long long)src, |
| 935 | (unsigned long long)dest, len); | ||
| 931 | 936 | ||
| 932 | if (list_empty(&imxdmac->ld_free) || | 937 | if (list_empty(&imxdmac->ld_free) || |
| 933 | imxdma_chan_is_doing_cyclic(imxdmac)) | 938 | imxdma_chan_is_doing_cyclic(imxdmac)) |
| @@ -956,9 +961,10 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved( | |||
| 956 | struct imxdma_engine *imxdma = imxdmac->imxdma; | 961 | struct imxdma_engine *imxdma = imxdmac->imxdma; |
| 957 | struct imxdma_desc *desc; | 962 | struct imxdma_desc *desc; |
| 958 | 963 | ||
| 959 | dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%x dst_start=0x%x\n" | 964 | dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%llx dst_start=0x%llx\n" |
| 960 | " src_sgl=%s dst_sgl=%s numf=%d frame_size=%d\n", __func__, | 965 | " src_sgl=%s dst_sgl=%s numf=%zu frame_size=%zu\n", __func__, |
| 961 | imxdmac->channel, xt->src_start, xt->dst_start, | 966 | imxdmac->channel, (unsigned long long)xt->src_start, |
| 967 | (unsigned long long) xt->dst_start, | ||
| 962 | xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false", | 968 | xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false", |
| 963 | xt->numf, xt->frame_size); | 969 | xt->numf, xt->frame_size); |
| 964 | 970 | ||
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index c1fd504cae28..c75679d42028 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c | |||
| @@ -638,7 +638,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac) | |||
| 638 | if (error) | 638 | if (error) |
| 639 | sdmac->status = DMA_ERROR; | 639 | sdmac->status = DMA_ERROR; |
| 640 | else | 640 | else |
| 641 | sdmac->status = DMA_SUCCESS; | 641 | sdmac->status = DMA_COMPLETE; |
| 642 | 642 | ||
| 643 | dma_cookie_complete(&sdmac->desc); | 643 | dma_cookie_complete(&sdmac->desc); |
| 644 | if (sdmac->desc.callback) | 644 | if (sdmac->desc.callback) |
| @@ -1089,8 +1089,8 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg( | |||
| 1089 | param &= ~BD_CONT; | 1089 | param &= ~BD_CONT; |
| 1090 | } | 1090 | } |
| 1091 | 1091 | ||
| 1092 | dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n", | 1092 | dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n", |
| 1093 | i, count, sg->dma_address, | 1093 | i, count, (u64)sg->dma_address, |
| 1094 | param & BD_WRAP ? "wrap" : "", | 1094 | param & BD_WRAP ? "wrap" : "", |
| 1095 | param & BD_INTR ? " intr" : ""); | 1095 | param & BD_INTR ? " intr" : ""); |
| 1096 | 1096 | ||
| @@ -1163,8 +1163,8 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( | |||
| 1163 | if (i + 1 == num_periods) | 1163 | if (i + 1 == num_periods) |
| 1164 | param |= BD_WRAP; | 1164 | param |= BD_WRAP; |
| 1165 | 1165 | ||
| 1166 | dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n", | 1166 | dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n", |
| 1167 | i, period_len, dma_addr, | 1167 | i, period_len, (u64)dma_addr, |
| 1168 | param & BD_WRAP ? "wrap" : "", | 1168 | param & BD_WRAP ? "wrap" : "", |
| 1169 | param & BD_INTR ? " intr" : ""); | 1169 | param & BD_INTR ? " intr" : ""); |
| 1170 | 1170 | ||
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c index a975ebebea8a..1aab8130efa1 100644 --- a/drivers/dma/intel_mid_dma.c +++ b/drivers/dma/intel_mid_dma.c | |||
| @@ -309,7 +309,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc, | |||
| 309 | callback_txd(param_txd); | 309 | callback_txd(param_txd); |
| 310 | } | 310 | } |
| 311 | if (midc->raw_tfr) { | 311 | if (midc->raw_tfr) { |
| 312 | desc->status = DMA_SUCCESS; | 312 | desc->status = DMA_COMPLETE; |
| 313 | if (desc->lli != NULL) { | 313 | if (desc->lli != NULL) { |
| 314 | pci_pool_free(desc->lli_pool, desc->lli, | 314 | pci_pool_free(desc->lli_pool, desc->lli, |
| 315 | desc->lli_phys); | 315 | desc->lli_phys); |
| @@ -481,7 +481,7 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan, | |||
| 481 | enum dma_status ret; | 481 | enum dma_status ret; |
| 482 | 482 | ||
| 483 | ret = dma_cookie_status(chan, cookie, txstate); | 483 | ret = dma_cookie_status(chan, cookie, txstate); |
| 484 | if (ret != DMA_SUCCESS) { | 484 | if (ret != DMA_COMPLETE) { |
| 485 | spin_lock_bh(&midc->lock); | 485 | spin_lock_bh(&midc->lock); |
| 486 | midc_scan_descriptors(to_middma_device(chan->device), midc); | 486 | midc_scan_descriptors(to_middma_device(chan->device), midc); |
| 487 | spin_unlock_bh(&midc->lock); | 487 | spin_unlock_bh(&midc->lock); |
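The DMA_SUCCESS -> DMA_COMPLETE rename threads through every tx_status implementation in this series, and the surrounding idiom is worth spelling out once. A generic sketch, where the foo_* names are hypothetical and dma_cookie_status() comes from the drivers/dma-internal header:

	#include "dmaengine.h"	/* drivers/dma private cookie helpers */

	static void foo_cleanup_descriptors(struct dma_chan *chan);	/* hypothetical */

	static enum dma_status foo_tx_status(struct dma_chan *chan,
					     dma_cookie_t cookie,
					     struct dma_tx_state *txstate)
	{
		enum dma_status ret = dma_cookie_status(chan, cookie, txstate);

		if (ret == DMA_COMPLETE)	/* was DMA_SUCCESS before this series */
			return ret;

		foo_cleanup_descriptors(chan);	/* driver-specific scan, as above */
		return dma_cookie_status(chan, cookie, txstate);
	}

Only the enum value changes; the early-return-on-complete shape is identical before and after.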
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 5ff6fc1819dc..1a49c777607c 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c | |||
| @@ -531,21 +531,6 @@ static void ioat1_cleanup_event(unsigned long data) | |||
| 531 | writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); | 531 | writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); |
| 532 | } | 532 | } |
| 533 | 533 | ||
| 534 | void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, | ||
| 535 | size_t len, struct ioat_dma_descriptor *hw) | ||
| 536 | { | ||
| 537 | struct pci_dev *pdev = chan->device->pdev; | ||
| 538 | size_t offset = len - hw->size; | ||
| 539 | |||
| 540 | if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) | ||
| 541 | ioat_unmap(pdev, hw->dst_addr - offset, len, | ||
| 542 | PCI_DMA_FROMDEVICE, flags, 1); | ||
| 543 | |||
| 544 | if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) | ||
| 545 | ioat_unmap(pdev, hw->src_addr - offset, len, | ||
| 546 | PCI_DMA_TODEVICE, flags, 0); | ||
| 547 | } | ||
| 548 | |||
| 549 | dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan) | 534 | dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan) |
| 550 | { | 535 | { |
| 551 | dma_addr_t phys_complete; | 536 | dma_addr_t phys_complete; |
| @@ -602,7 +587,7 @@ static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete) | |||
| 602 | dump_desc_dbg(ioat, desc); | 587 | dump_desc_dbg(ioat, desc); |
| 603 | if (tx->cookie) { | 588 | if (tx->cookie) { |
| 604 | dma_cookie_complete(tx); | 589 | dma_cookie_complete(tx); |
| 605 | ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); | 590 | dma_descriptor_unmap(tx); |
| 606 | ioat->active -= desc->hw->tx_cnt; | 591 | ioat->active -= desc->hw->tx_cnt; |
| 607 | if (tx->callback) { | 592 | if (tx->callback) { |
| 608 | tx->callback(tx->callback_param); | 593 | tx->callback(tx->callback_param); |
| @@ -733,7 +718,7 @@ ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, | |||
| 733 | enum dma_status ret; | 718 | enum dma_status ret; |
| 734 | 719 | ||
| 735 | ret = dma_cookie_status(c, cookie, txstate); | 720 | ret = dma_cookie_status(c, cookie, txstate); |
| 736 | if (ret == DMA_SUCCESS) | 721 | if (ret == DMA_COMPLETE) |
| 737 | return ret; | 722 | return ret; |
| 738 | 723 | ||
| 739 | device->cleanup_fn((unsigned long) c); | 724 | device->cleanup_fn((unsigned long) c); |
| @@ -833,8 +818,7 @@ int ioat_dma_self_test(struct ioatdma_device *device) | |||
| 833 | 818 | ||
| 834 | dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE); | 819 | dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE); |
| 835 | dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); | 820 | dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); |
| 836 | flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP | | 821 | flags = DMA_PREP_INTERRUPT; |
| 837 | DMA_PREP_INTERRUPT; | ||
| 838 | tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, | 822 | tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, |
| 839 | IOAT_TEST_SIZE, flags); | 823 | IOAT_TEST_SIZE, flags); |
| 840 | if (!tx) { | 824 | if (!tx) { |
| @@ -859,7 +843,7 @@ int ioat_dma_self_test(struct ioatdma_device *device) | |||
| 859 | 843 | ||
| 860 | if (tmo == 0 || | 844 | if (tmo == 0 || |
| 861 | dma->device_tx_status(dma_chan, cookie, NULL) | 845 | dma->device_tx_status(dma_chan, cookie, NULL) |
| 862 | != DMA_SUCCESS) { | 846 | != DMA_COMPLETE) { |
| 863 | dev_err(dev, "Self-test copy timed out, disabling\n"); | 847 | dev_err(dev, "Self-test copy timed out, disabling\n"); |
| 864 | err = -ENODEV; | 848 | err = -ENODEV; |
| 865 | goto unmap_dma; | 849 | goto unmap_dma; |
| @@ -885,8 +869,7 @@ static char ioat_interrupt_style[32] = "msix"; | |||
| 885 | module_param_string(ioat_interrupt_style, ioat_interrupt_style, | 869 | module_param_string(ioat_interrupt_style, ioat_interrupt_style, |
| 886 | sizeof(ioat_interrupt_style), 0644); | 870 | sizeof(ioat_interrupt_style), 0644); |
| 887 | MODULE_PARM_DESC(ioat_interrupt_style, | 871 | MODULE_PARM_DESC(ioat_interrupt_style, |
| 888 | "set ioat interrupt style: msix (default), " | 872 | "set ioat interrupt style: msix (default), msi, intx"); |
| 889 | "msix-single-vector, msi, intx)"); | ||
| 890 | 873 | ||
| 891 | /** | 874 | /** |
| 892 | * ioat_dma_setup_interrupts - setup interrupt handler | 875 | * ioat_dma_setup_interrupts - setup interrupt handler |
| @@ -904,8 +887,6 @@ int ioat_dma_setup_interrupts(struct ioatdma_device *device) | |||
| 904 | 887 | ||
| 905 | if (!strcmp(ioat_interrupt_style, "msix")) | 888 | if (!strcmp(ioat_interrupt_style, "msix")) |
| 906 | goto msix; | 889 | goto msix; |
| 907 | if (!strcmp(ioat_interrupt_style, "msix-single-vector")) | ||
| 908 | goto msix_single_vector; | ||
| 909 | if (!strcmp(ioat_interrupt_style, "msi")) | 890 | if (!strcmp(ioat_interrupt_style, "msi")) |
| 910 | goto msi; | 891 | goto msi; |
| 911 | if (!strcmp(ioat_interrupt_style, "intx")) | 892 | if (!strcmp(ioat_interrupt_style, "intx")) |
| @@ -920,10 +901,8 @@ msix: | |||
| 920 | device->msix_entries[i].entry = i; | 901 | device->msix_entries[i].entry = i; |
| 921 | 902 | ||
| 922 | err = pci_enable_msix(pdev, device->msix_entries, msixcnt); | 903 | err = pci_enable_msix(pdev, device->msix_entries, msixcnt); |
| 923 | if (err < 0) | 904 | if (err) |
| 924 | goto msi; | 905 | goto msi; |
| 925 | if (err > 0) | ||
| 926 | goto msix_single_vector; | ||
| 927 | 906 | ||
| 928 | for (i = 0; i < msixcnt; i++) { | 907 | for (i = 0; i < msixcnt; i++) { |
| 929 | msix = &device->msix_entries[i]; | 908 | msix = &device->msix_entries[i]; |
| @@ -937,29 +916,13 @@ msix: | |||
| 937 | chan = ioat_chan_by_index(device, j); | 916 | chan = ioat_chan_by_index(device, j); |
| 938 | devm_free_irq(dev, msix->vector, chan); | 917 | devm_free_irq(dev, msix->vector, chan); |
| 939 | } | 918 | } |
| 940 | goto msix_single_vector; | 919 | goto msi; |
| 941 | } | 920 | } |
| 942 | } | 921 | } |
| 943 | intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL; | 922 | intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL; |
| 944 | device->irq_mode = IOAT_MSIX; | 923 | device->irq_mode = IOAT_MSIX; |
| 945 | goto done; | 924 | goto done; |
| 946 | 925 | ||
| 947 | msix_single_vector: | ||
| 948 | msix = &device->msix_entries[0]; | ||
| 949 | msix->entry = 0; | ||
| 950 | err = pci_enable_msix(pdev, device->msix_entries, 1); | ||
| 951 | if (err) | ||
| 952 | goto msi; | ||
| 953 | |||
| 954 | err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0, | ||
| 955 | "ioat-msix", device); | ||
| 956 | if (err) { | ||
| 957 | pci_disable_msix(pdev); | ||
| 958 | goto msi; | ||
| 959 | } | ||
| 960 | device->irq_mode = IOAT_MSIX_SINGLE; | ||
| 961 | goto done; | ||
| 962 | |||
| 963 | msi: | 926 | msi: |
| 964 | err = pci_enable_msi(pdev); | 927 | err = pci_enable_msi(pdev); |
| 965 | if (err) | 928 | if (err) |
| @@ -971,7 +934,7 @@ msi: | |||
| 971 | pci_disable_msi(pdev); | 934 | pci_disable_msi(pdev); |
| 972 | goto intx; | 935 | goto intx; |
| 973 | } | 936 | } |
| 974 | device->irq_mode = IOAT_MSIX; | 937 | device->irq_mode = IOAT_MSI; |
| 975 | goto done; | 938 | goto done; |
| 976 | 939 | ||
| 977 | intx: | 940 | intx: |
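Net effect of the interrupt rework above: the msix-single-vector rung is dropped, any pci_enable_msix() failure (including partial allocation, where err > 0) now falls straight back to MSI, and the MSI path records IOAT_MSI instead of the copy-pasted IOAT_MSIX. A generic stand-in for the resulting fallback order, not the driver's literal code:

	#include <linux/pci.h>

	/* Returns an irq-mode tag mirroring enum ioat_irq_mode; illustration only. */
	static int pick_irq_mode(struct pci_dev *pdev, struct msix_entry *entries,
				 int nvec)
	{
		if (!pci_enable_msix(pdev, entries, nvec))
			return 1;		/* IOAT_MSIX */
		/* Partial MSI-X success (err > 0) falls through as well,
		 * instead of retrying with a single vector. */
		if (!pci_enable_msi(pdev))
			return 2;		/* IOAT_MSI (the irq_mode fix above) */
		return 3;			/* IOAT_INTX: legacy pin always works */
	}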
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 54fb7b9ff9aa..11fb877ddca9 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h | |||
| @@ -52,7 +52,6 @@ | |||
| 52 | enum ioat_irq_mode { | 52 | enum ioat_irq_mode { |
| 53 | IOAT_NOIRQ = 0, | 53 | IOAT_NOIRQ = 0, |
| 54 | IOAT_MSIX, | 54 | IOAT_MSIX, |
| 55 | IOAT_MSIX_SINGLE, | ||
| 56 | IOAT_MSI, | 55 | IOAT_MSI, |
| 57 | IOAT_INTX | 56 | IOAT_INTX |
| 58 | }; | 57 | }; |
| @@ -83,7 +82,6 @@ struct ioatdma_device { | |||
| 83 | struct pci_pool *completion_pool; | 82 | struct pci_pool *completion_pool; |
| 84 | #define MAX_SED_POOLS 5 | 83 | #define MAX_SED_POOLS 5 |
| 85 | struct dma_pool *sed_hw_pool[MAX_SED_POOLS]; | 84 | struct dma_pool *sed_hw_pool[MAX_SED_POOLS]; |
| 86 | struct kmem_cache *sed_pool; | ||
| 87 | struct dma_device common; | 85 | struct dma_device common; |
| 88 | u8 version; | 86 | u8 version; |
| 89 | struct msix_entry msix_entries[4]; | 87 | struct msix_entry msix_entries[4]; |
| @@ -342,16 +340,6 @@ static inline bool is_ioat_bug(unsigned long err) | |||
| 342 | return !!err; | 340 | return !!err; |
| 343 | } | 341 | } |
| 344 | 342 | ||
| 345 | static inline void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len, | ||
| 346 | int direction, enum dma_ctrl_flags flags, bool dst) | ||
| 347 | { | ||
| 348 | if ((dst && (flags & DMA_COMPL_DEST_UNMAP_SINGLE)) || | ||
| 349 | (!dst && (flags & DMA_COMPL_SRC_UNMAP_SINGLE))) | ||
| 350 | pci_unmap_single(pdev, addr, len, direction); | ||
| 351 | else | ||
| 352 | pci_unmap_page(pdev, addr, len, direction); | ||
| 353 | } | ||
| 354 | |||
| 355 | int ioat_probe(struct ioatdma_device *device); | 343 | int ioat_probe(struct ioatdma_device *device); |
| 356 | int ioat_register(struct ioatdma_device *device); | 344 | int ioat_register(struct ioatdma_device *device); |
| 357 | int ioat1_dma_probe(struct ioatdma_device *dev, int dca); | 345 | int ioat1_dma_probe(struct ioatdma_device *dev, int dca); |
| @@ -363,8 +351,6 @@ void ioat_init_channel(struct ioatdma_device *device, | |||
| 363 | struct ioat_chan_common *chan, int idx); | 351 | struct ioat_chan_common *chan, int idx); |
| 364 | enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, | 352 | enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, |
| 365 | struct dma_tx_state *txstate); | 353 | struct dma_tx_state *txstate); |
| 366 | void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, | ||
| 367 | size_t len, struct ioat_dma_descriptor *hw); | ||
| 368 | bool ioat_cleanup_preamble(struct ioat_chan_common *chan, | 354 | bool ioat_cleanup_preamble(struct ioat_chan_common *chan, |
| 369 | dma_addr_t *phys_complete); | 355 | dma_addr_t *phys_complete); |
| 370 | void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type); | 356 | void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type); |
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index b925e1b1d139..5d3affe7e976 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c | |||
| @@ -148,7 +148,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) | |||
| 148 | tx = &desc->txd; | 148 | tx = &desc->txd; |
| 149 | dump_desc_dbg(ioat, desc); | 149 | dump_desc_dbg(ioat, desc); |
| 150 | if (tx->cookie) { | 150 | if (tx->cookie) { |
| 151 | ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); | 151 | dma_descriptor_unmap(tx); |
| 152 | dma_cookie_complete(tx); | 152 | dma_cookie_complete(tx); |
| 153 | if (tx->callback) { | 153 | if (tx->callback) { |
| 154 | tx->callback(tx->callback_param); | 154 | tx->callback(tx->callback_param); |
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h index 212d584fe427..470292767e68 100644 --- a/drivers/dma/ioat/dma_v2.h +++ b/drivers/dma/ioat/dma_v2.h | |||
| @@ -157,7 +157,6 @@ static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr) | |||
| 157 | 157 | ||
| 158 | int ioat2_dma_probe(struct ioatdma_device *dev, int dca); | 158 | int ioat2_dma_probe(struct ioatdma_device *dev, int dca); |
| 159 | int ioat3_dma_probe(struct ioatdma_device *dev, int dca); | 159 | int ioat3_dma_probe(struct ioatdma_device *dev, int dca); |
| 160 | void ioat3_dma_remove(struct ioatdma_device *dev); | ||
| 161 | struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); | 160 | struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); |
| 162 | struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); | 161 | struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); |
| 163 | int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs); | 162 | int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs); |
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index d8ececaf1b57..820817e97e62 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c | |||
| @@ -67,6 +67,8 @@ | |||
| 67 | #include "dma.h" | 67 | #include "dma.h" |
| 68 | #include "dma_v2.h" | 68 | #include "dma_v2.h" |
| 69 | 69 | ||
| 70 | extern struct kmem_cache *ioat3_sed_cache; | ||
| 71 | |||
| 70 | /* ioat hardware assumes at least two sources for raid operations */ | 72 | /* ioat hardware assumes at least two sources for raid operations */ |
| 71 | #define src_cnt_to_sw(x) ((x) + 2) | 73 | #define src_cnt_to_sw(x) ((x) + 2) |
| 72 | #define src_cnt_to_hw(x) ((x) - 2) | 74 | #define src_cnt_to_hw(x) ((x) - 2) |
| @@ -87,22 +89,8 @@ static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 }; | |||
| 87 | static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7, | 89 | static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7, |
| 88 | 0, 1, 2, 3, 4, 5, 6 }; | 90 | 0, 1, 2, 3, 4, 5, 6 }; |
| 89 | 91 | ||
| 90 | /* | ||
| 91 | * technically sources 1 and 2 do not require SED, but the op will have | ||
| 92 | * at least 9 descriptors so that's irrelevant. | ||
| 93 | */ | ||
| 94 | static const u8 pq16_idx_to_sed[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, | ||
| 95 | 1, 1, 1, 1, 1, 1, 1 }; | ||
| 96 | |||
| 97 | static void ioat3_eh(struct ioat2_dma_chan *ioat); | 92 | static void ioat3_eh(struct ioat2_dma_chan *ioat); |
| 98 | 93 | ||
| 99 | static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx) | ||
| 100 | { | ||
| 101 | struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1]; | ||
| 102 | |||
| 103 | return raw->field[xor_idx_to_field[idx]]; | ||
| 104 | } | ||
| 105 | |||
| 106 | static void xor_set_src(struct ioat_raw_descriptor *descs[2], | 94 | static void xor_set_src(struct ioat_raw_descriptor *descs[2], |
| 107 | dma_addr_t addr, u32 offset, int idx) | 95 | dma_addr_t addr, u32 offset, int idx) |
| 108 | { | 96 | { |
| @@ -135,12 +123,6 @@ static void pq_set_src(struct ioat_raw_descriptor *descs[2], | |||
| 135 | pq->coef[idx] = coef; | 123 | pq->coef[idx] = coef; |
| 136 | } | 124 | } |
| 137 | 125 | ||
| 138 | static int sed_get_pq16_pool_idx(int src_cnt) | ||
| 139 | { | ||
| 140 | |||
| 141 | return pq16_idx_to_sed[src_cnt]; | ||
| 142 | } | ||
| 143 | |||
| 144 | static bool is_jf_ioat(struct pci_dev *pdev) | 126 | static bool is_jf_ioat(struct pci_dev *pdev) |
| 145 | { | 127 | { |
| 146 | switch (pdev->device) { | 128 | switch (pdev->device) { |
| @@ -272,7 +254,7 @@ ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool) | |||
| 272 | struct ioat_sed_ent *sed; | 254 | struct ioat_sed_ent *sed; |
| 273 | gfp_t flags = __GFP_ZERO | GFP_ATOMIC; | 255 | gfp_t flags = __GFP_ZERO | GFP_ATOMIC; |
| 274 | 256 | ||
| 275 | sed = kmem_cache_alloc(device->sed_pool, flags); | 257 | sed = kmem_cache_alloc(ioat3_sed_cache, flags); |
| 276 | if (!sed) | 258 | if (!sed) |
| 277 | return NULL; | 259 | return NULL; |
| 278 | 260 | ||
| @@ -280,7 +262,7 @@ ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool) | |||
| 280 | sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool], | 262 | sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool], |
| 281 | flags, &sed->dma); | 263 | flags, &sed->dma); |
| 282 | if (!sed->hw) { | 264 | if (!sed->hw) { |
| 283 | kmem_cache_free(device->sed_pool, sed); | 265 | kmem_cache_free(ioat3_sed_cache, sed); |
| 284 | return NULL; | 266 | return NULL; |
| 285 | } | 267 | } |
| 286 | 268 | ||
| @@ -293,165 +275,7 @@ static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *s | |||
| 293 | return; | 275 | return; |
| 294 | 276 | ||
| 295 | dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma); | 277 | dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma); |
| 296 | kmem_cache_free(device->sed_pool, sed); | 278 | kmem_cache_free(ioat3_sed_cache, sed); |
| 297 | } | ||
| 298 | |||
| 299 | static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat, | ||
| 300 | struct ioat_ring_ent *desc, int idx) | ||
| 301 | { | ||
| 302 | struct ioat_chan_common *chan = &ioat->base; | ||
| 303 | struct pci_dev *pdev = chan->device->pdev; | ||
| 304 | size_t len = desc->len; | ||
| 305 | size_t offset = len - desc->hw->size; | ||
| 306 | struct dma_async_tx_descriptor *tx = &desc->txd; | ||
| 307 | enum dma_ctrl_flags flags = tx->flags; | ||
| 308 | |||
| 309 | switch (desc->hw->ctl_f.op) { | ||
| 310 | case IOAT_OP_COPY: | ||
| 311 | if (!desc->hw->ctl_f.null) /* skip 'interrupt' ops */ | ||
| 312 | ioat_dma_unmap(chan, flags, len, desc->hw); | ||
| 313 | break; | ||
| 314 | case IOAT_OP_XOR_VAL: | ||
| 315 | case IOAT_OP_XOR: { | ||
| 316 | struct ioat_xor_descriptor *xor = desc->xor; | ||
| 317 | struct ioat_ring_ent *ext; | ||
| 318 | struct ioat_xor_ext_descriptor *xor_ex = NULL; | ||
| 319 | int src_cnt = src_cnt_to_sw(xor->ctl_f.src_cnt); | ||
| 320 | struct ioat_raw_descriptor *descs[2]; | ||
| 321 | int i; | ||
| 322 | |||
| 323 | if (src_cnt > 5) { | ||
| 324 | ext = ioat2_get_ring_ent(ioat, idx + 1); | ||
| 325 | xor_ex = ext->xor_ex; | ||
| 326 | } | ||
| 327 | |||
| 328 | if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
| 329 | descs[0] = (struct ioat_raw_descriptor *) xor; | ||
| 330 | descs[1] = (struct ioat_raw_descriptor *) xor_ex; | ||
| 331 | for (i = 0; i < src_cnt; i++) { | ||
| 332 | dma_addr_t src = xor_get_src(descs, i); | ||
| 333 | |||
| 334 | ioat_unmap(pdev, src - offset, len, | ||
| 335 | PCI_DMA_TODEVICE, flags, 0); | ||
| 336 | } | ||
| 337 | |||
| 338 | /* dest is a source in xor validate operations */ | ||
| 339 | if (xor->ctl_f.op == IOAT_OP_XOR_VAL) { | ||
| 340 | ioat_unmap(pdev, xor->dst_addr - offset, len, | ||
| 341 | PCI_DMA_TODEVICE, flags, 1); | ||
| 342 | break; | ||
| 343 | } | ||
| 344 | } | ||
| 345 | |||
| 346 | if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) | ||
| 347 | ioat_unmap(pdev, xor->dst_addr - offset, len, | ||
| 348 | PCI_DMA_FROMDEVICE, flags, 1); | ||
| 349 | break; | ||
| 350 | } | ||
| 351 | case IOAT_OP_PQ_VAL: | ||
| 352 | case IOAT_OP_PQ: { | ||
| 353 | struct ioat_pq_descriptor *pq = desc->pq; | ||
| 354 | struct ioat_ring_ent *ext; | ||
| 355 | struct ioat_pq_ext_descriptor *pq_ex = NULL; | ||
| 356 | int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt); | ||
| 357 | struct ioat_raw_descriptor *descs[2]; | ||
| 358 | int i; | ||
| 359 | |||
| 360 | if (src_cnt > 3) { | ||
| 361 | ext = ioat2_get_ring_ent(ioat, idx + 1); | ||
| 362 | pq_ex = ext->pq_ex; | ||
| 363 | } | ||
| 364 | |||
| 365 | /* in the 'continue' case don't unmap the dests as sources */ | ||
| 366 | if (dmaf_p_disabled_continue(flags)) | ||
| 367 | src_cnt--; | ||
| 368 | else if (dmaf_continue(flags)) | ||
| 369 | src_cnt -= 3; | ||
| 370 | |||
| 371 | if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
| 372 | descs[0] = (struct ioat_raw_descriptor *) pq; | ||
| 373 | descs[1] = (struct ioat_raw_descriptor *) pq_ex; | ||
| 374 | for (i = 0; i < src_cnt; i++) { | ||
| 375 | dma_addr_t src = pq_get_src(descs, i); | ||
| 376 | |||
| 377 | ioat_unmap(pdev, src - offset, len, | ||
| 378 | PCI_DMA_TODEVICE, flags, 0); | ||
| 379 | } | ||
| 380 | |||
| 381 | /* the dests are sources in pq validate operations */ | ||
| 382 | if (pq->ctl_f.op == IOAT_OP_XOR_VAL) { | ||
| 383 | if (!(flags & DMA_PREP_PQ_DISABLE_P)) | ||
| 384 | ioat_unmap(pdev, pq->p_addr - offset, | ||
| 385 | len, PCI_DMA_TODEVICE, flags, 0); | ||
| 386 | if (!(flags & DMA_PREP_PQ_DISABLE_Q)) | ||
| 387 | ioat_unmap(pdev, pq->q_addr - offset, | ||
| 388 | len, PCI_DMA_TODEVICE, flags, 0); | ||
| 389 | break; | ||
| 390 | } | ||
| 391 | } | ||
| 392 | |||
| 393 | if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
| 394 | if (!(flags & DMA_PREP_PQ_DISABLE_P)) | ||
| 395 | ioat_unmap(pdev, pq->p_addr - offset, len, | ||
| 396 | PCI_DMA_BIDIRECTIONAL, flags, 1); | ||
| 397 | if (!(flags & DMA_PREP_PQ_DISABLE_Q)) | ||
| 398 | ioat_unmap(pdev, pq->q_addr - offset, len, | ||
| 399 | PCI_DMA_BIDIRECTIONAL, flags, 1); | ||
| 400 | } | ||
| 401 | break; | ||
| 402 | } | ||
| 403 | case IOAT_OP_PQ_16S: | ||
| 404 | case IOAT_OP_PQ_VAL_16S: { | ||
| 405 | struct ioat_pq_descriptor *pq = desc->pq; | ||
| 406 | int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt); | ||
| 407 | struct ioat_raw_descriptor *descs[4]; | ||
| 408 | int i; | ||
| 409 | |||
| 410 | /* in the 'continue' case don't unmap the dests as sources */ | ||
| 411 | if (dmaf_p_disabled_continue(flags)) | ||
| 412 | src_cnt--; | ||
| 413 | else if (dmaf_continue(flags)) | ||
| 414 | src_cnt -= 3; | ||
| 415 | |||
| 416 | if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
| 417 | descs[0] = (struct ioat_raw_descriptor *)pq; | ||
| 418 | descs[1] = (struct ioat_raw_descriptor *)(desc->sed->hw); | ||
| 419 | descs[2] = (struct ioat_raw_descriptor *)(&desc->sed->hw->b[0]); | ||
| 420 | for (i = 0; i < src_cnt; i++) { | ||
| 421 | dma_addr_t src = pq16_get_src(descs, i); | ||
| 422 | |||
| 423 | ioat_unmap(pdev, src - offset, len, | ||
| 424 | PCI_DMA_TODEVICE, flags, 0); | ||
| 425 | } | ||
| 426 | |||
| 427 | /* the dests are sources in pq validate operations */ | ||
| 428 | if (pq->ctl_f.op == IOAT_OP_XOR_VAL) { | ||
| 429 | if (!(flags & DMA_PREP_PQ_DISABLE_P)) | ||
| 430 | ioat_unmap(pdev, pq->p_addr - offset, | ||
| 431 | len, PCI_DMA_TODEVICE, | ||
| 432 | flags, 0); | ||
| 433 | if (!(flags & DMA_PREP_PQ_DISABLE_Q)) | ||
| 434 | ioat_unmap(pdev, pq->q_addr - offset, | ||
| 435 | len, PCI_DMA_TODEVICE, | ||
| 436 | flags, 0); | ||
| 437 | break; | ||
| 438 | } | ||
| 439 | } | ||
| 440 | |||
| 441 | if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
| 442 | if (!(flags & DMA_PREP_PQ_DISABLE_P)) | ||
| 443 | ioat_unmap(pdev, pq->p_addr - offset, len, | ||
| 444 | PCI_DMA_BIDIRECTIONAL, flags, 1); | ||
| 445 | if (!(flags & DMA_PREP_PQ_DISABLE_Q)) | ||
| 446 | ioat_unmap(pdev, pq->q_addr - offset, len, | ||
| 447 | PCI_DMA_BIDIRECTIONAL, flags, 1); | ||
| 448 | } | ||
| 449 | break; | ||
| 450 | } | ||
| 451 | default: | ||
| 452 | dev_err(&pdev->dev, "%s: unknown op type: %#x\n", | ||
| 453 | __func__, desc->hw->ctl_f.op); | ||
| 454 | } | ||
| 455 | } | 279 | } |
| 456 | 280 | ||
| 457 | static bool desc_has_ext(struct ioat_ring_ent *desc) | 281 | static bool desc_has_ext(struct ioat_ring_ent *desc) |
| @@ -577,7 +401,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) | |||
| 577 | tx = &desc->txd; | 401 | tx = &desc->txd; |
| 578 | if (tx->cookie) { | 402 | if (tx->cookie) { |
| 579 | dma_cookie_complete(tx); | 403 | dma_cookie_complete(tx); |
| 580 | ioat3_dma_unmap(ioat, desc, idx + i); | 404 | dma_descriptor_unmap(tx); |
| 581 | if (tx->callback) { | 405 | if (tx->callback) { |
| 582 | tx->callback(tx->callback_param); | 406 | tx->callback(tx->callback_param); |
| 583 | tx->callback = NULL; | 407 | tx->callback = NULL; |
| @@ -807,7 +631,7 @@ ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie, | |||
| 807 | enum dma_status ret; | 631 | enum dma_status ret; |
| 808 | 632 | ||
| 809 | ret = dma_cookie_status(c, cookie, txstate); | 633 | ret = dma_cookie_status(c, cookie, txstate); |
| 810 | if (ret == DMA_SUCCESS) | 634 | if (ret == DMA_COMPLETE) |
| 811 | return ret; | 635 | return ret; |
| 812 | 636 | ||
| 813 | ioat3_cleanup(ioat); | 637 | ioat3_cleanup(ioat); |
| @@ -1129,9 +953,6 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result, | |||
| 1129 | u8 op; | 953 | u8 op; |
| 1130 | int i, s, idx, num_descs; | 954 | int i, s, idx, num_descs; |
| 1131 | 955 | ||
| 1132 | /* this function only handles src_cnt 9 - 16 */ | ||
| 1133 | BUG_ON(src_cnt < 9); | ||
| 1134 | |||
| 1135 | /* this function is only called with 9-16 sources */ | 956 | /* this function is only called with 9-16 sources */ |
| 1136 | op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S; | 957 | op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S; |
| 1137 | 958 | ||
| @@ -1159,8 +980,7 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result, | |||
| 1159 | 980 | ||
| 1160 | descs[0] = (struct ioat_raw_descriptor *) pq; | 981 | descs[0] = (struct ioat_raw_descriptor *) pq; |
| 1161 | 982 | ||
| 1162 | desc->sed = ioat3_alloc_sed(device, | 983 | desc->sed = ioat3_alloc_sed(device, (src_cnt-2) >> 3); |
| 1163 | sed_get_pq16_pool_idx(src_cnt)); | ||
| 1164 | if (!desc->sed) { | 984 | if (!desc->sed) { |
| 1165 | dev_err(to_dev(chan), | 985 | dev_err(to_dev(chan), |
| 1166 | "%s: no free sed entries\n", __func__); | 986 | "%s: no free sed entries\n", __func__); |
| @@ -1218,13 +1038,21 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result, | |||
| 1218 | return &desc->txd; | 1038 | return &desc->txd; |
| 1219 | } | 1039 | } |
| 1220 | 1040 | ||
| 1041 | static int src_cnt_flags(unsigned int src_cnt, unsigned long flags) | ||
| 1042 | { | ||
| 1043 | if (dmaf_p_disabled_continue(flags)) | ||
| 1044 | return src_cnt + 1; | ||
| 1045 | else if (dmaf_continue(flags)) | ||
| 1046 | return src_cnt + 3; | ||
| 1047 | else | ||
| 1048 | return src_cnt; | ||
| 1049 | } | ||
| 1050 | |||
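The helper exists because DMA_PREP_CONTINUE operations implicitly re-read P/Q as extra sources, so the 8-versus-16-source descriptor decision must count them. Worked from the definition above: src_cnt_flags(8, 0) == 8 keeps the 8-source format; src_cnt_flags(8, DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P) == 9 and src_cnt_flags(8, DMA_PREP_CONTINUE) == 11 both spill into the 16-source path. The old (src_cnt > 8) && (dma->max_pq > 8) test at the call sites below under-counted exactly those continuation cases.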
| 1221 | static struct dma_async_tx_descriptor * | 1051 | static struct dma_async_tx_descriptor * |
| 1222 | ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, | 1052 | ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, |
| 1223 | unsigned int src_cnt, const unsigned char *scf, size_t len, | 1053 | unsigned int src_cnt, const unsigned char *scf, size_t len, |
| 1224 | unsigned long flags) | 1054 | unsigned long flags) |
| 1225 | { | 1055 | { |
| 1226 | struct dma_device *dma = chan->device; | ||
| 1227 | |||
| 1228 | /* specify valid address for disabled result */ | 1056 | /* specify valid address for disabled result */ |
| 1229 | if (flags & DMA_PREP_PQ_DISABLE_P) | 1057 | if (flags & DMA_PREP_PQ_DISABLE_P) |
| 1230 | dst[0] = dst[1]; | 1058 | dst[0] = dst[1]; |
| @@ -1244,7 +1072,7 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, | |||
| 1244 | single_source_coef[0] = scf[0]; | 1072 | single_source_coef[0] = scf[0]; |
| 1245 | single_source_coef[1] = 0; | 1073 | single_source_coef[1] = 0; |
| 1246 | 1074 | ||
| 1247 | return (src_cnt > 8) && (dma->max_pq > 8) ? | 1075 | return src_cnt_flags(src_cnt, flags) > 8 ? |
| 1248 | __ioat3_prep_pq16_lock(chan, NULL, dst, single_source, | 1076 | __ioat3_prep_pq16_lock(chan, NULL, dst, single_source, |
| 1249 | 2, single_source_coef, len, | 1077 | 2, single_source_coef, len, |
| 1250 | flags) : | 1078 | flags) : |
| @@ -1252,7 +1080,7 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, | |||
| 1252 | single_source_coef, len, flags); | 1080 | single_source_coef, len, flags); |
| 1253 | 1081 | ||
| 1254 | } else { | 1082 | } else { |
| 1255 | return (src_cnt > 8) && (dma->max_pq > 8) ? | 1083 | return src_cnt_flags(src_cnt, flags) > 8 ? |
| 1256 | __ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt, | 1084 | __ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt, |
| 1257 | scf, len, flags) : | 1085 | scf, len, flags) : |
| 1258 | __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt, | 1086 | __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt, |
| @@ -1265,8 +1093,6 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, | |||
| 1265 | unsigned int src_cnt, const unsigned char *scf, size_t len, | 1093 | unsigned int src_cnt, const unsigned char *scf, size_t len, |
| 1266 | enum sum_check_flags *pqres, unsigned long flags) | 1094 | enum sum_check_flags *pqres, unsigned long flags) |
| 1267 | { | 1095 | { |
| 1268 | struct dma_device *dma = chan->device; | ||
| 1269 | |||
| 1270 | /* specify valid address for disabled result */ | 1096 | /* specify valid address for disabled result */ |
| 1271 | if (flags & DMA_PREP_PQ_DISABLE_P) | 1097 | if (flags & DMA_PREP_PQ_DISABLE_P) |
| 1272 | pq[0] = pq[1]; | 1098 | pq[0] = pq[1]; |
| @@ -1278,7 +1104,7 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, | |||
| 1278 | */ | 1104 | */ |
| 1279 | *pqres = 0; | 1105 | *pqres = 0; |
| 1280 | 1106 | ||
| 1281 | return (src_cnt > 8) && (dma->max_pq > 8) ? | 1107 | return src_cnt_flags(src_cnt, flags) > 8 ? |
| 1282 | __ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len, | 1108 | __ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len, |
| 1283 | flags) : | 1109 | flags) : |
| 1284 | __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len, | 1110 | __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len, |
| @@ -1289,7 +1115,6 @@ static struct dma_async_tx_descriptor * | |||
| 1289 | ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, | 1115 | ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, |
| 1290 | unsigned int src_cnt, size_t len, unsigned long flags) | 1116 | unsigned int src_cnt, size_t len, unsigned long flags) |
| 1291 | { | 1117 | { |
| 1292 | struct dma_device *dma = chan->device; | ||
| 1293 | unsigned char scf[src_cnt]; | 1118 | unsigned char scf[src_cnt]; |
| 1294 | dma_addr_t pq[2]; | 1119 | dma_addr_t pq[2]; |
| 1295 | 1120 | ||
| @@ -1298,7 +1123,7 @@ ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, | |||
| 1298 | flags |= DMA_PREP_PQ_DISABLE_Q; | 1123 | flags |= DMA_PREP_PQ_DISABLE_Q; |
| 1299 | pq[1] = dst; /* specify valid address for disabled result */ | 1124 | pq[1] = dst; /* specify valid address for disabled result */ |
| 1300 | 1125 | ||
| 1301 | return (src_cnt > 8) && (dma->max_pq > 8) ? | 1126 | return src_cnt_flags(src_cnt, flags) > 8 ? |
| 1302 | __ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len, | 1127 | __ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len, |
| 1303 | flags) : | 1128 | flags) : |
| 1304 | __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len, | 1129 | __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len, |
| @@ -1310,7 +1135,6 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, | |||
| 1310 | unsigned int src_cnt, size_t len, | 1135 | unsigned int src_cnt, size_t len, |
| 1311 | enum sum_check_flags *result, unsigned long flags) | 1136 | enum sum_check_flags *result, unsigned long flags) |
| 1312 | { | 1137 | { |
| 1313 | struct dma_device *dma = chan->device; | ||
| 1314 | unsigned char scf[src_cnt]; | 1138 | unsigned char scf[src_cnt]; |
| 1315 | dma_addr_t pq[2]; | 1139 | dma_addr_t pq[2]; |
| 1316 | 1140 | ||
| @@ -1324,8 +1148,7 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, | |||
| 1324 | flags |= DMA_PREP_PQ_DISABLE_Q; | 1148 | flags |= DMA_PREP_PQ_DISABLE_Q; |
| 1325 | pq[1] = pq[0]; /* specify valid address for disabled result */ | 1149 | pq[1] = pq[0]; /* specify valid address for disabled result */ |
| 1326 | 1150 | ||
| 1327 | 1151 | return src_cnt_flags(src_cnt, flags) > 8 ? | |
| 1328 | return (src_cnt > 8) && (dma->max_pq > 8) ? | ||
| 1329 | __ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1, | 1152 | __ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1, |
| 1330 | scf, len, flags) : | 1153 | scf, len, flags) : |
| 1331 | __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, | 1154 | __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, |
| @@ -1444,9 +1267,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) | |||
| 1444 | DMA_TO_DEVICE); | 1267 | DMA_TO_DEVICE); |
| 1445 | tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs, | 1268 | tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs, |
| 1446 | IOAT_NUM_SRC_TEST, PAGE_SIZE, | 1269 | IOAT_NUM_SRC_TEST, PAGE_SIZE, |
| 1447 | DMA_PREP_INTERRUPT | | 1270 | DMA_PREP_INTERRUPT); |
| 1448 | DMA_COMPL_SKIP_SRC_UNMAP | | ||
| 1449 | DMA_COMPL_SKIP_DEST_UNMAP); | ||
| 1450 | 1271 | ||
| 1451 | if (!tx) { | 1272 | if (!tx) { |
| 1452 | dev_err(dev, "Self-test xor prep failed\n"); | 1273 | dev_err(dev, "Self-test xor prep failed\n"); |
| @@ -1468,7 +1289,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) | |||
| 1468 | 1289 | ||
| 1469 | tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); | 1290 | tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); |
| 1470 | 1291 | ||
| 1471 | if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { | 1292 | if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { |
| 1472 | dev_err(dev, "Self-test xor timed out\n"); | 1293 | dev_err(dev, "Self-test xor timed out\n"); |
| 1473 | err = -ENODEV; | 1294 | err = -ENODEV; |
| 1474 | goto dma_unmap; | 1295 | goto dma_unmap; |
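The DMA_SUCCESS to DMA_COMPLETE substitutions that recur through the rest of this patch are one tree-wide rename of the dmaengine completion status. A sketch of the resulting enum; whether the old name survived as a transitional alias is an assumption here:

    enum dma_status {
        DMA_COMPLETE,        /* was DMA_SUCCESS */
        DMA_IN_PROGRESS,
        DMA_PAUSED,
        DMA_ERROR,
    };
    #define DMA_SUCCESS DMA_COMPLETE    /* assumed transitional alias */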
| @@ -1507,9 +1328,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) | |||
| 1507 | DMA_TO_DEVICE); | 1328 | DMA_TO_DEVICE); |
| 1508 | tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, | 1329 | tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, |
| 1509 | IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, | 1330 | IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, |
| 1510 | &xor_val_result, DMA_PREP_INTERRUPT | | 1331 | &xor_val_result, DMA_PREP_INTERRUPT); |
| 1511 | DMA_COMPL_SKIP_SRC_UNMAP | | ||
| 1512 | DMA_COMPL_SKIP_DEST_UNMAP); | ||
| 1513 | if (!tx) { | 1332 | if (!tx) { |
| 1514 | dev_err(dev, "Self-test zero prep failed\n"); | 1333 | dev_err(dev, "Self-test zero prep failed\n"); |
| 1515 | err = -ENODEV; | 1334 | err = -ENODEV; |
| @@ -1530,7 +1349,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) | |||
| 1530 | 1349 | ||
| 1531 | tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); | 1350 | tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); |
| 1532 | 1351 | ||
| 1533 | if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { | 1352 | if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { |
| 1534 | dev_err(dev, "Self-test validate timed out\n"); | 1353 | dev_err(dev, "Self-test validate timed out\n"); |
| 1535 | err = -ENODEV; | 1354 | err = -ENODEV; |
| 1536 | goto dma_unmap; | 1355 | goto dma_unmap; |
| @@ -1545,6 +1364,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) | |||
| 1545 | goto free_resources; | 1364 | goto free_resources; |
| 1546 | } | 1365 | } |
| 1547 | 1366 | ||
| 1367 | memset(page_address(dest), 0, PAGE_SIZE); | ||
| 1368 | |||
| 1548 | /* test for non-zero parity sum */ | 1369 | /* test for non-zero parity sum */ |
| 1549 | op = IOAT_OP_XOR_VAL; | 1370 | op = IOAT_OP_XOR_VAL; |
| 1550 | 1371 | ||
| @@ -1554,9 +1375,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) | |||
| 1554 | DMA_TO_DEVICE); | 1375 | DMA_TO_DEVICE); |
| 1555 | tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, | 1376 | tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, |
| 1556 | IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, | 1377 | IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, |
| 1557 | &xor_val_result, DMA_PREP_INTERRUPT | | 1378 | &xor_val_result, DMA_PREP_INTERRUPT); |
| 1558 | DMA_COMPL_SKIP_SRC_UNMAP | | ||
| 1559 | DMA_COMPL_SKIP_DEST_UNMAP); | ||
| 1560 | if (!tx) { | 1379 | if (!tx) { |
| 1561 | dev_err(dev, "Self-test 2nd zero prep failed\n"); | 1380 | dev_err(dev, "Self-test 2nd zero prep failed\n"); |
| 1562 | err = -ENODEV; | 1381 | err = -ENODEV; |
| @@ -1577,7 +1396,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) | |||
| 1577 | 1396 | ||
| 1578 | tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); | 1397 | tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); |
| 1579 | 1398 | ||
| 1580 | if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { | 1399 | if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { |
| 1581 | dev_err(dev, "Self-test 2nd validate timed out\n"); | 1400 | dev_err(dev, "Self-test 2nd validate timed out\n"); |
| 1582 | err = -ENODEV; | 1401 | err = -ENODEV; |
| 1583 | goto dma_unmap; | 1402 | goto dma_unmap; |
| @@ -1630,52 +1449,36 @@ static int ioat3_dma_self_test(struct ioatdma_device *device) | |||
| 1630 | 1449 | ||
| 1631 | static int ioat3_irq_reinit(struct ioatdma_device *device) | 1450 | static int ioat3_irq_reinit(struct ioatdma_device *device) |
| 1632 | { | 1451 | { |
| 1633 | int msixcnt = device->common.chancnt; | ||
| 1634 | struct pci_dev *pdev = device->pdev; | 1452 | struct pci_dev *pdev = device->pdev; |
| 1635 | int i; | 1453 | int irq = pdev->irq, i; |
| 1636 | struct msix_entry *msix; | 1454 | |
| 1637 | struct ioat_chan_common *chan; | 1455 | if (!is_bwd_ioat(pdev)) |
| 1638 | int err = 0; | 1456 | return 0; |
| 1639 | 1457 | ||
| 1640 | switch (device->irq_mode) { | 1458 | switch (device->irq_mode) { |
| 1641 | case IOAT_MSIX: | 1459 | case IOAT_MSIX: |
| 1460 | for (i = 0; i < device->common.chancnt; i++) { | ||
| 1461 | struct msix_entry *msix = &device->msix_entries[i]; | ||
| 1462 | struct ioat_chan_common *chan; | ||
| 1642 | 1463 | ||
| 1643 | for (i = 0; i < msixcnt; i++) { | ||
| 1644 | msix = &device->msix_entries[i]; | ||
| 1645 | chan = ioat_chan_by_index(device, i); | 1464 | chan = ioat_chan_by_index(device, i); |
| 1646 | devm_free_irq(&pdev->dev, msix->vector, chan); | 1465 | devm_free_irq(&pdev->dev, msix->vector, chan); |
| 1647 | } | 1466 | } |
| 1648 | 1467 | ||
| 1649 | pci_disable_msix(pdev); | 1468 | pci_disable_msix(pdev); |
| 1650 | break; | 1469 | break; |
| 1651 | |||
| 1652 | case IOAT_MSIX_SINGLE: | ||
| 1653 | msix = &device->msix_entries[0]; | ||
| 1654 | chan = ioat_chan_by_index(device, 0); | ||
| 1655 | devm_free_irq(&pdev->dev, msix->vector, chan); | ||
| 1656 | pci_disable_msix(pdev); | ||
| 1657 | break; | ||
| 1658 | |||
| 1659 | case IOAT_MSI: | 1470 | case IOAT_MSI: |
| 1660 | chan = ioat_chan_by_index(device, 0); | ||
| 1661 | devm_free_irq(&pdev->dev, pdev->irq, chan); | ||
| 1662 | pci_disable_msi(pdev); | 1471 | pci_disable_msi(pdev); |
| 1663 | break; | 1472 | /* fall through */ |
| 1664 | |||
| 1665 | case IOAT_INTX: | 1473 | case IOAT_INTX: |
| 1666 | chan = ioat_chan_by_index(device, 0); | 1474 | devm_free_irq(&pdev->dev, irq, device); |
| 1667 | devm_free_irq(&pdev->dev, pdev->irq, chan); | ||
| 1668 | break; | 1475 | break; |
| 1669 | |||
| 1670 | default: | 1476 | default: |
| 1671 | return 0; | 1477 | return 0; |
| 1672 | } | 1478 | } |
| 1673 | |||
| 1674 | device->irq_mode = IOAT_NOIRQ; | 1479 | device->irq_mode = IOAT_NOIRQ; |
| 1675 | 1480 | ||
| 1676 | err = ioat_dma_setup_interrupts(device); | 1481 | return ioat_dma_setup_interrupts(device); |
| 1677 | |||
| 1678 | return err; | ||
| 1679 | } | 1482 | } |
| 1680 | 1483 | ||
| 1681 | static int ioat3_reset_hw(struct ioat_chan_common *chan) | 1484 | static int ioat3_reset_hw(struct ioat_chan_common *chan) |
| @@ -1718,14 +1521,12 @@ static int ioat3_reset_hw(struct ioat_chan_common *chan) | |||
| 1718 | } | 1521 | } |
| 1719 | 1522 | ||
| 1720 | err = ioat2_reset_sync(chan, msecs_to_jiffies(200)); | 1523 | err = ioat2_reset_sync(chan, msecs_to_jiffies(200)); |
| 1721 | if (err) { | 1524 | if (!err) |
| 1722 | dev_err(&pdev->dev, "Failed to reset!\n"); | ||
| 1723 | return err; | ||
| 1724 | } | ||
| 1725 | |||
| 1726 | if (device->irq_mode != IOAT_NOIRQ && is_bwd_ioat(pdev)) | ||
| 1727 | err = ioat3_irq_reinit(device); | 1525 | err = ioat3_irq_reinit(device); |
| 1728 | 1526 | ||
| 1527 | if (err) | ||
| 1528 | dev_err(&pdev->dev, "Failed to reset: %d\n", err); | ||
| 1529 | |||
| 1729 | return err; | 1530 | return err; |
| 1730 | } | 1531 | } |
| 1731 | 1532 | ||
| @@ -1835,21 +1636,15 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca) | |||
| 1835 | char pool_name[14]; | 1636 | char pool_name[14]; |
| 1836 | int i; | 1637 | int i; |
| 1837 | 1638 | ||
| 1838 | /* allocate sw descriptor pool for SED */ | ||
| 1839 | device->sed_pool = kmem_cache_create("ioat_sed", | ||
| 1840 | sizeof(struct ioat_sed_ent), 0, 0, NULL); | ||
| 1841 | if (!device->sed_pool) | ||
| 1842 | return -ENOMEM; | ||
| 1843 | |||
| 1844 | for (i = 0; i < MAX_SED_POOLS; i++) { | 1639 | for (i = 0; i < MAX_SED_POOLS; i++) { |
| 1845 | snprintf(pool_name, 14, "ioat_hw%d_sed", i); | 1640 | snprintf(pool_name, 14, "ioat_hw%d_sed", i); |
| 1846 | 1641 | ||
| 1847 | /* allocate SED DMA pool */ | 1642 | /* allocate SED DMA pool */ |
| 1848 | device->sed_hw_pool[i] = dma_pool_create(pool_name, | 1643 | device->sed_hw_pool[i] = dmam_pool_create(pool_name, |
| 1849 | &pdev->dev, | 1644 | &pdev->dev, |
| 1850 | SED_SIZE * (i + 1), 64, 0); | 1645 | SED_SIZE * (i + 1), 64, 0); |
| 1851 | if (!device->sed_hw_pool[i]) | 1646 | if (!device->sed_hw_pool[i]) |
| 1852 | goto sed_pool_cleanup; | 1647 | return -ENOMEM; |
| 1853 | 1648 | ||
| 1854 | } | 1649 | } |
| 1855 | } | 1650 | } |
| @@ -1875,28 +1670,4 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca) | |||
| 1875 | device->dca = ioat3_dca_init(pdev, device->reg_base); | 1670 | device->dca = ioat3_dca_init(pdev, device->reg_base); |
| 1876 | 1671 | ||
| 1877 | return 0; | 1672 | return 0; |
| 1878 | |||
| 1879 | sed_pool_cleanup: | ||
| 1880 | if (device->sed_pool) { | ||
| 1881 | int i; | ||
| 1882 | kmem_cache_destroy(device->sed_pool); | ||
| 1883 | |||
| 1884 | for (i = 0; i < MAX_SED_POOLS; i++) | ||
| 1885 | if (device->sed_hw_pool[i]) | ||
| 1886 | dma_pool_destroy(device->sed_hw_pool[i]); | ||
| 1887 | } | ||
| 1888 | |||
| 1889 | return -ENOMEM; | ||
| 1890 | } | ||
| 1891 | |||
| 1892 | void ioat3_dma_remove(struct ioatdma_device *device) | ||
| 1893 | { | ||
| 1894 | if (device->sed_pool) { | ||
| 1895 | int i; | ||
| 1896 | kmem_cache_destroy(device->sed_pool); | ||
| 1897 | |||
| 1898 | for (i = 0; i < MAX_SED_POOLS; i++) | ||
| 1899 | if (device->sed_hw_pool[i]) | ||
| 1900 | dma_pool_destroy(device->sed_hw_pool[i]); | ||
| 1901 | } | ||
| 1902 | } | 1673 | } |
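Switching dma_pool_create() to the managed dmam_pool_create() is what allows deleting the sed_pool_cleanup label and the whole ioat3_dma_remove() teardown above: devres destroys the pools automatically when the device is unbound. The surviving pattern, condensed from the hunk:

    for (i = 0; i < MAX_SED_POOLS; i++) {
        device->sed_hw_pool[i] = dmam_pool_create(pool_name, &pdev->dev,
                                                  SED_SIZE * (i + 1), 64, 0);
        if (!device->sed_hw_pool[i])
            return -ENOMEM;    /* pools created so far are freed by devres */
    }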
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c index 2c8d560e6334..1d051cd045db 100644 --- a/drivers/dma/ioat/pci.c +++ b/drivers/dma/ioat/pci.c | |||
| @@ -123,6 +123,7 @@ module_param(ioat_dca_enabled, int, 0644); | |||
| 123 | MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"); | 123 | MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"); |
| 124 | 124 | ||
| 125 | struct kmem_cache *ioat2_cache; | 125 | struct kmem_cache *ioat2_cache; |
| 126 | struct kmem_cache *ioat3_sed_cache; | ||
| 126 | 127 | ||
| 127 | #define DRV_NAME "ioatdma" | 128 | #define DRV_NAME "ioatdma" |
| 128 | 129 | ||
| @@ -207,9 +208,6 @@ static void ioat_remove(struct pci_dev *pdev) | |||
| 207 | if (!device) | 208 | if (!device) |
| 208 | return; | 209 | return; |
| 209 | 210 | ||
| 210 | if (device->version >= IOAT_VER_3_0) | ||
| 211 | ioat3_dma_remove(device); | ||
| 212 | |||
| 213 | dev_err(&pdev->dev, "Removing dma and dca services\n"); | 211 | dev_err(&pdev->dev, "Removing dma and dca services\n"); |
| 214 | if (device->dca) { | 212 | if (device->dca) { |
| 215 | unregister_dca_provider(device->dca, &pdev->dev); | 213 | unregister_dca_provider(device->dca, &pdev->dev); |
| @@ -221,7 +219,7 @@ static void ioat_remove(struct pci_dev *pdev) | |||
| 221 | 219 | ||
| 222 | static int __init ioat_init_module(void) | 220 | static int __init ioat_init_module(void) |
| 223 | { | 221 | { |
| 224 | int err; | 222 | int err = -ENOMEM; |
| 225 | 223 | ||
| 226 | pr_info("%s: Intel(R) QuickData Technology Driver %s\n", | 224 | pr_info("%s: Intel(R) QuickData Technology Driver %s\n", |
| 227 | DRV_NAME, IOAT_DMA_VERSION); | 225 | DRV_NAME, IOAT_DMA_VERSION); |
| @@ -231,9 +229,21 @@ static int __init ioat_init_module(void) | |||
| 231 | if (!ioat2_cache) | 229 | if (!ioat2_cache) |
| 232 | return -ENOMEM; | 230 | return -ENOMEM; |
| 233 | 231 | ||
| 232 | ioat3_sed_cache = KMEM_CACHE(ioat_sed_ent, 0); | ||
| 233 | if (!ioat3_sed_cache) | ||
| 234 | goto err_ioat2_cache; | ||
| 235 | |||
| 234 | err = pci_register_driver(&ioat_pci_driver); | 236 | err = pci_register_driver(&ioat_pci_driver); |
| 235 | if (err) | 237 | if (err) |
| 236 | kmem_cache_destroy(ioat2_cache); | 238 | goto err_ioat3_cache; |
| 239 | |||
| 240 | return 0; | ||
| 241 | |||
| 242 | err_ioat3_cache: | ||
| 243 | kmem_cache_destroy(ioat3_sed_cache); | ||
| 244 | |||
| 245 | err_ioat2_cache: | ||
| 246 | kmem_cache_destroy(ioat2_cache); | ||
| 237 | 247 | ||
| 238 | return err; | 248 | return err; |
| 239 | } | 249 | } |
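The reworked ioat_init_module() pre-loads err with -ENOMEM so both cache-creation failures share one value, then unwinds with gotos. A generic sketch of the idiom (all names illustrative, not from the driver):

    static int __init example_init(void)
    {
        int err = -ENOMEM;    /* covers both allocation failures */

        cache_a = KMEM_CACHE(struct_a, 0);
        if (!cache_a)
            return err;

        cache_b = KMEM_CACHE(struct_b, 0);
        if (!cache_b)
            goto err_a;

        err = register_something(&thing);
        if (err)
            goto err_b;

        return 0;

    err_b:
        kmem_cache_destroy(cache_b);
    err_a:
        kmem_cache_destroy(cache_a);
        return err;
    }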
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index dd8b44a56e5d..c56137bc3868 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c | |||
| @@ -61,80 +61,6 @@ static void iop_adma_free_slots(struct iop_adma_desc_slot *slot) | |||
| 61 | } | 61 | } |
| 62 | } | 62 | } |
| 63 | 63 | ||
| 64 | static void | ||
| 65 | iop_desc_unmap(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc) | ||
| 66 | { | ||
| 67 | struct dma_async_tx_descriptor *tx = &desc->async_tx; | ||
| 68 | struct iop_adma_desc_slot *unmap = desc->group_head; | ||
| 69 | struct device *dev = &iop_chan->device->pdev->dev; | ||
| 70 | u32 len = unmap->unmap_len; | ||
| 71 | enum dma_ctrl_flags flags = tx->flags; | ||
| 72 | u32 src_cnt; | ||
| 73 | dma_addr_t addr; | ||
| 74 | dma_addr_t dest; | ||
| 75 | |||
| 76 | src_cnt = unmap->unmap_src_cnt; | ||
| 77 | dest = iop_desc_get_dest_addr(unmap, iop_chan); | ||
| 78 | if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
| 79 | enum dma_data_direction dir; | ||
| 80 | |||
| 81 | if (src_cnt > 1) /* is xor? */ | ||
| 82 | dir = DMA_BIDIRECTIONAL; | ||
| 83 | else | ||
| 84 | dir = DMA_FROM_DEVICE; | ||
| 85 | |||
| 86 | dma_unmap_page(dev, dest, len, dir); | ||
| 87 | } | ||
| 88 | |||
| 89 | if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
| 90 | while (src_cnt--) { | ||
| 91 | addr = iop_desc_get_src_addr(unmap, iop_chan, src_cnt); | ||
| 92 | if (addr == dest) | ||
| 93 | continue; | ||
| 94 | dma_unmap_page(dev, addr, len, DMA_TO_DEVICE); | ||
| 95 | } | ||
| 96 | } | ||
| 97 | desc->group_head = NULL; | ||
| 98 | } | ||
| 99 | |||
| 100 | static void | ||
| 101 | iop_desc_unmap_pq(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc) | ||
| 102 | { | ||
| 103 | struct dma_async_tx_descriptor *tx = &desc->async_tx; | ||
| 104 | struct iop_adma_desc_slot *unmap = desc->group_head; | ||
| 105 | struct device *dev = &iop_chan->device->pdev->dev; | ||
| 106 | u32 len = unmap->unmap_len; | ||
| 107 | enum dma_ctrl_flags flags = tx->flags; | ||
| 108 | u32 src_cnt = unmap->unmap_src_cnt; | ||
| 109 | dma_addr_t pdest = iop_desc_get_dest_addr(unmap, iop_chan); | ||
| 110 | dma_addr_t qdest = iop_desc_get_qdest_addr(unmap, iop_chan); | ||
| 111 | int i; | ||
| 112 | |||
| 113 | if (tx->flags & DMA_PREP_CONTINUE) | ||
| 114 | src_cnt -= 3; | ||
| 115 | |||
| 116 | if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP) && !desc->pq_check_result) { | ||
| 117 | dma_unmap_page(dev, pdest, len, DMA_BIDIRECTIONAL); | ||
| 118 | dma_unmap_page(dev, qdest, len, DMA_BIDIRECTIONAL); | ||
| 119 | } | ||
| 120 | |||
| 121 | if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
| 122 | dma_addr_t addr; | ||
| 123 | |||
| 124 | for (i = 0; i < src_cnt; i++) { | ||
| 125 | addr = iop_desc_get_src_addr(unmap, iop_chan, i); | ||
| 126 | dma_unmap_page(dev, addr, len, DMA_TO_DEVICE); | ||
| 127 | } | ||
| 128 | if (desc->pq_check_result) { | ||
| 129 | dma_unmap_page(dev, pdest, len, DMA_TO_DEVICE); | ||
| 130 | dma_unmap_page(dev, qdest, len, DMA_TO_DEVICE); | ||
| 131 | } | ||
| 132 | } | ||
| 133 | |||
| 134 | desc->group_head = NULL; | ||
| 135 | } | ||
| 136 | |||
| 137 | |||
| 138 | static dma_cookie_t | 64 | static dma_cookie_t |
| 139 | iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc, | 65 | iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc, |
| 140 | struct iop_adma_chan *iop_chan, dma_cookie_t cookie) | 66 | struct iop_adma_chan *iop_chan, dma_cookie_t cookie) |
| @@ -152,15 +78,9 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc, | |||
| 152 | if (tx->callback) | 78 | if (tx->callback) |
| 153 | tx->callback(tx->callback_param); | 79 | tx->callback(tx->callback_param); |
| 154 | 80 | ||
| 155 | /* unmap dma addresses | 81 | dma_descriptor_unmap(tx); |
| 156 | * (unmap_single vs unmap_page?) | 82 | if (desc->group_head) |
| 157 | */ | 83 | desc->group_head = NULL; |
| 158 | if (desc->group_head && desc->unmap_len) { | ||
| 159 | if (iop_desc_is_pq(desc)) | ||
| 160 | iop_desc_unmap_pq(iop_chan, desc); | ||
| 161 | else | ||
| 162 | iop_desc_unmap(iop_chan, desc); | ||
| 163 | } | ||
| 164 | } | 84 | } |
| 165 | 85 | ||
| 166 | /* run dependent operations */ | 86 | /* run dependent operations */ |
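dma_descriptor_unmap() is the new core helper that replaces the hand-rolled unmap paths deleted here and in mv_xor below. Its dmaengine implementation is, to a close approximation, a thin wrapper over the descriptor's unmap data:

    static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
    {
        if (tx->unmap) {
            dmaengine_unmap_put(tx->unmap);    /* drop the submitter's mappings */
            tx->unmap = NULL;
        }
    }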
| @@ -591,7 +511,6 @@ iop_adma_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags) | |||
| 591 | if (sw_desc) { | 511 | if (sw_desc) { |
| 592 | grp_start = sw_desc->group_head; | 512 | grp_start = sw_desc->group_head; |
| 593 | iop_desc_init_interrupt(grp_start, iop_chan); | 513 | iop_desc_init_interrupt(grp_start, iop_chan); |
| 594 | grp_start->unmap_len = 0; | ||
| 595 | sw_desc->async_tx.flags = flags; | 514 | sw_desc->async_tx.flags = flags; |
| 596 | } | 515 | } |
| 597 | spin_unlock_bh(&iop_chan->lock); | 516 | spin_unlock_bh(&iop_chan->lock); |
| @@ -623,8 +542,6 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, | |||
| 623 | iop_desc_set_byte_count(grp_start, iop_chan, len); | 542 | iop_desc_set_byte_count(grp_start, iop_chan, len); |
| 624 | iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest); | 543 | iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest); |
| 625 | iop_desc_set_memcpy_src_addr(grp_start, dma_src); | 544 | iop_desc_set_memcpy_src_addr(grp_start, dma_src); |
| 626 | sw_desc->unmap_src_cnt = 1; | ||
| 627 | sw_desc->unmap_len = len; | ||
| 628 | sw_desc->async_tx.flags = flags; | 545 | sw_desc->async_tx.flags = flags; |
| 629 | } | 546 | } |
| 630 | spin_unlock_bh(&iop_chan->lock); | 547 | spin_unlock_bh(&iop_chan->lock); |
| @@ -657,8 +574,6 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest, | |||
| 657 | iop_desc_init_xor(grp_start, src_cnt, flags); | 574 | iop_desc_init_xor(grp_start, src_cnt, flags); |
| 658 | iop_desc_set_byte_count(grp_start, iop_chan, len); | 575 | iop_desc_set_byte_count(grp_start, iop_chan, len); |
| 659 | iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest); | 576 | iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest); |
| 660 | sw_desc->unmap_src_cnt = src_cnt; | ||
| 661 | sw_desc->unmap_len = len; | ||
| 662 | sw_desc->async_tx.flags = flags; | 577 | sw_desc->async_tx.flags = flags; |
| 663 | while (src_cnt--) | 578 | while (src_cnt--) |
| 664 | iop_desc_set_xor_src_addr(grp_start, src_cnt, | 579 | iop_desc_set_xor_src_addr(grp_start, src_cnt, |
| @@ -694,8 +609,6 @@ iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src, | |||
| 694 | grp_start->xor_check_result = result; | 609 | grp_start->xor_check_result = result; |
| 695 | pr_debug("\t%s: grp_start->xor_check_result: %p\n", | 610 | pr_debug("\t%s: grp_start->xor_check_result: %p\n", |
| 696 | __func__, grp_start->xor_check_result); | 611 | __func__, grp_start->xor_check_result); |
| 697 | sw_desc->unmap_src_cnt = src_cnt; | ||
| 698 | sw_desc->unmap_len = len; | ||
| 699 | sw_desc->async_tx.flags = flags; | 612 | sw_desc->async_tx.flags = flags; |
| 700 | while (src_cnt--) | 613 | while (src_cnt--) |
| 701 | iop_desc_set_zero_sum_src_addr(grp_start, src_cnt, | 614 | iop_desc_set_zero_sum_src_addr(grp_start, src_cnt, |
| @@ -748,8 +661,6 @@ iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, | |||
| 748 | dst[0] = dst[1] & 0x7; | 661 | dst[0] = dst[1] & 0x7; |
| 749 | 662 | ||
| 750 | iop_desc_set_pq_addr(g, dst); | 663 | iop_desc_set_pq_addr(g, dst); |
| 751 | sw_desc->unmap_src_cnt = src_cnt; | ||
| 752 | sw_desc->unmap_len = len; | ||
| 753 | sw_desc->async_tx.flags = flags; | 664 | sw_desc->async_tx.flags = flags; |
| 754 | for (i = 0; i < src_cnt; i++) | 665 | for (i = 0; i < src_cnt; i++) |
| 755 | iop_desc_set_pq_src_addr(g, i, src[i], scf[i]); | 666 | iop_desc_set_pq_src_addr(g, i, src[i], scf[i]); |
| @@ -804,8 +715,6 @@ iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, | |||
| 804 | g->pq_check_result = pqres; | 715 | g->pq_check_result = pqres; |
| 805 | pr_debug("\t%s: g->pq_check_result: %p\n", | 716 | pr_debug("\t%s: g->pq_check_result: %p\n", |
| 806 | __func__, g->pq_check_result); | 717 | __func__, g->pq_check_result); |
| 807 | sw_desc->unmap_src_cnt = src_cnt+2; | ||
| 808 | sw_desc->unmap_len = len; | ||
| 809 | sw_desc->async_tx.flags = flags; | 718 | sw_desc->async_tx.flags = flags; |
| 810 | while (src_cnt--) | 719 | while (src_cnt--) |
| 811 | iop_desc_set_pq_zero_sum_src_addr(g, src_cnt, | 720 | iop_desc_set_pq_zero_sum_src_addr(g, src_cnt, |
| @@ -864,7 +773,7 @@ static enum dma_status iop_adma_status(struct dma_chan *chan, | |||
| 864 | int ret; | 773 | int ret; |
| 865 | 774 | ||
| 866 | ret = dma_cookie_status(chan, cookie, txstate); | 775 | ret = dma_cookie_status(chan, cookie, txstate); |
| 867 | if (ret == DMA_SUCCESS) | 776 | if (ret == DMA_COMPLETE) |
| 868 | return ret; | 777 | return ret; |
| 869 | 778 | ||
| 870 | iop_adma_slot_cleanup(iop_chan); | 779 | iop_adma_slot_cleanup(iop_chan); |
| @@ -983,7 +892,7 @@ static int iop_adma_memcpy_self_test(struct iop_adma_device *device) | |||
| 983 | msleep(1); | 892 | msleep(1); |
| 984 | 893 | ||
| 985 | if (iop_adma_status(dma_chan, cookie, NULL) != | 894 | if (iop_adma_status(dma_chan, cookie, NULL) != |
| 986 | DMA_SUCCESS) { | 895 | DMA_COMPLETE) { |
| 987 | dev_err(dma_chan->device->dev, | 896 | dev_err(dma_chan->device->dev, |
| 988 | "Self-test copy timed out, disabling\n"); | 897 | "Self-test copy timed out, disabling\n"); |
| 989 | err = -ENODEV; | 898 | err = -ENODEV; |
| @@ -1083,7 +992,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device) | |||
| 1083 | msleep(8); | 992 | msleep(8); |
| 1084 | 993 | ||
| 1085 | if (iop_adma_status(dma_chan, cookie, NULL) != | 994 | if (iop_adma_status(dma_chan, cookie, NULL) != |
| 1086 | DMA_SUCCESS) { | 995 | DMA_COMPLETE) { |
| 1087 | dev_err(dma_chan->device->dev, | 996 | dev_err(dma_chan->device->dev, |
| 1088 | "Self-test xor timed out, disabling\n"); | 997 | "Self-test xor timed out, disabling\n"); |
| 1089 | err = -ENODEV; | 998 | err = -ENODEV; |
| @@ -1129,7 +1038,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device) | |||
| 1129 | iop_adma_issue_pending(dma_chan); | 1038 | iop_adma_issue_pending(dma_chan); |
| 1130 | msleep(8); | 1039 | msleep(8); |
| 1131 | 1040 | ||
| 1132 | if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { | 1041 | if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { |
| 1133 | dev_err(dma_chan->device->dev, | 1042 | dev_err(dma_chan->device->dev, |
| 1134 | "Self-test zero sum timed out, disabling\n"); | 1043 | "Self-test zero sum timed out, disabling\n"); |
| 1135 | err = -ENODEV; | 1044 | err = -ENODEV; |
| @@ -1158,7 +1067,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device) | |||
| 1158 | iop_adma_issue_pending(dma_chan); | 1067 | iop_adma_issue_pending(dma_chan); |
| 1159 | msleep(8); | 1068 | msleep(8); |
| 1160 | 1069 | ||
| 1161 | if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { | 1070 | if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { |
| 1162 | dev_err(dma_chan->device->dev, | 1071 | dev_err(dma_chan->device->dev, |
| 1163 | "Self-test non-zero sum timed out, disabling\n"); | 1072 | "Self-test non-zero sum timed out, disabling\n"); |
| 1164 | err = -ENODEV; | 1073 | err = -ENODEV; |
| @@ -1254,7 +1163,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device) | |||
| 1254 | msleep(8); | 1163 | msleep(8); |
| 1255 | 1164 | ||
| 1256 | if (iop_adma_status(dma_chan, cookie, NULL) != | 1165 | if (iop_adma_status(dma_chan, cookie, NULL) != |
| 1257 | DMA_SUCCESS) { | 1166 | DMA_COMPLETE) { |
| 1258 | dev_err(dev, "Self-test pq timed out, disabling\n"); | 1167 | dev_err(dev, "Self-test pq timed out, disabling\n"); |
| 1259 | err = -ENODEV; | 1168 | err = -ENODEV; |
| 1260 | goto free_resources; | 1169 | goto free_resources; |
| @@ -1291,7 +1200,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device) | |||
| 1291 | msleep(8); | 1200 | msleep(8); |
| 1292 | 1201 | ||
| 1293 | if (iop_adma_status(dma_chan, cookie, NULL) != | 1202 | if (iop_adma_status(dma_chan, cookie, NULL) != |
| 1294 | DMA_SUCCESS) { | 1203 | DMA_COMPLETE) { |
| 1295 | dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n"); | 1204 | dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n"); |
| 1296 | err = -ENODEV; | 1205 | err = -ENODEV; |
| 1297 | goto free_resources; | 1206 | goto free_resources; |
| @@ -1323,7 +1232,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device) | |||
| 1323 | msleep(8); | 1232 | msleep(8); |
| 1324 | 1233 | ||
| 1325 | if (iop_adma_status(dma_chan, cookie, NULL) != | 1234 | if (iop_adma_status(dma_chan, cookie, NULL) != |
| 1326 | DMA_SUCCESS) { | 1235 | DMA_COMPLETE) { |
| 1327 | dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n"); | 1236 | dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n"); |
| 1328 | err = -ENODEV; | 1237 | err = -ENODEV; |
| 1329 | goto free_resources; | 1238 | goto free_resources; |
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index cb9c0bc317e8..128ca143486d 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c | |||
| @@ -1232,8 +1232,10 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id) | |||
| 1232 | desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list); | 1232 | desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list); |
| 1233 | descnew = desc; | 1233 | descnew = desc; |
| 1234 | 1234 | ||
| 1235 | dev_dbg(dev, "IDMAC irq %d, dma 0x%08x, next dma 0x%08x, current %d, curbuf 0x%08x\n", | 1235 | dev_dbg(dev, "IDMAC irq %d, dma %#llx, next dma %#llx, current %d, curbuf %#x\n", |
| 1236 | irq, sg_dma_address(*sg), sgnext ? sg_dma_address(sgnext) : 0, ichan->active_buffer, curbuf); | 1236 | irq, (u64)sg_dma_address(*sg), |
| 1237 | sgnext ? (u64)sg_dma_address(sgnext) : 0, | ||
| 1238 | ichan->active_buffer, curbuf); | ||
| 1237 | 1239 | ||
| 1238 | /* Find the descriptor of sgnext */ | 1240 | /* Find the descriptor of sgnext */ |
| 1239 | sgnew = idmac_sg_next(ichan, &descnew, *sg); | 1241 | sgnew = idmac_sg_next(ichan, &descnew, *sg); |
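The dev_dbg() change above applies the usual portability rule for dma_addr_t, which is 32 or 64 bits wide depending on CONFIG_ARCH_DMA_ADDR_T_64BIT: cast to u64 and use a ll format so one string serves both widths.

    dma_addr_t addr = sg_dma_address(sg);

    dev_dbg(dev, "dma %#llx\n", (u64)addr);    /* safe on 32- and 64-bit */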
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index a2c330f5f952..e26075408e9b 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c | |||
| @@ -344,7 +344,7 @@ static enum dma_status k3_dma_tx_status(struct dma_chan *chan, | |||
| 344 | size_t bytes = 0; | 344 | size_t bytes = 0; |
| 345 | 345 | ||
| 346 | ret = dma_cookie_status(&c->vc.chan, cookie, state); | 346 | ret = dma_cookie_status(&c->vc.chan, cookie, state); |
| 347 | if (ret == DMA_SUCCESS) | 347 | if (ret == DMA_COMPLETE) |
| 348 | return ret; | 348 | return ret; |
| 349 | 349 | ||
| 350 | spin_lock_irqsave(&c->vc.lock, flags); | 350 | spin_lock_irqsave(&c->vc.lock, flags); |
| @@ -693,7 +693,7 @@ static int k3_dma_probe(struct platform_device *op) | |||
| 693 | 693 | ||
| 694 | irq = platform_get_irq(op, 0); | 694 | irq = platform_get_irq(op, 0); |
| 695 | ret = devm_request_irq(&op->dev, irq, | 695 | ret = devm_request_irq(&op->dev, irq, |
| 696 | k3_dma_int_handler, IRQF_DISABLED, DRIVER_NAME, d); | 696 | k3_dma_int_handler, 0, DRIVER_NAME, d); |
| 697 | if (ret) | 697 | if (ret) |
| 698 | return ret; | 698 | return ret; |
| 699 | 699 | ||
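The IRQF_DISABLED removals here and in the mmp drivers below are mechanical: the flag had long been a no-op, since handlers run with interrupts disabled regardless, so passing 0 changes no behavior.

    /* before and after are equivalent */
    ret = devm_request_irq(dev, irq, handler, IRQF_DISABLED, name, data);
    ret = devm_request_irq(dev, irq, handler, 0, name, data);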
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index ff8d7827f8cb..dcb1e05149a7 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c | |||
| @@ -798,8 +798,7 @@ static void dma_do_tasklet(unsigned long data) | |||
| 798 | * move the descriptors to a temporary list so we can drop | 798 | * move the descriptors to a temporary list so we can drop |
| 799 | * the lock during the entire cleanup operation | 799 | * the lock during the entire cleanup operation |
| 800 | */ | 800 | */ |
| 801 | list_del(&desc->node); | 801 | list_move(&desc->node, &chain_cleanup); |
| 802 | list_add(&desc->node, &chain_cleanup); | ||
| 803 | 802 | ||
| 804 | /* | 803 | /* |
| 805 | * Look for the first list entry which has the ENDIRQEN flag | 804 | * Look for the first list entry which has the ENDIRQEN flag |
| @@ -863,7 +862,7 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, | |||
| 863 | 862 | ||
| 864 | if (irq) { | 863 | if (irq) { |
| 865 | ret = devm_request_irq(pdev->dev, irq, | 864 | ret = devm_request_irq(pdev->dev, irq, |
| 866 | mmp_pdma_chan_handler, IRQF_DISABLED, "pdma", phy); | 865 | mmp_pdma_chan_handler, 0, "pdma", phy); |
| 867 | if (ret) { | 866 | if (ret) { |
| 868 | dev_err(pdev->dev, "channel request irq fail!\n"); | 867 | dev_err(pdev->dev, "channel request irq fail!\n"); |
| 869 | return ret; | 868 | return ret; |
| @@ -970,7 +969,7 @@ static int mmp_pdma_probe(struct platform_device *op) | |||
| 970 | /* all chan share one irq, demux inside */ | 969 | /* all chan share one irq, demux inside */ |
| 971 | irq = platform_get_irq(op, 0); | 970 | irq = platform_get_irq(op, 0); |
| 972 | ret = devm_request_irq(pdev->dev, irq, | 971 | ret = devm_request_irq(pdev->dev, irq, |
| 973 | mmp_pdma_int_handler, IRQF_DISABLED, "pdma", pdev); | 972 | mmp_pdma_int_handler, 0, "pdma", pdev); |
| 974 | if (ret) | 973 | if (ret) |
| 975 | return ret; | 974 | return ret; |
| 976 | } | 975 | } |
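list_move() folds the deleted pair into one call; its list.h implementation is essentially:

    static inline void list_move(struct list_head *list, struct list_head *head)
    {
        __list_del_entry(list);
        list_add(list, head);
    }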
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index d3b6358e5a27..3ddacc14a736 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c | |||
| @@ -62,6 +62,11 @@ | |||
| 62 | #define TDCR_BURSTSZ_16B (0x3 << 6) | 62 | #define TDCR_BURSTSZ_16B (0x3 << 6) |
| 63 | #define TDCR_BURSTSZ_32B (0x6 << 6) | 63 | #define TDCR_BURSTSZ_32B (0x6 << 6) |
| 64 | #define TDCR_BURSTSZ_64B (0x7 << 6) | 64 | #define TDCR_BURSTSZ_64B (0x7 << 6) |
| 65 | #define TDCR_BURSTSZ_SQU_1B (0x5 << 6) | ||
| 66 | #define TDCR_BURSTSZ_SQU_2B (0x6 << 6) | ||
| 67 | #define TDCR_BURSTSZ_SQU_4B (0x0 << 6) | ||
| 68 | #define TDCR_BURSTSZ_SQU_8B (0x1 << 6) | ||
| 69 | #define TDCR_BURSTSZ_SQU_16B (0x3 << 6) | ||
| 65 | #define TDCR_BURSTSZ_SQU_32B (0x7 << 6) | 70 | #define TDCR_BURSTSZ_SQU_32B (0x7 << 6) |
| 66 | #define TDCR_BURSTSZ_128B (0x5 << 6) | 71 | #define TDCR_BURSTSZ_128B (0x5 << 6) |
| 67 | #define TDCR_DSTDIR_MSK (0x3 << 4) /* Dst Direction */ | 72 | #define TDCR_DSTDIR_MSK (0x3 << 4) /* Dst Direction */ |
| @@ -158,7 +163,7 @@ static void mmp_tdma_disable_chan(struct mmp_tdma_chan *tdmac) | |||
| 158 | /* disable irq */ | 163 | /* disable irq */ |
| 159 | writel(0, tdmac->reg_base + TDIMR); | 164 | writel(0, tdmac->reg_base + TDIMR); |
| 160 | 165 | ||
| 161 | tdmac->status = DMA_SUCCESS; | 166 | tdmac->status = DMA_COMPLETE; |
| 162 | } | 167 | } |
| 163 | 168 | ||
| 164 | static void mmp_tdma_resume_chan(struct mmp_tdma_chan *tdmac) | 169 | static void mmp_tdma_resume_chan(struct mmp_tdma_chan *tdmac) |
| @@ -228,8 +233,31 @@ static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac) | |||
| 228 | return -EINVAL; | 233 | return -EINVAL; |
| 229 | } | 234 | } |
| 230 | } else if (tdmac->type == PXA910_SQU) { | 235 | } else if (tdmac->type == PXA910_SQU) { |
| 231 | tdcr |= TDCR_BURSTSZ_SQU_32B; | ||
| 232 | tdcr |= TDCR_SSPMOD; | 236 | tdcr |= TDCR_SSPMOD; |
| 237 | |||
| 238 | switch (tdmac->burst_sz) { | ||
| 239 | case 1: | ||
| 240 | tdcr |= TDCR_BURSTSZ_SQU_1B; | ||
| 241 | break; | ||
| 242 | case 2: | ||
| 243 | tdcr |= TDCR_BURSTSZ_SQU_2B; | ||
| 244 | break; | ||
| 245 | case 4: | ||
| 246 | tdcr |= TDCR_BURSTSZ_SQU_4B; | ||
| 247 | break; | ||
| 248 | case 8: | ||
| 249 | tdcr |= TDCR_BURSTSZ_SQU_8B; | ||
| 250 | break; | ||
| 251 | case 16: | ||
| 252 | tdcr |= TDCR_BURSTSZ_SQU_16B; | ||
| 253 | break; | ||
| 254 | case 32: | ||
| 255 | tdcr |= TDCR_BURSTSZ_SQU_32B; | ||
| 256 | break; | ||
| 257 | default: | ||
| 258 | dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n"); | ||
| 259 | return -EINVAL; | ||
| 260 | } | ||
| 233 | } | 261 | } |
| 234 | 262 | ||
| 235 | writel(tdcr, tdmac->reg_base + TDCR); | 263 | writel(tdcr, tdmac->reg_base + TDCR); |
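The new PXA910_SQU switch honors the channel's configured burst size instead of hardcoding 32 bytes. A hedged usage sketch, assuming burst_sz arrives in bytes as maxburst times bus width from the slave configuration:

    struct dma_slave_config cfg = {
        .direction      = DMA_MEM_TO_DEV,
        .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
        .dst_maxburst   = 4,    /* 4 * 4 bytes = 16, selecting TDCR_BURSTSZ_SQU_16B */
    };

    ret = dmaengine_slave_config(chan, &cfg);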
| @@ -324,7 +352,7 @@ static int mmp_tdma_alloc_chan_resources(struct dma_chan *chan) | |||
| 324 | 352 | ||
| 325 | if (tdmac->irq) { | 353 | if (tdmac->irq) { |
| 326 | ret = devm_request_irq(tdmac->dev, tdmac->irq, | 354 | ret = devm_request_irq(tdmac->dev, tdmac->irq, |
| 327 | mmp_tdma_chan_handler, IRQF_DISABLED, "tdma", tdmac); | 355 | mmp_tdma_chan_handler, 0, "tdma", tdmac); |
| 328 | if (ret) | 356 | if (ret) |
| 329 | return ret; | 357 | return ret; |
| 330 | } | 358 | } |
| @@ -365,7 +393,7 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic( | |||
| 365 | int num_periods = buf_len / period_len; | 393 | int num_periods = buf_len / period_len; |
| 366 | int i = 0, buf = 0; | 394 | int i = 0, buf = 0; |
| 367 | 395 | ||
| 368 | if (tdmac->status != DMA_SUCCESS) | 396 | if (tdmac->status != DMA_COMPLETE) |
| 369 | return NULL; | 397 | return NULL; |
| 370 | 398 | ||
| 371 | if (period_len > TDMA_MAX_XFER_BYTES) { | 399 | if (period_len > TDMA_MAX_XFER_BYTES) { |
| @@ -499,7 +527,7 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev, | |||
| 499 | tdmac->idx = idx; | 527 | tdmac->idx = idx; |
| 500 | tdmac->type = type; | 528 | tdmac->type = type; |
| 501 | tdmac->reg_base = (unsigned long)tdev->base + idx * 4; | 529 | tdmac->reg_base = (unsigned long)tdev->base + idx * 4; |
| 502 | tdmac->status = DMA_SUCCESS; | 530 | tdmac->status = DMA_COMPLETE; |
| 503 | tdev->tdmac[tdmac->idx] = tdmac; | 531 | tdev->tdmac[tdmac->idx] = tdmac; |
| 504 | tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac); | 532 | tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac); |
| 505 | 533 | ||
| @@ -554,7 +582,7 @@ static int mmp_tdma_probe(struct platform_device *pdev) | |||
| 554 | if (irq_num != chan_num) { | 582 | if (irq_num != chan_num) { |
| 555 | irq = platform_get_irq(pdev, 0); | 583 | irq = platform_get_irq(pdev, 0); |
| 556 | ret = devm_request_irq(&pdev->dev, irq, | 584 | ret = devm_request_irq(&pdev->dev, irq, |
| 557 | mmp_tdma_int_handler, IRQF_DISABLED, "tdma", tdev); | 585 | mmp_tdma_int_handler, 0, "tdma", tdev); |
| 558 | if (ret) | 586 | if (ret) |
| 559 | return ret; | 587 | return ret; |
| 560 | } | 588 | } |
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 536dcb8ba5fd..7807f0ef4e20 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c | |||
| @@ -60,14 +60,6 @@ static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc) | |||
| 60 | return hw_desc->phy_dest_addr; | 60 | return hw_desc->phy_dest_addr; |
| 61 | } | 61 | } |
| 62 | 62 | ||
| 63 | static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc, | ||
| 64 | int src_idx) | ||
| 65 | { | ||
| 66 | struct mv_xor_desc *hw_desc = desc->hw_desc; | ||
| 67 | return hw_desc->phy_src_addr[mv_phy_src_idx(src_idx)]; | ||
| 68 | } | ||
| 69 | |||
| 70 | |||
| 71 | static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc, | 63 | static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc, |
| 72 | u32 byte_count) | 64 | u32 byte_count) |
| 73 | { | 65 | { |
| @@ -278,42 +270,9 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc, | |||
| 278 | desc->async_tx.callback( | 270 | desc->async_tx.callback( |
| 279 | desc->async_tx.callback_param); | 271 | desc->async_tx.callback_param); |
| 280 | 272 | ||
| 281 | /* unmap dma addresses | 273 | dma_descriptor_unmap(&desc->async_tx); |
| 282 | * (unmap_single vs unmap_page?) | 274 | if (desc->group_head) |
| 283 | */ | ||
| 284 | if (desc->group_head && desc->unmap_len) { | ||
| 285 | struct mv_xor_desc_slot *unmap = desc->group_head; | ||
| 286 | struct device *dev = mv_chan_to_devp(mv_chan); | ||
| 287 | u32 len = unmap->unmap_len; | ||
| 288 | enum dma_ctrl_flags flags = desc->async_tx.flags; | ||
| 289 | u32 src_cnt; | ||
| 290 | dma_addr_t addr; | ||
| 291 | dma_addr_t dest; | ||
| 292 | |||
| 293 | src_cnt = unmap->unmap_src_cnt; | ||
| 294 | dest = mv_desc_get_dest_addr(unmap); | ||
| 295 | if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
| 296 | enum dma_data_direction dir; | ||
| 297 | |||
| 298 | if (src_cnt > 1) /* is xor ? */ | ||
| 299 | dir = DMA_BIDIRECTIONAL; | ||
| 300 | else | ||
| 301 | dir = DMA_FROM_DEVICE; | ||
| 302 | dma_unmap_page(dev, dest, len, dir); | ||
| 303 | } | ||
| 304 | |||
| 305 | if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
| 306 | while (src_cnt--) { | ||
| 307 | addr = mv_desc_get_src_addr(unmap, | ||
| 308 | src_cnt); | ||
| 309 | if (addr == dest) | ||
| 310 | continue; | ||
| 311 | dma_unmap_page(dev, addr, len, | ||
| 312 | DMA_TO_DEVICE); | ||
| 313 | } | ||
| 314 | } | ||
| 315 | desc->group_head = NULL; | 275 | desc->group_head = NULL; |
| 316 | } | ||
| 317 | } | 276 | } |
| 318 | 277 | ||
| 319 | /* run dependent operations */ | 278 | /* run dependent operations */ |
| @@ -749,7 +708,7 @@ static enum dma_status mv_xor_status(struct dma_chan *chan, | |||
| 749 | enum dma_status ret; | 708 | enum dma_status ret; |
| 750 | 709 | ||
| 751 | ret = dma_cookie_status(chan, cookie, txstate); | 710 | ret = dma_cookie_status(chan, cookie, txstate); |
| 752 | if (ret == DMA_SUCCESS) { | 711 | if (ret == DMA_COMPLETE) { |
| 753 | mv_xor_clean_completed_slots(mv_chan); | 712 | mv_xor_clean_completed_slots(mv_chan); |
| 754 | return ret; | 713 | return ret; |
| 755 | } | 714 | } |
| @@ -874,7 +833,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) | |||
| 874 | msleep(1); | 833 | msleep(1); |
| 875 | 834 | ||
| 876 | if (mv_xor_status(dma_chan, cookie, NULL) != | 835 | if (mv_xor_status(dma_chan, cookie, NULL) != |
| 877 | DMA_SUCCESS) { | 836 | DMA_COMPLETE) { |
| 878 | dev_err(dma_chan->device->dev, | 837 | dev_err(dma_chan->device->dev, |
| 879 | "Self-test copy timed out, disabling\n"); | 838 | "Self-test copy timed out, disabling\n"); |
| 880 | err = -ENODEV; | 839 | err = -ENODEV; |
| @@ -968,7 +927,7 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) | |||
| 968 | msleep(8); | 927 | msleep(8); |
| 969 | 928 | ||
| 970 | if (mv_xor_status(dma_chan, cookie, NULL) != | 929 | if (mv_xor_status(dma_chan, cookie, NULL) != |
| 971 | DMA_SUCCESS) { | 930 | DMA_COMPLETE) { |
| 972 | dev_err(dma_chan->device->dev, | 931 | dev_err(dma_chan->device->dev, |
| 973 | "Self-test xor timed out, disabling\n"); | 932 | "Self-test xor timed out, disabling\n"); |
| 974 | err = -ENODEV; | 933 | err = -ENODEV; |
| @@ -1076,10 +1035,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev, | |||
| 1076 | } | 1035 | } |
| 1077 | 1036 | ||
| 1078 | mv_chan->mmr_base = xordev->xor_base; | 1037 | mv_chan->mmr_base = xordev->xor_base; |
| 1079 | if (!mv_chan->mmr_base) { | 1038 | mv_chan->mmr_high_base = xordev->xor_high_base; |
| 1080 | ret = -ENOMEM; | ||
| 1081 | goto err_free_dma; | ||
| 1082 | } | ||
| 1083 | tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long) | 1039 | tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long) |
| 1084 | mv_chan); | 1040 | mv_chan); |
| 1085 | 1041 | ||
| @@ -1138,7 +1094,7 @@ static void | |||
| 1138 | mv_xor_conf_mbus_windows(struct mv_xor_device *xordev, | 1094 | mv_xor_conf_mbus_windows(struct mv_xor_device *xordev, |
| 1139 | const struct mbus_dram_target_info *dram) | 1095 | const struct mbus_dram_target_info *dram) |
| 1140 | { | 1096 | { |
| 1141 | void __iomem *base = xordev->xor_base; | 1097 | void __iomem *base = xordev->xor_high_base; |
| 1142 | u32 win_enable = 0; | 1098 | u32 win_enable = 0; |
| 1143 | int i; | 1099 | int i; |
| 1144 | 1100 | ||
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h index 06b067f24c9b..d0749229c875 100644 --- a/drivers/dma/mv_xor.h +++ b/drivers/dma/mv_xor.h | |||
| @@ -34,13 +34,13 @@ | |||
| 34 | #define XOR_OPERATION_MODE_MEMCPY 2 | 34 | #define XOR_OPERATION_MODE_MEMCPY 2 |
| 35 | #define XOR_DESCRIPTOR_SWAP BIT(14) | 35 | #define XOR_DESCRIPTOR_SWAP BIT(14) |
| 36 | 36 | ||
| 37 | #define XOR_CURR_DESC(chan) (chan->mmr_base + 0x210 + (chan->idx * 4)) | 37 | #define XOR_CURR_DESC(chan) (chan->mmr_high_base + 0x10 + (chan->idx * 4)) |
| 38 | #define XOR_NEXT_DESC(chan) (chan->mmr_base + 0x200 + (chan->idx * 4)) | 38 | #define XOR_NEXT_DESC(chan) (chan->mmr_high_base + 0x00 + (chan->idx * 4)) |
| 39 | #define XOR_BYTE_COUNT(chan) (chan->mmr_base + 0x220 + (chan->idx * 4)) | 39 | #define XOR_BYTE_COUNT(chan) (chan->mmr_high_base + 0x20 + (chan->idx * 4)) |
| 40 | #define XOR_DEST_POINTER(chan) (chan->mmr_base + 0x2B0 + (chan->idx * 4)) | 40 | #define XOR_DEST_POINTER(chan) (chan->mmr_high_base + 0xB0 + (chan->idx * 4)) |
| 41 | #define XOR_BLOCK_SIZE(chan) (chan->mmr_base + 0x2C0 + (chan->idx * 4)) | 41 | #define XOR_BLOCK_SIZE(chan) (chan->mmr_high_base + 0xC0 + (chan->idx * 4)) |
| 42 | #define XOR_INIT_VALUE_LOW(chan) (chan->mmr_base + 0x2E0) | 42 | #define XOR_INIT_VALUE_LOW(chan) (chan->mmr_high_base + 0xE0) |
| 43 | #define XOR_INIT_VALUE_HIGH(chan) (chan->mmr_base + 0x2E4) | 43 | #define XOR_INIT_VALUE_HIGH(chan) (chan->mmr_high_base + 0xE4) |
| 44 | 44 | ||
| 45 | #define XOR_CONFIG(chan) (chan->mmr_base + 0x10 + (chan->idx * 4)) | 45 | #define XOR_CONFIG(chan) (chan->mmr_base + 0x10 + (chan->idx * 4)) |
| 46 | #define XOR_ACTIVATION(chan) (chan->mmr_base + 0x20 + (chan->idx * 4)) | 46 | #define XOR_ACTIVATION(chan) (chan->mmr_base + 0x20 + (chan->idx * 4)) |
| @@ -50,11 +50,11 @@ | |||
| 50 | #define XOR_ERROR_ADDR(chan) (chan->mmr_base + 0x60) | 50 | #define XOR_ERROR_ADDR(chan) (chan->mmr_base + 0x60) |
| 51 | #define XOR_INTR_MASK_VALUE 0x3F5 | 51 | #define XOR_INTR_MASK_VALUE 0x3F5 |
| 52 | 52 | ||
| 53 | #define WINDOW_BASE(w) (0x250 + ((w) << 2)) | 53 | #define WINDOW_BASE(w) (0x50 + ((w) << 2)) |
| 54 | #define WINDOW_SIZE(w) (0x270 + ((w) << 2)) | 54 | #define WINDOW_SIZE(w) (0x70 + ((w) << 2)) |
| 55 | #define WINDOW_REMAP_HIGH(w) (0x290 + ((w) << 2)) | 55 | #define WINDOW_REMAP_HIGH(w) (0x90 + ((w) << 2)) |
| 56 | #define WINDOW_BAR_ENABLE(chan) (0x240 + ((chan) << 2)) | 56 | #define WINDOW_BAR_ENABLE(chan) (0x40 + ((chan) << 2)) |
| 57 | #define WINDOW_OVERRIDE_CTRL(chan) (0x2A0 + ((chan) << 2)) | 57 | #define WINDOW_OVERRIDE_CTRL(chan) (0xA0 + ((chan) << 2)) |
| 58 | 58 | ||
| 59 | struct mv_xor_device { | 59 | struct mv_xor_device { |
| 60 | void __iomem *xor_base; | 60 | void __iomem *xor_base; |
| @@ -82,6 +82,7 @@ struct mv_xor_chan { | |||
| 82 | int pending; | 82 | int pending; |
| 83 | spinlock_t lock; /* protects the descriptor slot pool */ | 83 | spinlock_t lock; /* protects the descriptor slot pool */ |
| 84 | void __iomem *mmr_base; | 84 | void __iomem *mmr_base; |
| 85 | void __iomem *mmr_high_base; | ||
| 85 | unsigned int idx; | 86 | unsigned int idx; |
| 86 | int irq; | 87 | int irq; |
| 87 | enum dma_transaction_type current_type; | 88 | enum dma_transaction_type current_type; |
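The mv_xor.h rewrite moves the 0x2xx registers onto a separate mmr_high_base. The new offsets equal the old ones only if the high window starts 0x200 past the low one, which the unchanged arithmetic implies; a standalone check under that assumption:

    #include <assert.h>

    int main(void)
    {
        unsigned long base = 0x60900;              /* illustrative address */
        unsigned long high_base = base + 0x200;    /* assumed layout */

        assert(base + 0x210 == high_base + 0x10);  /* XOR_CURR_DESC */
        assert(base + 0x250 == high_base + 0x50);  /* WINDOW_BASE(0) */
        return 0;
    }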
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index ccd13df841db..ead491346da7 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c | |||
| @@ -27,6 +27,7 @@ | |||
| 27 | #include <linux/of.h> | 27 | #include <linux/of.h> |
| 28 | #include <linux/of_device.h> | 28 | #include <linux/of_device.h> |
| 29 | #include <linux/of_dma.h> | 29 | #include <linux/of_dma.h> |
| 30 | #include <linux/list.h> | ||
| 30 | 31 | ||
| 31 | #include <asm/irq.h> | 32 | #include <asm/irq.h> |
| 32 | 33 | ||
| @@ -57,6 +58,9 @@ | |||
| 57 | (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x050 : 0x110) + (n) * 0x70) | 58 | (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x050 : 0x110) + (n) * 0x70) |
| 58 | #define HW_APBHX_CHn_SEMA(d, n) \ | 59 | #define HW_APBHX_CHn_SEMA(d, n) \ |
| 59 | (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x080 : 0x140) + (n) * 0x70) | 60 | (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x080 : 0x140) + (n) * 0x70) |
| 61 | #define HW_APBHX_CHn_BAR(d, n) \ | ||
| 62 | (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x070 : 0x130) + (n) * 0x70) | ||
| 63 | #define HW_APBX_CHn_DEBUG1(d, n) (0x150 + (n) * 0x70) | ||
| 60 | 64 | ||
| 61 | /* | 65 | /* |
| 62 | * ccw bits definitions | 66 | * ccw bits definitions |
| @@ -115,7 +119,9 @@ struct mxs_dma_chan { | |||
| 115 | int desc_count; | 119 | int desc_count; |
| 116 | enum dma_status status; | 120 | enum dma_status status; |
| 117 | unsigned int flags; | 121 | unsigned int flags; |
| 122 | bool reset; | ||
| 118 | #define MXS_DMA_SG_LOOP (1 << 0) | 123 | #define MXS_DMA_SG_LOOP (1 << 0) |
| 124 | #define MXS_DMA_USE_SEMAPHORE (1 << 1) | ||
| 119 | }; | 125 | }; |
| 120 | 126 | ||
| 121 | #define MXS_DMA_CHANNELS 16 | 127 | #define MXS_DMA_CHANNELS 16 |
| @@ -201,12 +207,47 @@ static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) | |||
| 201 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | 207 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; |
| 202 | int chan_id = mxs_chan->chan.chan_id; | 208 | int chan_id = mxs_chan->chan.chan_id; |
| 203 | 209 | ||
| 204 | if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma)) | 210 | /* |
| 211 | * mxs dma channel resets can cause a channel stall. To recover from a | ||
| 212 | * channel stall, we have to reset the whole DMA engine. To avoid this, | ||
| 213 | * we use cyclic DMA with semaphores, which are refilled in | ||
| 214 | * mxs_dma_int_handler. To reset the channel, we can simply stop writing | ||
| 215 | * into the semaphore counter. | ||
| 216 | */ | ||
| 217 | if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE && | ||
| 218 | mxs_chan->flags & MXS_DMA_SG_LOOP) { | ||
| 219 | mxs_chan->reset = true; | ||
| 220 | } else if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma)) { | ||
| 205 | writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL), | 221 | writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL), |
| 206 | mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET); | 222 | mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET); |
| 207 | else | 223 | } else { |
| 224 | unsigned long elapsed = 0; | ||
| 225 | const unsigned long max_wait = 50000; /* 50ms */ | ||
| 226 | void __iomem *reg_dbg1 = mxs_dma->base + | ||
| 227 | HW_APBX_CHn_DEBUG1(mxs_dma, chan_id); | ||
| 228 | |||
| 229 | /* | ||
| 230 | * On i.MX28 APBX, the DMA channel can stop working if we reset | ||
| 231 | * the channel while it is in READ_FLUSH (0x08) state. | ||
| 232 | * We wait here until we leave the state. Then we trigger the | ||
| 233 | * reset. Waiting a maximum of 50ms, the kernel shouldn't crash | ||
| 234 | * because of this. | ||
| 235 | */ | ||
| 236 | while ((readl(reg_dbg1) & 0xf) == 0x8 && elapsed < max_wait) { | ||
| 237 | udelay(100); | ||
| 238 | elapsed += 100; | ||
| 239 | } | ||
| 240 | |||
| 241 | if (elapsed >= max_wait) | ||
| 242 | dev_err(&mxs_chan->mxs_dma->pdev->dev, | ||
| 243 | "Failed waiting for the DMA channel %d to leave state READ_FLUSH, trying to reset channel in READ_FLUSH state now\n", | ||
| 244 | chan_id); | ||
| 245 | |||
| 208 | writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL), | 246 | writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL), |
| 209 | mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET); | 247 | mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET); |
| 248 | } | ||
| 249 | |||
| 250 | mxs_chan->status = DMA_COMPLETE; | ||
| 210 | } | 251 | } |
| 211 | 252 | ||
| 212 | static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) | 253 | static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) |
| @@ -219,12 +260,21 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) | |||
| 219 | mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(mxs_dma, chan_id)); | 260 | mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(mxs_dma, chan_id)); |
| 220 | 261 | ||
| 221 | /* write 1 to SEMA to kick off the channel */ | 262 | /* write 1 to SEMA to kick off the channel */ |
| 222 | writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id)); | 263 | if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE && |
| 264 | mxs_chan->flags & MXS_DMA_SG_LOOP) { | ||
| 265 | /* A cyclic DMA consists of at least 2 segments, so initialize | ||
| 266 | * the semaphore to 2 so we have enough time to add 1 to it | ||
| 267 | * before it runs out */ | ||
| 268 | writel(2, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id)); | ||
| 269 | } else { | ||
| 270 | writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id)); | ||
| 271 | } | ||
| 272 | mxs_chan->reset = false; | ||
| 223 | } | 273 | } |
| 224 | 274 | ||
| 225 | static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan) | 275 | static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan) |
| 226 | { | 276 | { |
| 227 | mxs_chan->status = DMA_SUCCESS; | 277 | mxs_chan->status = DMA_COMPLETE; |
| 228 | } | 278 | } |
| 229 | 279 | ||
| 230 | static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan) | 280 | static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan) |
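The reset comment above compresses the whole semaphore scheme into a few lines; the moving parts are spread across this patch. In outline, with register names from this driver:

    /* enable: seed the counter so the first two segments can run */
    writel(2, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));

    /* each completion irq: top the counter back up while cyclic */
    writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));

    /* reset: set mxs_chan->reset and stop writing; the counter drains
     * and the channel halts without touching the hardware reset bits */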
| @@ -272,58 +322,88 @@ static void mxs_dma_tasklet(unsigned long data) | |||
| 272 | mxs_chan->desc.callback(mxs_chan->desc.callback_param); | 322 | mxs_chan->desc.callback(mxs_chan->desc.callback_param); |
| 273 | } | 323 | } |
| 274 | 324 | ||
| 325 | static int mxs_dma_irq_to_chan(struct mxs_dma_engine *mxs_dma, int irq) | ||
| 326 | { | ||
| 327 | int i; | ||
| 328 | |||
| 329 | for (i = 0; i != mxs_dma->nr_channels; ++i) | ||
| 330 | if (mxs_dma->mxs_chans[i].chan_irq == irq) | ||
| 331 | return i; | ||
| 332 | |||
| 333 | return -EINVAL; | ||
| 334 | } | ||
| 335 | |||
| 275 | static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id) | 336 | static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id) |
| 276 | { | 337 | { |
| 277 | struct mxs_dma_engine *mxs_dma = dev_id; | 338 | struct mxs_dma_engine *mxs_dma = dev_id; |
| 278 | u32 stat1, stat2; | 339 | struct mxs_dma_chan *mxs_chan; |
| 340 | u32 completed; | ||
| 341 | u32 err; | ||
| 342 | int chan = mxs_dma_irq_to_chan(mxs_dma, irq); | ||
| 343 | |||
| 344 | if (chan < 0) | ||
| 345 | return IRQ_NONE; | ||
| 279 | 346 | ||
| 280 | /* completion status */ | 347 | /* completion status */ |
| 281 | stat1 = readl(mxs_dma->base + HW_APBHX_CTRL1); | 348 | completed = readl(mxs_dma->base + HW_APBHX_CTRL1); |
| 282 | stat1 &= MXS_DMA_CHANNELS_MASK; | 349 | completed = (completed >> chan) & 0x1; |
| 283 | writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR); | 350 | |
| 351 | /* Clear interrupt */ | ||
| 352 | writel((1 << chan), | ||
| 353 | mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR); | ||
| 284 | 354 | ||
| 285 | /* error status */ | 355 | /* error status */ |
| 286 | stat2 = readl(mxs_dma->base + HW_APBHX_CTRL2); | 356 | err = readl(mxs_dma->base + HW_APBHX_CTRL2); |
| 287 | writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR); | 357 | err &= (1 << (MXS_DMA_CHANNELS + chan)) | (1 << chan); |
| 358 | |||
| 359 | /* | ||
| 360 | * error status bit is in the upper 16 bits, error irq bit in the lower | ||
| 361 | * 16 bits. We transform it into a simpler error code: | ||
| 362 | * err: 0x00 = no error, 0x01 = TERMINATION, 0x02 = BUS_ERROR | ||
| 363 | */ | ||
| 364 | err = (err >> (MXS_DMA_CHANNELS + chan)) + (err >> chan); | ||
| 365 | |||
| 366 | /* Clear error irq */ | ||
| 367 | writel((1 << chan), | ||
| 368 | mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR); | ||
| 288 | 369 | ||
| 289 | /* | 370 | /* |
| 290 | * When both completion and error of termination bits set at the | 371 | * When both completion and error of termination bits set at the |
| 291 | * same time, we do not take it as an error. IOW, it only becomes | 372 | * same time, we do not take it as an error. IOW, it only becomes |
| 292 | * an error we need to handle here in case of either it's (1) a bus | 373 | * an error we need to handle here if it's either a bus |
| 293 | * error or (2) a termination error with no completion. | 374 | * error or a termination error with no completion. 0x01 is termination |
| 375 | * error, so we can subtract err & completed to get the real error case. | ||
| 294 | */ | 376 | */ |
| 295 | stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */ | 377 | err -= err & completed; |
| 296 | (~(stat2 >> MXS_DMA_CHANNELS) & stat2 & ~stat1); /* (2) */ | ||
| 297 | |||
| 298 | /* combine error and completion status for checking */ | ||
| 299 | stat1 = (stat2 << MXS_DMA_CHANNELS) | stat1; | ||
| 300 | while (stat1) { | ||
| 301 | int channel = fls(stat1) - 1; | ||
| 302 | struct mxs_dma_chan *mxs_chan = | ||
| 303 | &mxs_dma->mxs_chans[channel % MXS_DMA_CHANNELS]; | ||
| 304 | |||
| 305 | if (channel >= MXS_DMA_CHANNELS) { | ||
| 306 | dev_dbg(mxs_dma->dma_device.dev, | ||
| 307 | "%s: error in channel %d\n", __func__, | ||
| 308 | channel - MXS_DMA_CHANNELS); | ||
| 309 | mxs_chan->status = DMA_ERROR; | ||
| 310 | mxs_dma_reset_chan(mxs_chan); | ||
| 311 | } else { | ||
| 312 | if (mxs_chan->flags & MXS_DMA_SG_LOOP) | ||
| 313 | mxs_chan->status = DMA_IN_PROGRESS; | ||
| 314 | else | ||
| 315 | mxs_chan->status = DMA_SUCCESS; | ||
| 316 | } | ||
| 317 | 378 | ||
| 318 | stat1 &= ~(1 << channel); | 379 | mxs_chan = &mxs_dma->mxs_chans[chan]; |
| 319 | 380 | ||
| 320 | if (mxs_chan->status == DMA_SUCCESS) | 381 | if (err) { |
| 321 | dma_cookie_complete(&mxs_chan->desc); | 382 | dev_dbg(mxs_dma->dma_device.dev, |
| 383 | "%s: error in channel %d\n", __func__, | ||
| 384 | chan); | ||
| 385 | mxs_chan->status = DMA_ERROR; | ||
| 386 | mxs_dma_reset_chan(mxs_chan); | ||
| 387 | } else if (mxs_chan->status != DMA_COMPLETE) { | ||
| 388 | if (mxs_chan->flags & MXS_DMA_SG_LOOP) { | ||
| 389 | mxs_chan->status = DMA_IN_PROGRESS; | ||
| 390 | if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE) | ||
| 391 | writel(1, mxs_dma->base + | ||
| 392 | HW_APBHX_CHn_SEMA(mxs_dma, chan)); | ||
| 393 | } else { | ||
| 394 | mxs_chan->status = DMA_COMPLETE; | ||
| 395 | } | ||
| 396 | } | ||
| 322 | 397 | ||
| 323 | /* schedule tasklet on this channel */ | 398 | if (mxs_chan->status == DMA_COMPLETE) { |
| 324 | tasklet_schedule(&mxs_chan->tasklet); | 399 | if (mxs_chan->reset) |
| 400 | return IRQ_HANDLED; | ||
| 401 | dma_cookie_complete(&mxs_chan->desc); | ||
| 325 | } | 402 | } |
| 326 | 403 | ||
| 404 | /* schedule tasklet on this channel */ | ||
| 405 | tasklet_schedule(&mxs_chan->tasklet); | ||
| 406 | |||
| 327 | return IRQ_HANDLED; | 407 | return IRQ_HANDLED; |
| 328 | } | 408 | } |
| 329 | 409 | ||
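The error folding in the new handler is worth checking by hand, since the '0x01 = TERMINATION, 0x02 = BUS_ERROR' mapping in the comment describes only the low bits of the result. A standalone demo; which bits each error type raises is an assumption based on that comment:

    #include <stdio.h>

    #define MXS_DMA_CHANNELS 16

    static unsigned int fold(unsigned int ctrl2, int chan)
    {
        unsigned int err = ctrl2;

        err &= (1u << (MXS_DMA_CHANNELS + chan)) | (1u << chan);
        return (err >> (MXS_DMA_CHANNELS + chan)) + (err >> chan);
    }

    int main(void)
    {
        int chan = 3;
        unsigned int both = (1u << (MXS_DMA_CHANNELS + chan)) | (1u << chan);

        printf("%#x\n", fold(0, chan));             /* 0: no error */
        printf("%#x\n", fold(1u << chan, chan));    /* 0x1: termination */
        printf("%#x\n", fold(both, chan));          /* 0x10002: bus error */
        return 0;
    }

Only bit 0 interacts with the later 'err -= err & completed' (completed is 0 or 1 by then), and the final test is just 'if (err)', so the extra high bit in the bus-error case is harmless.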
| @@ -523,6 +603,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( | |||
| 523 | 603 | ||
| 524 | mxs_chan->status = DMA_IN_PROGRESS; | 604 | mxs_chan->status = DMA_IN_PROGRESS; |
| 525 | mxs_chan->flags |= MXS_DMA_SG_LOOP; | 605 | mxs_chan->flags |= MXS_DMA_SG_LOOP; |
| 606 | mxs_chan->flags |= MXS_DMA_USE_SEMAPHORE; | ||
| 526 | 607 | ||
| 527 | if (num_periods > NUM_CCW) { | 608 | if (num_periods > NUM_CCW) { |
| 528 | dev_err(mxs_dma->dma_device.dev, | 609 | dev_err(mxs_dma->dma_device.dev, |
| @@ -554,6 +635,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( | |||
| 554 | ccw->bits |= CCW_IRQ; | 635 | ccw->bits |= CCW_IRQ; |
| 555 | ccw->bits |= CCW_HALT_ON_TERM; | 636 | ccw->bits |= CCW_HALT_ON_TERM; |
| 556 | ccw->bits |= CCW_TERM_FLUSH; | 637 | ccw->bits |= CCW_TERM_FLUSH; |
| 638 | ccw->bits |= CCW_DEC_SEM; | ||
| 557 | ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ? | 639 | ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ? |
| 558 | MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND); | 640 | MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND); |
| 559 | 641 | ||
| @@ -599,8 +681,24 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan, | |||
| 599 | dma_cookie_t cookie, struct dma_tx_state *txstate) | 681 | dma_cookie_t cookie, struct dma_tx_state *txstate) |
| 600 | { | 682 | { |
| 601 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | 683 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); |
| 684 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | ||
| 685 | u32 residue = 0; | ||
| 686 | |||
| 687 | if (mxs_chan->status == DMA_IN_PROGRESS && | ||
| 688 | mxs_chan->flags & MXS_DMA_SG_LOOP) { | ||
| 689 | struct mxs_dma_ccw *last_ccw; | ||
| 690 | u32 bar; | ||
| 691 | |||
| 692 | last_ccw = &mxs_chan->ccw[mxs_chan->desc_count - 1]; | ||
| 693 | residue = last_ccw->xfer_bytes + last_ccw->bufaddr; | ||
| 694 | |||
| 695 | bar = readl(mxs_dma->base + | ||
| 696 | HW_APBHX_CHn_BAR(mxs_dma, chan->chan_id)); | ||
| 697 | residue -= bar; | ||
| 698 | } | ||
| 602 | 699 | ||
| 603 | dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 0); | 700 | dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, |
| 701 | residue); | ||
| 604 | 702 | ||
| 605 | return mxs_chan->status; | 703 | return mxs_chan->status; |
| 606 | } | 704 | } |
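The new residue path measures how much of the cyclic ring is still ahead of the hardware: the ring ends at the last descriptor's bufaddr plus xfer_bytes, and BAR holds the current bus address. A standalone check with made-up addresses:

    #include <stdio.h>

    int main(void)
    {
        unsigned int bufaddr    = 0x40000000;    /* last segment start (illustrative) */
        unsigned int xfer_bytes = 0x1000;        /* last segment length */
        unsigned int bar        = 0x40000400;    /* current hw position */
        unsigned int residue    = xfer_bytes + bufaddr - bar;

        printf("residue = %#x bytes\n", residue);    /* 0xc00 */
        return 0;
    }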
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index ec3fc4fd9160..2f66cf4e54fe 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c | |||
| @@ -248,7 +248,7 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan, | |||
| 248 | unsigned long flags; | 248 | unsigned long flags; |
| 249 | 249 | ||
| 250 | ret = dma_cookie_status(chan, cookie, txstate); | 250 | ret = dma_cookie_status(chan, cookie, txstate); |
| 251 | if (ret == DMA_SUCCESS || !txstate) | 251 | if (ret == DMA_COMPLETE || !txstate) |
| 252 | return ret; | 252 | return ret; |
| 253 | 253 | ||
| 254 | spin_lock_irqsave(&c->vc.lock, flags); | 254 | spin_lock_irqsave(&c->vc.lock, flags); |
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index df8b10fd1726..cdf0483b8f2d 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
| @@ -2268,6 +2268,8 @@ static void pl330_tasklet(unsigned long data) | |||
| 2268 | list_move_tail(&desc->node, &pch->dmac->desc_pool); | 2268 | list_move_tail(&desc->node, &pch->dmac->desc_pool); |
| 2269 | } | 2269 | } |
| 2270 | 2270 | ||
| 2271 | dma_descriptor_unmap(&desc->txd); | ||
| 2272 | |||
| 2271 | if (callback) { | 2273 | if (callback) { |
| 2272 | spin_unlock_irqrestore(&pch->lock, flags); | 2274 | spin_unlock_irqrestore(&pch->lock, flags); |
| 2273 | callback(callback_param); | 2275 | callback(callback_param); |
| @@ -2314,7 +2316,7 @@ bool pl330_filter(struct dma_chan *chan, void *param) | |||
| 2314 | return false; | 2316 | return false; |
| 2315 | 2317 | ||
| 2316 | peri_id = chan->private; | 2318 | peri_id = chan->private; |
| 2317 | return *peri_id == (unsigned)param; | 2319 | return *peri_id == (unsigned long)param; |
| 2318 | } | 2320 | } |
| 2319 | EXPORT_SYMBOL(pl330_filter); | 2321 | EXPORT_SYMBOL(pl330_filter); |
| 2320 | 2322 | ||
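The pl330_filter() fix matters on 64-bit builds: param carries a pointer-sized value, and comparing through (unsigned) silently truncated the upper 32 bits. A sketch of how such a filter is typically handed to dma_request_channel() (the peripheral id is invented, and the pl330_filter() prototype is assumed to come from its usual amba header):

    #include <linux/dmaengine.h>
    #include <linux/amba/pl330.h>   /* assumed home of the pl330_filter() prototype */

    /* Request a pl330 slave channel by peripheral id (the id is invented).
     * pl330_filter() now compares against the full pointer-sized value,
     * which this cast round-trips without truncation on 64-bit. */
    static struct dma_chan *foo_get_chan(void)
    {
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        return dma_request_channel(mask, pl330_filter, (void *)(unsigned long)5);
    }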
| @@ -2926,16 +2928,23 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
| 2926 | 2928 | ||
| 2927 | amba_set_drvdata(adev, pdmac); | 2929 | amba_set_drvdata(adev, pdmac); |
| 2928 | 2930 | ||
| 2929 | irq = adev->irq[0]; | 2931 | for (i = 0; i < AMBA_NR_IRQS; i++) { |
| 2930 | ret = request_irq(irq, pl330_irq_handler, 0, | 2932 | irq = adev->irq[i]; |
| 2931 | dev_name(&adev->dev), pi); | 2933 | if (irq) { |
| 2932 | if (ret) | 2934 | ret = devm_request_irq(&adev->dev, irq, |
| 2933 | return ret; | 2935 | pl330_irq_handler, 0, |
| 2936 | dev_name(&adev->dev), pi); | ||
| 2937 | if (ret) | ||
| 2938 | return ret; | ||
| 2939 | } else { | ||
| 2940 | break; | ||
| 2941 | } | ||
| 2942 | } | ||
| 2934 | 2943 | ||
| 2935 | pi->pcfg.periph_id = adev->periphid; | 2944 | pi->pcfg.periph_id = adev->periphid; |
| 2936 | ret = pl330_add(pi); | 2945 | ret = pl330_add(pi); |
| 2937 | if (ret) | 2946 | if (ret) |
| 2938 | goto probe_err1; | 2947 | return ret; |
| 2939 | 2948 | ||
| 2940 | INIT_LIST_HEAD(&pdmac->desc_pool); | 2949 | INIT_LIST_HEAD(&pdmac->desc_pool); |
| 2941 | spin_lock_init(&pdmac->pool_lock); | 2950 | spin_lock_init(&pdmac->pool_lock); |
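The probe rework above does two things at once: it requests every populated entry of adev->irq[] instead of only the first, and it switches to devm_request_irq(), whose device-managed lifetime is what lets the free_irq() calls vanish from the error path and from pl330_remove() further down. A minimal sketch of the managed pattern (all names are illustrative):

    #include <linux/device.h>
    #include <linux/interrupt.h>

    /*
     * Managed IRQ request: the driver core calls free_irq() for us when
     * the device is unbound, so probe error paths and remove() need no
     * explicit cleanup.
     */
    static int foo_request_irqs(struct device *dev, unsigned int *irqs,
                                int nr_irqs, irq_handler_t handler, void *cookie)
    {
        int i, ret;

        for (i = 0; i < nr_irqs; i++) {
            if (!irqs[i])
                break;      /* zero-terminated, like adev->irq[] above */
            ret = devm_request_irq(dev, irqs[i], handler, 0,
                                   dev_name(dev), cookie);
            if (ret)
                return ret; /* IRQs requested so far are freed automatically */
        }
        return 0;
    }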
| @@ -3033,8 +3042,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
| 3033 | 3042 | ||
| 3034 | return 0; | 3043 | return 0; |
| 3035 | probe_err3: | 3044 | probe_err3: |
| 3036 | amba_set_drvdata(adev, NULL); | ||
| 3037 | |||
| 3038 | /* Idle the DMAC */ | 3045 | /* Idle the DMAC */ |
| 3039 | list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels, | 3046 | list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels, |
| 3040 | chan.device_node) { | 3047 | chan.device_node) { |
| @@ -3048,8 +3055,6 @@ probe_err3: | |||
| 3048 | } | 3055 | } |
| 3049 | probe_err2: | 3056 | probe_err2: |
| 3050 | pl330_del(pi); | 3057 | pl330_del(pi); |
| 3051 | probe_err1: | ||
| 3052 | free_irq(irq, pi); | ||
| 3053 | 3058 | ||
| 3054 | return ret; | 3059 | return ret; |
| 3055 | } | 3060 | } |
| @@ -3059,7 +3064,6 @@ static int pl330_remove(struct amba_device *adev) | |||
| 3059 | struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev); | 3064 | struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev); |
| 3060 | struct dma_pl330_chan *pch, *_p; | 3065 | struct dma_pl330_chan *pch, *_p; |
| 3061 | struct pl330_info *pi; | 3066 | struct pl330_info *pi; |
| 3062 | int irq; | ||
| 3063 | 3067 | ||
| 3064 | if (!pdmac) | 3068 | if (!pdmac) |
| 3065 | return 0; | 3069 | return 0; |
| @@ -3068,7 +3072,6 @@ static int pl330_remove(struct amba_device *adev) | |||
| 3068 | of_dma_controller_free(adev->dev.of_node); | 3072 | of_dma_controller_free(adev->dev.of_node); |
| 3069 | 3073 | ||
| 3070 | dma_async_device_unregister(&pdmac->ddma); | 3074 | dma_async_device_unregister(&pdmac->ddma); |
| 3071 | amba_set_drvdata(adev, NULL); | ||
| 3072 | 3075 | ||
| 3073 | /* Idle the DMAC */ | 3076 | /* Idle the DMAC */ |
| 3074 | list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels, | 3077 | list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels, |
| @@ -3086,9 +3089,6 @@ static int pl330_remove(struct amba_device *adev) | |||
| 3086 | 3089 | ||
| 3087 | pl330_del(pi); | 3090 | pl330_del(pi); |
| 3088 | 3091 | ||
| 3089 | irq = adev->irq[0]; | ||
| 3090 | free_irq(irq, pi); | ||
| 3091 | |||
| 3092 | return 0; | 3092 | return 0; |
| 3093 | } | 3093 | } |
| 3094 | 3094 | ||
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index e24b5ef486b5..8da48c6b2a38 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c | |||
| @@ -804,218 +804,6 @@ static void ppc440spe_desc_set_link(struct ppc440spe_adma_chan *chan, | |||
| 804 | } | 804 | } |
| 805 | 805 | ||
| 806 | /** | 806 | /** |
| 807 | * ppc440spe_desc_get_src_addr - extract the source address from the descriptor | ||
| 808 | */ | ||
| 809 | static u32 ppc440spe_desc_get_src_addr(struct ppc440spe_adma_desc_slot *desc, | ||
| 810 | struct ppc440spe_adma_chan *chan, int src_idx) | ||
| 811 | { | ||
| 812 | struct dma_cdb *dma_hw_desc; | ||
| 813 | struct xor_cb *xor_hw_desc; | ||
| 814 | |||
| 815 | switch (chan->device->id) { | ||
| 816 | case PPC440SPE_DMA0_ID: | ||
| 817 | case PPC440SPE_DMA1_ID: | ||
| 818 | dma_hw_desc = desc->hw_desc; | ||
| 819 | /* May have 0, 1, 2, or 3 sources */ | ||
| 820 | switch (dma_hw_desc->opc) { | ||
| 821 | case DMA_CDB_OPC_NO_OP: | ||
| 822 | case DMA_CDB_OPC_DFILL128: | ||
| 823 | return 0; | ||
| 824 | case DMA_CDB_OPC_DCHECK128: | ||
| 825 | if (unlikely(src_idx)) { | ||
| 826 | printk(KERN_ERR "%s: try to get %d source for" | ||
| 827 | " DCHECK128\n", __func__, src_idx); | ||
| 828 | BUG(); | ||
| 829 | } | ||
| 830 | return le32_to_cpu(dma_hw_desc->sg1l); | ||
| 831 | case DMA_CDB_OPC_MULTICAST: | ||
| 832 | case DMA_CDB_OPC_MV_SG1_SG2: | ||
| 833 | if (unlikely(src_idx > 2)) { | ||
| 834 | printk(KERN_ERR "%s: try to get %d source from" | ||
| 835 | " DMA descr\n", __func__, src_idx); | ||
| 836 | BUG(); | ||
| 837 | } | ||
| 838 | if (src_idx) { | ||
| 839 | if (le32_to_cpu(dma_hw_desc->sg1u) & | ||
| 840 | DMA_CUED_XOR_WIN_MSK) { | ||
| 841 | u8 region; | ||
| 842 | |||
| 843 | if (src_idx == 1) | ||
| 844 | return le32_to_cpu( | ||
| 845 | dma_hw_desc->sg1l) + | ||
| 846 | desc->unmap_len; | ||
| 847 | |||
| 848 | region = (le32_to_cpu( | ||
| 849 | dma_hw_desc->sg1u)) >> | ||
| 850 | DMA_CUED_REGION_OFF; | ||
| 851 | |||
| 852 | region &= DMA_CUED_REGION_MSK; | ||
| 853 | switch (region) { | ||
| 854 | case DMA_RXOR123: | ||
| 855 | return le32_to_cpu( | ||
| 856 | dma_hw_desc->sg1l) + | ||
| 857 | (desc->unmap_len << 1); | ||
| 858 | case DMA_RXOR124: | ||
| 859 | return le32_to_cpu( | ||
| 860 | dma_hw_desc->sg1l) + | ||
| 861 | (desc->unmap_len * 3); | ||
| 862 | case DMA_RXOR125: | ||
| 863 | return le32_to_cpu( | ||
| 864 | dma_hw_desc->sg1l) + | ||
| 865 | (desc->unmap_len << 2); | ||
| 866 | default: | ||
| 867 | printk(KERN_ERR | ||
| 868 | "%s: try to" | ||
| 869 | " get src3 for region %02x" | ||
| 870 | "PPC440SPE_DESC_RXOR12?\n", | ||
| 871 | __func__, region); | ||
| 872 | BUG(); | ||
| 873 | } | ||
| 874 | } else { | ||
| 875 | printk(KERN_ERR | ||
| 876 | "%s: try to get %d" | ||
| 877 | " source for non-cued descr\n", | ||
| 878 | __func__, src_idx); | ||
| 879 | BUG(); | ||
| 880 | } | ||
| 881 | } | ||
| 882 | return le32_to_cpu(dma_hw_desc->sg1l); | ||
| 883 | default: | ||
| 884 | printk(KERN_ERR "%s: unknown OPC 0x%02x\n", | ||
| 885 | __func__, dma_hw_desc->opc); | ||
| 886 | BUG(); | ||
| 887 | } | ||
| 888 | return le32_to_cpu(dma_hw_desc->sg1l); | ||
| 889 | case PPC440SPE_XOR_ID: | ||
| 890 | /* May have up to 16 sources */ | ||
| 891 | xor_hw_desc = desc->hw_desc; | ||
| 892 | return xor_hw_desc->ops[src_idx].l; | ||
| 893 | } | ||
| 894 | return 0; | ||
| 895 | } | ||
| 896 | |||
| 897 | /** | ||
| 898 | * ppc440spe_desc_get_dest_addr - extract the destination address from the | ||
| 899 | * descriptor | ||
| 900 | */ | ||
| 901 | static u32 ppc440spe_desc_get_dest_addr(struct ppc440spe_adma_desc_slot *desc, | ||
| 902 | struct ppc440spe_adma_chan *chan, int idx) | ||
| 903 | { | ||
| 904 | struct dma_cdb *dma_hw_desc; | ||
| 905 | struct xor_cb *xor_hw_desc; | ||
| 906 | |||
| 907 | switch (chan->device->id) { | ||
| 908 | case PPC440SPE_DMA0_ID: | ||
| 909 | case PPC440SPE_DMA1_ID: | ||
| 910 | dma_hw_desc = desc->hw_desc; | ||
| 911 | |||
| 912 | if (likely(!idx)) | ||
| 913 | return le32_to_cpu(dma_hw_desc->sg2l); | ||
| 914 | return le32_to_cpu(dma_hw_desc->sg3l); | ||
| 915 | case PPC440SPE_XOR_ID: | ||
| 916 | xor_hw_desc = desc->hw_desc; | ||
| 917 | return xor_hw_desc->cbtal; | ||
| 918 | } | ||
| 919 | return 0; | ||
| 920 | } | ||
| 921 | |||
| 922 | /** | ||
| 923 | * ppc440spe_desc_get_src_num - extract the number of source addresses from | ||
| 924 | * the descriptor | ||
| 925 | */ | ||
| 926 | static u32 ppc440spe_desc_get_src_num(struct ppc440spe_adma_desc_slot *desc, | ||
| 927 | struct ppc440spe_adma_chan *chan) | ||
| 928 | { | ||
| 929 | struct dma_cdb *dma_hw_desc; | ||
| 930 | struct xor_cb *xor_hw_desc; | ||
| 931 | |||
| 932 | switch (chan->device->id) { | ||
| 933 | case PPC440SPE_DMA0_ID: | ||
| 934 | case PPC440SPE_DMA1_ID: | ||
| 935 | dma_hw_desc = desc->hw_desc; | ||
| 936 | |||
| 937 | switch (dma_hw_desc->opc) { | ||
| 938 | case DMA_CDB_OPC_NO_OP: | ||
| 939 | case DMA_CDB_OPC_DFILL128: | ||
| 940 | return 0; | ||
| 941 | case DMA_CDB_OPC_DCHECK128: | ||
| 942 | return 1; | ||
| 943 | case DMA_CDB_OPC_MV_SG1_SG2: | ||
| 944 | case DMA_CDB_OPC_MULTICAST: | ||
| 945 | /* | ||
| 946 | * Only for RXOR operations we have more than | ||
| 947 | * one source | ||
| 948 | */ | ||
| 949 | if (le32_to_cpu(dma_hw_desc->sg1u) & | ||
| 950 | DMA_CUED_XOR_WIN_MSK) { | ||
| 951 | /* RXOR op, there are 2 or 3 sources */ | ||
| 952 | if (((le32_to_cpu(dma_hw_desc->sg1u) >> | ||
| 953 | DMA_CUED_REGION_OFF) & | ||
| 954 | DMA_CUED_REGION_MSK) == DMA_RXOR12) { | ||
| 955 | /* RXOR 1-2 */ | ||
| 956 | return 2; | ||
| 957 | } else { | ||
| 958 | /* RXOR 1-2-3/1-2-4/1-2-5 */ | ||
| 959 | return 3; | ||
| 960 | } | ||
| 961 | } | ||
| 962 | return 1; | ||
| 963 | default: | ||
| 964 | printk(KERN_ERR "%s: unknown OPC 0x%02x\n", | ||
| 965 | __func__, dma_hw_desc->opc); | ||
| 966 | BUG(); | ||
| 967 | } | ||
| 968 | case PPC440SPE_XOR_ID: | ||
| 969 | /* up to 16 sources */ | ||
| 970 | xor_hw_desc = desc->hw_desc; | ||
| 971 | return xor_hw_desc->cbc & XOR_CDCR_OAC_MSK; | ||
| 972 | default: | ||
| 973 | BUG(); | ||
| 974 | } | ||
| 975 | return 0; | ||
| 976 | } | ||
| 977 | |||
| 978 | /** | ||
| 979 | * ppc440spe_desc_get_dst_num - get the number of destination addresses in | ||
| 980 | * this descriptor | ||
| 981 | */ | ||
| 982 | static u32 ppc440spe_desc_get_dst_num(struct ppc440spe_adma_desc_slot *desc, | ||
| 983 | struct ppc440spe_adma_chan *chan) | ||
| 984 | { | ||
| 985 | struct dma_cdb *dma_hw_desc; | ||
| 986 | |||
| 987 | switch (chan->device->id) { | ||
| 988 | case PPC440SPE_DMA0_ID: | ||
| 989 | case PPC440SPE_DMA1_ID: | ||
| 990 | /* May be 1 or 2 destinations */ | ||
| 991 | dma_hw_desc = desc->hw_desc; | ||
| 992 | switch (dma_hw_desc->opc) { | ||
| 993 | case DMA_CDB_OPC_NO_OP: | ||
| 994 | case DMA_CDB_OPC_DCHECK128: | ||
| 995 | return 0; | ||
| 996 | case DMA_CDB_OPC_MV_SG1_SG2: | ||
| 997 | case DMA_CDB_OPC_DFILL128: | ||
| 998 | return 1; | ||
| 999 | case DMA_CDB_OPC_MULTICAST: | ||
| 1000 | if (desc->dst_cnt == 2) | ||
| 1001 | return 2; | ||
| 1002 | else | ||
| 1003 | return 1; | ||
| 1004 | default: | ||
| 1005 | printk(KERN_ERR "%s: unknown OPC 0x%02x\n", | ||
| 1006 | __func__, dma_hw_desc->opc); | ||
| 1007 | BUG(); | ||
| 1008 | } | ||
| 1009 | case PPC440SPE_XOR_ID: | ||
| 1010 | /* Always only 1 destination */ | ||
| 1011 | return 1; | ||
| 1012 | default: | ||
| 1013 | BUG(); | ||
| 1014 | } | ||
| 1015 | return 0; | ||
| 1016 | } | ||
| 1017 | |||
| 1018 | /** | ||
| 1019 | * ppc440spe_desc_get_link - get the address of the descriptor that | 807 | * ppc440spe_desc_get_link - get the address of the descriptor that |
| 1020 | * follows this one | 808 | * follows this one |
| 1021 | */ | 809 | */ |
| @@ -1707,43 +1495,6 @@ static void ppc440spe_adma_free_slots(struct ppc440spe_adma_desc_slot *slot, | |||
| 1707 | } | 1495 | } |
| 1708 | } | 1496 | } |
| 1709 | 1497 | ||
| 1710 | static void ppc440spe_adma_unmap(struct ppc440spe_adma_chan *chan, | ||
| 1711 | struct ppc440spe_adma_desc_slot *desc) | ||
| 1712 | { | ||
| 1713 | u32 src_cnt, dst_cnt; | ||
| 1714 | dma_addr_t addr; | ||
| 1715 | |||
| 1716 | /* | ||
| 1717 | * get the number of sources & destination | ||
| 1718 | * included in this descriptor and unmap | ||
| 1719 | * them all | ||
| 1720 | */ | ||
| 1721 | src_cnt = ppc440spe_desc_get_src_num(desc, chan); | ||
| 1722 | dst_cnt = ppc440spe_desc_get_dst_num(desc, chan); | ||
| 1723 | |||
| 1724 | /* unmap destinations */ | ||
| 1725 | if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
| 1726 | while (dst_cnt--) { | ||
| 1727 | addr = ppc440spe_desc_get_dest_addr( | ||
| 1728 | desc, chan, dst_cnt); | ||
| 1729 | dma_unmap_page(chan->device->dev, | ||
| 1730 | addr, desc->unmap_len, | ||
| 1731 | DMA_FROM_DEVICE); | ||
| 1732 | } | ||
| 1733 | } | ||
| 1734 | |||
| 1735 | /* unmap sources */ | ||
| 1736 | if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
| 1737 | while (src_cnt--) { | ||
| 1738 | addr = ppc440spe_desc_get_src_addr( | ||
| 1739 | desc, chan, src_cnt); | ||
| 1740 | dma_unmap_page(chan->device->dev, | ||
| 1741 | addr, desc->unmap_len, | ||
| 1742 | DMA_TO_DEVICE); | ||
| 1743 | } | ||
| 1744 | } | ||
| 1745 | } | ||
| 1746 | |||
| 1747 | /** | 1498 | /** |
| 1748 | * ppc440spe_adma_run_tx_complete_actions - call functions to be called | 1499 | * ppc440spe_adma_run_tx_complete_actions - call functions to be called |
| 1749 | * upon completion | 1500 | * upon completion |
| @@ -1767,26 +1518,7 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions( | |||
| 1767 | desc->async_tx.callback( | 1518 | desc->async_tx.callback( |
| 1768 | desc->async_tx.callback_param); | 1519 | desc->async_tx.callback_param); |
| 1769 | 1520 | ||
| 1770 | /* unmap dma addresses | 1521 | dma_descriptor_unmap(&desc->async_tx); |
| 1771 | * (unmap_single vs unmap_page?) | ||
| 1772 | * | ||
| 1773 | * actually, ppc's dma_unmap_page() functions are empty, so | ||
| 1774 | * the following code is just for the sake of completeness | ||
| 1775 | */ | ||
| 1776 | if (chan && chan->needs_unmap && desc->group_head && | ||
| 1777 | desc->unmap_len) { | ||
| 1778 | struct ppc440spe_adma_desc_slot *unmap = | ||
| 1779 | desc->group_head; | ||
| 1780 | /* assume 1 slot per op always */ | ||
| 1781 | u32 slot_count = unmap->slot_cnt; | ||
| 1782 | |||
| 1783 | /* Run through the group list and unmap addresses */ | ||
| 1784 | for (i = 0; i < slot_count; i++) { | ||
| 1785 | BUG_ON(!unmap); | ||
| 1786 | ppc440spe_adma_unmap(chan, unmap); | ||
| 1787 | unmap = unmap->hw_next; | ||
| 1788 | } | ||
| 1789 | } | ||
| 1790 | } | 1522 | } |
| 1791 | 1523 | ||
| 1792 | /* run dependent operations */ | 1524 | /* run dependent operations */ |
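Everything deleted from ppc4xx/adma.c above — the source/destination address extractors, the counting helpers, and ppc440spe_adma_unmap() itself — existed only so the driver could undo its own DMA mappings at completion time. The dmaengine core now records the mappings in the descriptor, so a single dma_descriptor_unmap() call replaces the whole walk. A simplified sketch of the converted completion path (locking and list handling elided; the cookie helpers live in the driver-private drivers/dma/dmaengine.h):

    #include <linux/dmaengine.h>
    #include "dmaengine.h"      /* dma_cookie_complete() and friends */

    /*
     * Completion path after the conversion: the core recorded every
     * mapped src/dst address at prep time, so the driver no longer
     * walks its hardware descriptors to rebuild them. Simplified;
     * real drivers run this under the channel lock.
     */
    static void foo_complete_desc(struct dma_async_tx_descriptor *txd)
    {
        dma_cookie_complete(txd);
        dma_descriptor_unmap(txd);      /* replaces the per-driver unmap walk */

        if (txd->callback)
            txd->callback(txd->callback_param);
    }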
| @@ -3893,7 +3625,7 @@ static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan, | |||
| 3893 | 3625 | ||
| 3894 | ppc440spe_chan = to_ppc440spe_adma_chan(chan); | 3626 | ppc440spe_chan = to_ppc440spe_adma_chan(chan); |
| 3895 | ret = dma_cookie_status(chan, cookie, txstate); | 3627 | ret = dma_cookie_status(chan, cookie, txstate); |
| 3896 | if (ret == DMA_SUCCESS) | 3628 | if (ret == DMA_COMPLETE) |
| 3897 | return ret; | 3629 | return ret; |
| 3898 | 3630 | ||
| 3899 | ppc440spe_adma_slot_cleanup(ppc440spe_chan); | 3631 | ppc440spe_adma_slot_cleanup(ppc440spe_chan); |
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c index 461a91ab70bb..ab26d46bbe15 100644 --- a/drivers/dma/sa11x0-dma.c +++ b/drivers/dma/sa11x0-dma.c | |||
| @@ -436,7 +436,7 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan, | |||
| 436 | enum dma_status ret; | 436 | enum dma_status ret; |
| 437 | 437 | ||
| 438 | ret = dma_cookie_status(&c->vc.chan, cookie, state); | 438 | ret = dma_cookie_status(&c->vc.chan, cookie, state); |
| 439 | if (ret == DMA_SUCCESS) | 439 | if (ret == DMA_COMPLETE) |
| 440 | return ret; | 440 | return ret; |
| 441 | 441 | ||
| 442 | if (!state) | 442 | if (!state) |
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index d94ab592cc1b..2e7b394def80 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c | |||
| @@ -724,7 +724,7 @@ static enum dma_status shdma_tx_status(struct dma_chan *chan, | |||
| 724 | * If we don't find cookie on the queue, it has been aborted and we have | 724 | * If we don't find cookie on the queue, it has been aborted and we have |
| 725 | * to report error | 725 | * to report error |
| 726 | */ | 726 | */ |
| 727 | if (status != DMA_SUCCESS) { | 727 | if (status != DMA_COMPLETE) { |
| 728 | struct shdma_desc *sdesc; | 728 | struct shdma_desc *sdesc; |
| 729 | status = DMA_ERROR; | 729 | status = DMA_ERROR; |
| 730 | list_for_each_entry(sdesc, &schan->ld_queue, node) | 730 | list_for_each_entry(sdesc, &schan->ld_queue, node) |
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c index 1069e8869f20..0d765c0e21ec 100644 --- a/drivers/dma/sh/shdmac.c +++ b/drivers/dma/sh/shdmac.c | |||
| @@ -685,7 +685,7 @@ MODULE_DEVICE_TABLE(of, sh_dmae_of_match); | |||
| 685 | static int sh_dmae_probe(struct platform_device *pdev) | 685 | static int sh_dmae_probe(struct platform_device *pdev) |
| 686 | { | 686 | { |
| 687 | const struct sh_dmae_pdata *pdata; | 687 | const struct sh_dmae_pdata *pdata; |
| 688 | unsigned long irqflags = IRQF_DISABLED, | 688 | unsigned long irqflags = 0, |
| 689 | chan_flag[SH_DMAE_MAX_CHANNELS] = {}; | 689 | chan_flag[SH_DMAE_MAX_CHANNELS] = {}; |
| 690 | int errirq, chan_irq[SH_DMAE_MAX_CHANNELS]; | 690 | int errirq, chan_irq[SH_DMAE_MAX_CHANNELS]; |
| 691 | int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0; | 691 | int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0; |
| @@ -838,7 +838,7 @@ static int sh_dmae_probe(struct platform_device *pdev) | |||
| 838 | IORESOURCE_IRQ_SHAREABLE) | 838 | IORESOURCE_IRQ_SHAREABLE) |
| 839 | chan_flag[irq_cnt] = IRQF_SHARED; | 839 | chan_flag[irq_cnt] = IRQF_SHARED; |
| 840 | else | 840 | else |
| 841 | chan_flag[irq_cnt] = IRQF_DISABLED; | 841 | chan_flag[irq_cnt] = 0; |
| 842 | dev_dbg(&pdev->dev, | 842 | dev_dbg(&pdev->dev, |
| 843 | "Found IRQ %d for channel %d\n", | 843 | "Found IRQ %d for channel %d\n", |
| 844 | i, irq_cnt); | 844 | i, irq_cnt); |
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 82d2b97ad942..b8c031b7de4e 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #include <linux/platform_device.h> | 14 | #include <linux/platform_device.h> |
| 15 | #include <linux/clk.h> | 15 | #include <linux/clk.h> |
| 16 | #include <linux/delay.h> | 16 | #include <linux/delay.h> |
| 17 | #include <linux/log2.h> | ||
| 17 | #include <linux/pm.h> | 18 | #include <linux/pm.h> |
| 18 | #include <linux/pm_runtime.h> | 19 | #include <linux/pm_runtime.h> |
| 19 | #include <linux/err.h> | 20 | #include <linux/err.h> |
| @@ -2626,7 +2627,7 @@ static enum dma_status d40_tx_status(struct dma_chan *chan, | |||
| 2626 | } | 2627 | } |
| 2627 | 2628 | ||
| 2628 | ret = dma_cookie_status(chan, cookie, txstate); | 2629 | ret = dma_cookie_status(chan, cookie, txstate); |
| 2629 | if (ret != DMA_SUCCESS) | 2630 | if (ret != DMA_COMPLETE) |
| 2630 | dma_set_residue(txstate, stedma40_residue(chan)); | 2631 | dma_set_residue(txstate, stedma40_residue(chan)); |
| 2631 | 2632 | ||
| 2632 | if (d40_is_paused(d40c)) | 2633 | if (d40_is_paused(d40c)) |
| @@ -2796,8 +2797,8 @@ static int d40_set_runtime_config(struct dma_chan *chan, | |||
| 2796 | src_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES || | 2797 | src_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES || |
| 2797 | dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED || | 2798 | dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED || |
| 2798 | dst_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES || | 2799 | dst_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES || |
| 2799 | ((src_addr_width > 1) && (src_addr_width & 1)) || | 2800 | !is_power_of_2(src_addr_width) || |
| 2800 | ((dst_addr_width > 1) && (dst_addr_width & 1))) | 2801 | !is_power_of_2(dst_addr_width)) |
| 2801 | return -EINVAL; | 2802 | return -EINVAL; |
| 2802 | 2803 | ||
| 2803 | cfg->src_info.data_width = src_addr_width; | 2804 | cfg->src_info.data_width = src_addr_width; |
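The ste_dma40 check was subtly wrong before: `(width > 1) && (width & 1)` rejects only odd widths above one, so an invalid width of 6 slipped through; is_power_of_2() from the newly included <linux/log2.h> accepts exactly 1, 2, 4 and 8. A tiny standalone comparison of the two predicates:

    #include <stdbool.h>
    #include <stdio.h>

    /* Userspace restatement of the two predicates being swapped. */
    static bool old_ok(unsigned int w) { return !(w > 1 && (w & 1)); }
    static bool new_ok(unsigned int w) { return w && !(w & (w - 1)); } /* is_power_of_2() */

    int main(void)
    {
        unsigned int w;

        for (w = 1; w <= 8; w++)
            printf("width %u: old=%d new=%d\n", w, old_ok(w), new_ok(w));
        /* width 6: old=1 (wrongly accepted), new=0 (rejected) */
        return 0;
    }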
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 5d4986e5f5fa..73654e33f13b 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c | |||
| @@ -570,7 +570,7 @@ static void handle_once_dma_done(struct tegra_dma_channel *tdc, | |||
| 570 | 570 | ||
| 571 | list_del(&sgreq->node); | 571 | list_del(&sgreq->node); |
| 572 | if (sgreq->last_sg) { | 572 | if (sgreq->last_sg) { |
| 573 | dma_desc->dma_status = DMA_SUCCESS; | 573 | dma_desc->dma_status = DMA_COMPLETE; |
| 574 | dma_cookie_complete(&dma_desc->txd); | 574 | dma_cookie_complete(&dma_desc->txd); |
| 575 | if (!dma_desc->cb_count) | 575 | if (!dma_desc->cb_count) |
| 576 | list_add_tail(&dma_desc->cb_node, &tdc->cb_desc); | 576 | list_add_tail(&dma_desc->cb_node, &tdc->cb_desc); |
| @@ -768,7 +768,7 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc, | |||
| 768 | unsigned int residual; | 768 | unsigned int residual; |
| 769 | 769 | ||
| 770 | ret = dma_cookie_status(dc, cookie, txstate); | 770 | ret = dma_cookie_status(dc, cookie, txstate); |
| 771 | if (ret == DMA_SUCCESS) | 771 | if (ret == DMA_COMPLETE) |
| 772 | return ret; | 772 | return ret; |
| 773 | 773 | ||
| 774 | spin_lock_irqsave(&tdc->lock, flags); | 774 | spin_lock_irqsave(&tdc->lock, flags); |
| @@ -1018,7 +1018,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( | |||
| 1018 | return &dma_desc->txd; | 1018 | return &dma_desc->txd; |
| 1019 | } | 1019 | } |
| 1020 | 1020 | ||
| 1021 | struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( | 1021 | static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( |
| 1022 | struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len, | 1022 | struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len, |
| 1023 | size_t period_len, enum dma_transfer_direction direction, | 1023 | size_t period_len, enum dma_transfer_direction direction, |
| 1024 | unsigned long flags, void *context) | 1024 | unsigned long flags, void *context) |
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index 28af214fce04..4506a7b4f972 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c | |||
| @@ -154,38 +154,6 @@ static bool __td_dma_done_ack(struct timb_dma_chan *td_chan) | |||
| 154 | return done; | 154 | return done; |
| 155 | } | 155 | } |
| 156 | 156 | ||
| 157 | static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc, | ||
| 158 | bool single) | ||
| 159 | { | ||
| 160 | dma_addr_t addr; | ||
| 161 | int len; | ||
| 162 | |||
| 163 | addr = (dma_desc[7] << 24) | (dma_desc[6] << 16) | (dma_desc[5] << 8) | | ||
| 164 | dma_desc[4]; | ||
| 165 | |||
| 166 | len = (dma_desc[3] << 8) | dma_desc[2]; | ||
| 167 | |||
| 168 | if (single) | ||
| 169 | dma_unmap_single(chan2dev(&td_chan->chan), addr, len, | ||
| 170 | DMA_TO_DEVICE); | ||
| 171 | else | ||
| 172 | dma_unmap_page(chan2dev(&td_chan->chan), addr, len, | ||
| 173 | DMA_TO_DEVICE); | ||
| 174 | } | ||
| 175 | |||
| 176 | static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single) | ||
| 177 | { | ||
| 178 | struct timb_dma_chan *td_chan = container_of(td_desc->txd.chan, | ||
| 179 | struct timb_dma_chan, chan); | ||
| 180 | u8 *descs; | ||
| 181 | |||
| 182 | for (descs = td_desc->desc_list; ; descs += TIMB_DMA_DESC_SIZE) { | ||
| 183 | __td_unmap_desc(td_chan, descs, single); | ||
| 184 | if (descs[0] & 0x02) | ||
| 185 | break; | ||
| 186 | } | ||
| 187 | } | ||
| 188 | |||
| 189 | static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc, | 157 | static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc, |
| 190 | struct scatterlist *sg, bool last) | 158 | struct scatterlist *sg, bool last) |
| 191 | { | 159 | { |
| @@ -293,10 +261,7 @@ static void __td_finish(struct timb_dma_chan *td_chan) | |||
| 293 | 261 | ||
| 294 | list_move(&td_desc->desc_node, &td_chan->free_list); | 262 | list_move(&td_desc->desc_node, &td_chan->free_list); |
| 295 | 263 | ||
| 296 | if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) | 264 | dma_descriptor_unmap(txd); |
| 297 | __td_unmap_descs(td_desc, | ||
| 298 | txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE); | ||
| 299 | |||
| 300 | /* | 265 | /* |
| 301 | * The API requires that no submissions are done from a | 266 | * The API requires that no submissions are done from a |
| 302 | * callback, so we don't need to drop the lock here | 267 | * callback, so we don't need to drop the lock here |
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c index 71e8e775189e..bae6c29f5502 100644 --- a/drivers/dma/txx9dmac.c +++ b/drivers/dma/txx9dmac.c | |||
| @@ -419,30 +419,7 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc, | |||
| 419 | list_splice_init(&desc->tx_list, &dc->free_list); | 419 | list_splice_init(&desc->tx_list, &dc->free_list); |
| 420 | list_move(&desc->desc_node, &dc->free_list); | 420 | list_move(&desc->desc_node, &dc->free_list); |
| 421 | 421 | ||
| 422 | if (!ds) { | 422 | dma_descriptor_unmap(txd); |
| 423 | dma_addr_t dmaaddr; | ||
| 424 | if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
| 425 | dmaaddr = is_dmac64(dc) ? | ||
| 426 | desc->hwdesc.DAR : desc->hwdesc32.DAR; | ||
| 427 | if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) | ||
| 428 | dma_unmap_single(chan2parent(&dc->chan), | ||
| 429 | dmaaddr, desc->len, DMA_FROM_DEVICE); | ||
| 430 | else | ||
| 431 | dma_unmap_page(chan2parent(&dc->chan), | ||
| 432 | dmaaddr, desc->len, DMA_FROM_DEVICE); | ||
| 433 | } | ||
| 434 | if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
| 435 | dmaaddr = is_dmac64(dc) ? | ||
| 436 | desc->hwdesc.SAR : desc->hwdesc32.SAR; | ||
| 437 | if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) | ||
| 438 | dma_unmap_single(chan2parent(&dc->chan), | ||
| 439 | dmaaddr, desc->len, DMA_TO_DEVICE); | ||
| 440 | else | ||
| 441 | dma_unmap_page(chan2parent(&dc->chan), | ||
| 442 | dmaaddr, desc->len, DMA_TO_DEVICE); | ||
| 443 | } | ||
| 444 | } | ||
| 445 | |||
| 446 | /* | 423 | /* |
| 447 | * The API requires that no submissions are done from a | 424 | * The API requires that no submissions are done from a |
| 448 | * callback, so we don't need to drop the lock here | 425 | * callback, so we don't need to drop the lock here |
| @@ -962,8 +939,8 @@ txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | |||
| 962 | enum dma_status ret; | 939 | enum dma_status ret; |
| 963 | 940 | ||
| 964 | ret = dma_cookie_status(chan, cookie, txstate); | 941 | ret = dma_cookie_status(chan, cookie, txstate); |
| 965 | if (ret == DMA_SUCCESS) | 942 | if (ret == DMA_COMPLETE) |
| 966 | return DMA_SUCCESS; | 943 | return DMA_COMPLETE; |
| 967 | 944 | ||
| 968 | spin_lock_bh(&dc->lock); | 945 | spin_lock_bh(&dc->lock); |
| 969 | txx9dmac_scan_descriptors(dc); | 946 | txx9dmac_scan_descriptors(dc); |
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 7dd446150294..4e10b10d3ddd 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #include <linux/acpi_gpio.h> | 13 | #include <linux/acpi_gpio.h> |
| 14 | #include <linux/idr.h> | 14 | #include <linux/idr.h> |
| 15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
| 16 | #include <linux/acpi.h> | ||
| 16 | 17 | ||
| 17 | #define CREATE_TRACE_POINTS | 18 | #define CREATE_TRACE_POINTS |
| 18 | #include <trace/events/gpio.h> | 19 | #include <trace/events/gpio.h> |
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c index 43959edd4291..dfff0907f70e 100644 --- a/drivers/gpu/drm/i915/intel_acpi.c +++ b/drivers/gpu/drm/i915/intel_acpi.c | |||
| @@ -196,7 +196,7 @@ static bool intel_dsm_pci_probe(struct pci_dev *pdev) | |||
| 196 | acpi_handle dhandle; | 196 | acpi_handle dhandle; |
| 197 | int ret; | 197 | int ret; |
| 198 | 198 | ||
| 199 | dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); | 199 | dhandle = ACPI_HANDLE(&pdev->dev); |
| 200 | if (!dhandle) | 200 | if (!dhandle) |
| 201 | return false; | 201 | return false; |
| 202 | 202 | ||
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 1b2f41c3f191..6d69a9bad865 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c | |||
| @@ -638,7 +638,7 @@ static void intel_didl_outputs(struct drm_device *dev) | |||
| 638 | u32 temp; | 638 | u32 temp; |
| 639 | int i = 0; | 639 | int i = 0; |
| 640 | 640 | ||
| 641 | handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev); | 641 | handle = ACPI_HANDLE(&dev->pdev->dev); |
| 642 | if (!handle || acpi_bus_get_device(handle, &acpi_dev)) | 642 | if (!handle || acpi_bus_get_device(handle, &acpi_dev)) |
| 643 | return; | 643 | return; |
| 644 | 644 | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c index e286e132c7e7..129120473f6c 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c | |||
| @@ -116,7 +116,7 @@ mxm_shadow_dsm(struct nouveau_mxm *mxm, u8 version) | |||
| 116 | acpi_handle handle; | 116 | acpi_handle handle; |
| 117 | int ret; | 117 | int ret; |
| 118 | 118 | ||
| 119 | handle = DEVICE_ACPI_HANDLE(&device->pdev->dev); | 119 | handle = ACPI_HANDLE(&device->pdev->dev); |
| 120 | if (!handle) | 120 | if (!handle) |
| 121 | return false; | 121 | return false; |
| 122 | 122 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index 07273a2ae62f..95c740454049 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c | |||
| @@ -256,7 +256,7 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev) | |||
| 256 | acpi_handle dhandle; | 256 | acpi_handle dhandle; |
| 257 | int retval = 0; | 257 | int retval = 0; |
| 258 | 258 | ||
| 259 | dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); | 259 | dhandle = ACPI_HANDLE(&pdev->dev); |
| 260 | if (!dhandle) | 260 | if (!dhandle) |
| 261 | return false; | 261 | return false; |
| 262 | 262 | ||
| @@ -414,7 +414,7 @@ bool nouveau_acpi_rom_supported(struct pci_dev *pdev) | |||
| 414 | if (!nouveau_dsm_priv.dsm_detected && !nouveau_dsm_priv.optimus_detected) | 414 | if (!nouveau_dsm_priv.dsm_detected && !nouveau_dsm_priv.optimus_detected) |
| 415 | return false; | 415 | return false; |
| 416 | 416 | ||
| 417 | dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); | 417 | dhandle = ACPI_HANDLE(&pdev->dev); |
| 418 | if (!dhandle) | 418 | if (!dhandle) |
| 419 | return false; | 419 | return false; |
| 420 | 420 | ||
| @@ -448,7 +448,7 @@ nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) | |||
| 448 | return NULL; | 448 | return NULL; |
| 449 | } | 449 | } |
| 450 | 450 | ||
| 451 | handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev); | 451 | handle = ACPI_HANDLE(&dev->pdev->dev); |
| 452 | if (!handle) | 452 | if (!handle) |
| 453 | return NULL; | 453 | return NULL; |
| 454 | 454 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c index 10f98c7742d8..98a9074b306b 100644 --- a/drivers/gpu/drm/radeon/radeon_acpi.c +++ b/drivers/gpu/drm/radeon/radeon_acpi.c | |||
| @@ -369,7 +369,7 @@ int radeon_atif_handler(struct radeon_device *rdev, | |||
| 369 | return NOTIFY_DONE; | 369 | return NOTIFY_DONE; |
| 370 | 370 | ||
| 371 | /* Check pending SBIOS requests */ | 371 | /* Check pending SBIOS requests */ |
| 372 | handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev); | 372 | handle = ACPI_HANDLE(&rdev->pdev->dev); |
| 373 | count = radeon_atif_get_sbios_requests(handle, &req); | 373 | count = radeon_atif_get_sbios_requests(handle, &req); |
| 374 | 374 | ||
| 375 | if (count <= 0) | 375 | if (count <= 0) |
| @@ -556,7 +556,7 @@ int radeon_acpi_pcie_notify_device_ready(struct radeon_device *rdev) | |||
| 556 | struct radeon_atcs *atcs = &rdev->atcs; | 556 | struct radeon_atcs *atcs = &rdev->atcs; |
| 557 | 557 | ||
| 558 | /* Get the device handle */ | 558 | /* Get the device handle */ |
| 559 | handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev); | 559 | handle = ACPI_HANDLE(&rdev->pdev->dev); |
| 560 | if (!handle) | 560 | if (!handle) |
| 561 | return -EINVAL; | 561 | return -EINVAL; |
| 562 | 562 | ||
| @@ -596,7 +596,7 @@ int radeon_acpi_pcie_performance_request(struct radeon_device *rdev, | |||
| 596 | u32 retry = 3; | 596 | u32 retry = 3; |
| 597 | 597 | ||
| 598 | /* Get the device handle */ | 598 | /* Get the device handle */ |
| 599 | handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev); | 599 | handle = ACPI_HANDLE(&rdev->pdev->dev); |
| 600 | if (!handle) | 600 | if (!handle) |
| 601 | return -EINVAL; | 601 | return -EINVAL; |
| 602 | 602 | ||
| @@ -699,7 +699,7 @@ int radeon_acpi_init(struct radeon_device *rdev) | |||
| 699 | int ret; | 699 | int ret; |
| 700 | 700 | ||
| 701 | /* Get the device handle */ | 701 | /* Get the device handle */ |
| 702 | handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev); | 702 | handle = ACPI_HANDLE(&rdev->pdev->dev); |
| 703 | 703 | ||
| 704 | /* No need to proceed if we're sure that ATIF is not supported */ | 704 | /* No need to proceed if we're sure that ATIF is not supported */ |
| 705 | if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle) | 705 | if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle) |
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c index 6153ec18943a..9d302eaeea15 100644 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c | |||
| @@ -8,8 +8,7 @@ | |||
| 8 | */ | 8 | */ |
| 9 | #include <linux/vga_switcheroo.h> | 9 | #include <linux/vga_switcheroo.h> |
| 10 | #include <linux/slab.h> | 10 | #include <linux/slab.h> |
| 11 | #include <acpi/acpi.h> | 11 | #include <linux/acpi.h> |
| 12 | #include <acpi/acpi_bus.h> | ||
| 13 | #include <linux/pci.h> | 12 | #include <linux/pci.h> |
| 14 | 13 | ||
| 15 | #include "radeon_acpi.h" | 14 | #include "radeon_acpi.h" |
| @@ -447,7 +446,7 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev) | |||
| 447 | acpi_handle dhandle, atpx_handle; | 446 | acpi_handle dhandle, atpx_handle; |
| 448 | acpi_status status; | 447 | acpi_status status; |
| 449 | 448 | ||
| 450 | dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); | 449 | dhandle = ACPI_HANDLE(&pdev->dev); |
| 451 | if (!dhandle) | 450 | if (!dhandle) |
| 452 | return false; | 451 | return false; |
| 453 | 452 | ||
| @@ -493,7 +492,7 @@ static int radeon_atpx_init(void) | |||
| 493 | */ | 492 | */ |
| 494 | static int radeon_atpx_get_client_id(struct pci_dev *pdev) | 493 | static int radeon_atpx_get_client_id(struct pci_dev *pdev) |
| 495 | { | 494 | { |
| 496 | if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev)) | 495 | if (radeon_atpx_priv.dhandle == ACPI_HANDLE(&pdev->dev)) |
| 497 | return VGA_SWITCHEROO_IGD; | 496 | return VGA_SWITCHEROO_IGD; |
| 498 | else | 497 | else |
| 499 | return VGA_SWITCHEROO_DIS; | 498 | return VGA_SWITCHEROO_DIS; |
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index c155d6f3fa68..b3633d9a5317 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c | |||
| @@ -185,7 +185,7 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev) | |||
| 185 | return false; | 185 | return false; |
| 186 | 186 | ||
| 187 | while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { | 187 | while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { |
| 188 | dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); | 188 | dhandle = ACPI_HANDLE(&pdev->dev); |
| 189 | if (!dhandle) | 189 | if (!dhandle) |
| 190 | continue; | 190 | continue; |
| 191 | 191 | ||
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index ae48d18ee315..5f7e55f4b7f0 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c | |||
| @@ -1008,7 +1008,7 @@ static int i2c_hid_probe(struct i2c_client *client, | |||
| 1008 | hid->hid_get_raw_report = i2c_hid_get_raw_report; | 1008 | hid->hid_get_raw_report = i2c_hid_get_raw_report; |
| 1009 | hid->hid_output_raw_report = i2c_hid_output_raw_report; | 1009 | hid->hid_output_raw_report = i2c_hid_output_raw_report; |
| 1010 | hid->dev.parent = &client->dev; | 1010 | hid->dev.parent = &client->dev; |
| 1011 | ACPI_HANDLE_SET(&hid->dev, ACPI_HANDLE(&client->dev)); | 1011 | ACPI_COMPANION_SET(&hid->dev, ACPI_COMPANION(&client->dev)); |
| 1012 | hid->bus = BUS_I2C; | 1012 | hid->bus = BUS_I2C; |
| 1013 | hid->version = le16_to_cpu(ihid->hdesc.bcdVersion); | 1013 | hid->version = le16_to_cpu(ihid->hdesc.bcdVersion); |
| 1014 | hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID); | 1014 | hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID); |
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 5923cfa390c8..d74c0b34248e 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c | |||
| @@ -615,6 +615,22 @@ void i2c_unlock_adapter(struct i2c_adapter *adapter) | |||
| 615 | } | 615 | } |
| 616 | EXPORT_SYMBOL_GPL(i2c_unlock_adapter); | 616 | EXPORT_SYMBOL_GPL(i2c_unlock_adapter); |
| 617 | 617 | ||
| 618 | static void i2c_dev_set_name(struct i2c_adapter *adap, | ||
| 619 | struct i2c_client *client) | ||
| 620 | { | ||
| 621 | struct acpi_device *adev = ACPI_COMPANION(&client->dev); | ||
| 622 | |||
| 623 | if (adev) { | ||
| 624 | dev_set_name(&client->dev, "i2c-%s", acpi_dev_name(adev)); | ||
| 625 | return; | ||
| 626 | } | ||
| 627 | |||
| 628 | /* For 10-bit clients, add an arbitrary offset to avoid collisions */ | ||
| 629 | dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap), | ||
| 630 | client->addr | ((client->flags & I2C_CLIENT_TEN) | ||
| 631 | ? 0xa000 : 0)); | ||
| 632 | } | ||
| 633 | |||
| 618 | /** | 634 | /** |
| 619 | * i2c_new_device - instantiate an i2c device | 635 | * i2c_new_device - instantiate an i2c device |
| 620 | * @adap: the adapter managing the device | 636 | * @adap: the adapter managing the device |
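i2c_dev_set_name() changes what ACPI-enumerated clients are called in sysfs: they take the companion device's name (an "i2c-INT33C2:00" style id) rather than the adapter/address pair, so the name no longer shifts when adapters are renumbered. An illustration of the two forms the helper can produce (the ACPI id, adapter number and client address are all made up):

    #include <stdio.h>

    /* The two name forms produced by i2c_dev_set_name(); every value
     * below is invented for illustration. */
    int main(void)
    {
        char name[32];
        const char *acpi_id = "INT33C2:00";   /* hypothetical ACPI companion */
        int adap_nr = 0;
        unsigned int addr = 0x4a;             /* 7-bit client address */

        snprintf(name, sizeof(name), "i2c-%s", acpi_id);
        printf("ACPI-enumerated: %s\n", name);        /* i2c-INT33C2:00 */

        snprintf(name, sizeof(name), "%d-%04x", adap_nr, addr);
        printf("address-based:   %s\n", name);        /* 0-004a */
        return 0;
    }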
| @@ -671,12 +687,9 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info) | |||
| 671 | client->dev.bus = &i2c_bus_type; | 687 | client->dev.bus = &i2c_bus_type; |
| 672 | client->dev.type = &i2c_client_type; | 688 | client->dev.type = &i2c_client_type; |
| 673 | client->dev.of_node = info->of_node; | 689 | client->dev.of_node = info->of_node; |
| 674 | ACPI_HANDLE_SET(&client->dev, info->acpi_node.handle); | 690 | ACPI_COMPANION_SET(&client->dev, info->acpi_node.companion); |
| 675 | 691 | ||
| 676 | /* For 10-bit clients, add an arbitrary offset to avoid collisions */ | 692 | i2c_dev_set_name(adap, client); |
| 677 | dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap), | ||
| 678 | client->addr | ((client->flags & I2C_CLIENT_TEN) | ||
| 679 | ? 0xa000 : 0)); | ||
| 680 | status = device_register(&client->dev); | 693 | status = device_register(&client->dev); |
| 681 | if (status) | 694 | if (status) |
| 682 | goto out_err; | 695 | goto out_err; |
| @@ -1100,7 +1113,7 @@ static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level, | |||
| 1100 | return AE_OK; | 1113 | return AE_OK; |
| 1101 | 1114 | ||
| 1102 | memset(&info, 0, sizeof(info)); | 1115 | memset(&info, 0, sizeof(info)); |
| 1103 | info.acpi_node.handle = handle; | 1116 | info.acpi_node.companion = adev; |
| 1104 | info.irq = -1; | 1117 | info.irq = -1; |
| 1105 | 1118 | ||
| 1106 | INIT_LIST_HEAD(&resource_list); | 1119 | INIT_LIST_HEAD(&resource_list); |
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c index 140c8ef50529..d9e1f7ccfe6f 100644 --- a/drivers/ide/ide-acpi.c +++ b/drivers/ide/ide-acpi.c | |||
| @@ -7,6 +7,7 @@ | |||
| 7 | * Copyright (C) 2006 Hannes Reinecke | 7 | * Copyright (C) 2006 Hannes Reinecke |
| 8 | */ | 8 | */ |
| 9 | 9 | ||
| 10 | #include <linux/acpi.h> | ||
| 10 | #include <linux/ata.h> | 11 | #include <linux/ata.h> |
| 11 | #include <linux/delay.h> | 12 | #include <linux/delay.h> |
| 12 | #include <linux/device.h> | 13 | #include <linux/device.h> |
| @@ -19,8 +20,6 @@ | |||
| 19 | #include <linux/dmi.h> | 20 | #include <linux/dmi.h> |
| 20 | #include <linux/module.h> | 21 | #include <linux/module.h> |
| 21 | 22 | ||
| 22 | #include <acpi/acpi_bus.h> | ||
| 23 | |||
| 24 | #define REGS_PER_GTF 7 | 23 | #define REGS_PER_GTF 7 |
| 25 | 24 | ||
| 26 | struct GTM_buffer { | 25 | struct GTM_buffer { |
| @@ -128,7 +127,7 @@ static int ide_get_dev_handle(struct device *dev, acpi_handle *handle, | |||
| 128 | 127 | ||
| 129 | DEBPRINT("ENTER: pci %02x:%02x.%01x\n", bus, devnum, func); | 128 | DEBPRINT("ENTER: pci %02x:%02x.%01x\n", bus, devnum, func); |
| 130 | 129 | ||
| 131 | dev_handle = DEVICE_ACPI_HANDLE(dev); | 130 | dev_handle = ACPI_HANDLE(dev); |
| 132 | if (!dev_handle) { | 131 | if (!dev_handle) { |
| 133 | DEBPRINT("no acpi handle for device\n"); | 132 | DEBPRINT("no acpi handle for device\n"); |
| 134 | goto err; | 133 | goto err; |
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 3226ce98fb18..cbd4e9abc47e 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * intel_idle.c - native hardware idle loop for modern Intel processors | 2 | * intel_idle.c - native hardware idle loop for modern Intel processors |
| 3 | * | 3 | * |
| 4 | * Copyright (c) 2010, Intel Corporation. | 4 | * Copyright (c) 2013, Intel Corporation. |
| 5 | * Len Brown <len.brown@intel.com> | 5 | * Len Brown <len.brown@intel.com> |
| 6 | * | 6 | * |
| 7 | * This program is free software; you can redistribute it and/or modify it | 7 | * This program is free software; you can redistribute it and/or modify it |
| @@ -329,6 +329,22 @@ static struct cpuidle_state atom_cstates[] __initdata = { | |||
| 329 | { | 329 | { |
| 330 | .enter = NULL } | 330 | .enter = NULL } |
| 331 | }; | 331 | }; |
| 332 | static struct cpuidle_state avn_cstates[CPUIDLE_STATE_MAX] = { | ||
| 333 | { | ||
| 334 | .name = "C1-AVN", | ||
| 335 | .desc = "MWAIT 0x00", | ||
| 336 | .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID, | ||
| 337 | .exit_latency = 2, | ||
| 338 | .target_residency = 2, | ||
| 339 | .enter = &intel_idle }, | ||
| 340 | { | ||
| 341 | .name = "C6-AVN", | ||
| 342 | .desc = "MWAIT 0x51", | ||
| 343 | .flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, | ||
| 344 | .exit_latency = 15, | ||
| 345 | .target_residency = 45, | ||
| 346 | .enter = &intel_idle }, | ||
| 347 | }; | ||
| 332 | 348 | ||
| 333 | /** | 349 | /** |
| 334 | * intel_idle | 350 | * intel_idle |
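The new avn_cstates table packs each state's MWAIT hint into the cpuidle flags word with MWAIT2flg(), so intel_idle() can recover the hint at entry time without a separate lookup. A standalone restatement of that encode/decode pair, mirroring the macros this driver uses (the hint value is taken from the C6-AVN entry above):

    #include <stdio.h>

    /* Mirrors the intel_idle macros: the MWAIT hint rides in the top
     * byte of the cpuidle flags word. */
    #define MWAIT2flg(eax)      (((eax) & 0xFF) << 24)
    #define flg2MWAIT(flags)    (((flags) >> 24) & 0xFF)

    int main(void)
    {
        unsigned int flags = MWAIT2flg(0x58);           /* C6-AVN entry above */

        printf("hint = 0x%02x\n", flg2MWAIT(flags));    /* -> 0x58 */
        return 0;
    }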
| @@ -462,6 +478,11 @@ static const struct idle_cpu idle_cpu_hsw = { | |||
| 462 | .disable_promotion_to_c1e = true, | 478 | .disable_promotion_to_c1e = true, |
| 463 | }; | 479 | }; |
| 464 | 480 | ||
| 481 | static const struct idle_cpu idle_cpu_avn = { | ||
| 482 | .state_table = avn_cstates, | ||
| 483 | .disable_promotion_to_c1e = true, | ||
| 484 | }; | ||
| 485 | |||
| 465 | #define ICPU(model, cpu) \ | 486 | #define ICPU(model, cpu) \ |
| 466 | { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu } | 487 | { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu } |
| 467 | 488 | ||
| @@ -483,6 +504,7 @@ static const struct x86_cpu_id intel_idle_ids[] = { | |||
| 483 | ICPU(0x3f, idle_cpu_hsw), | 504 | ICPU(0x3f, idle_cpu_hsw), |
| 484 | ICPU(0x45, idle_cpu_hsw), | 505 | ICPU(0x45, idle_cpu_hsw), |
| 485 | ICPU(0x46, idle_cpu_hsw), | 506 | ICPU(0x46, idle_cpu_hsw), |
| 507 | ICPU(0x4D, idle_cpu_avn), | ||
| 486 | {} | 508 | {} |
| 487 | }; | 509 | }; |
| 488 | MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids); | 510 | MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids); |
diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c index baf2686aa8eb..02125e6a9109 100644 --- a/drivers/isdn/isdnloop/isdnloop.c +++ b/drivers/isdn/isdnloop/isdnloop.c | |||
| @@ -1083,8 +1083,10 @@ isdnloop_start(isdnloop_card *card, isdnloop_sdef *sdefp) | |||
| 1083 | spin_unlock_irqrestore(&card->isdnloop_lock, flags); | 1083 | spin_unlock_irqrestore(&card->isdnloop_lock, flags); |
| 1084 | return -ENOMEM; | 1084 | return -ENOMEM; |
| 1085 | } | 1085 | } |
| 1086 | for (i = 0; i < 3; i++) | 1086 | for (i = 0; i < 3; i++) { |
| 1087 | strcpy(card->s0num[i], sdef.num[i]); | 1087 | strlcpy(card->s0num[i], sdef.num[i], |
| 1088 | sizeof(card->s0num[0])); | ||
| 1089 | } | ||
| 1088 | break; | 1090 | break; |
| 1089 | case ISDN_PTYPE_1TR6: | 1091 | case ISDN_PTYPE_1TR6: |
| 1090 | if (isdnloop_fake(card, "DRV1.04TC-1TR6-CAPI-CNS-BASIS-29.11.95", | 1092 | if (isdnloop_fake(card, "DRV1.04TC-1TR6-CAPI-CNS-BASIS-29.11.95", |
| @@ -1097,7 +1099,7 @@ isdnloop_start(isdnloop_card *card, isdnloop_sdef *sdefp) | |||
| 1097 | spin_unlock_irqrestore(&card->isdnloop_lock, flags); | 1099 | spin_unlock_irqrestore(&card->isdnloop_lock, flags); |
| 1098 | return -ENOMEM; | 1100 | return -ENOMEM; |
| 1099 | } | 1101 | } |
| 1100 | strcpy(card->s0num[0], sdef.num[0]); | 1102 | strlcpy(card->s0num[0], sdef.num[0], sizeof(card->s0num[0])); |
| 1101 | card->s0num[1][0] = '\0'; | 1103 | card->s0num[1][0] = '\0'; |
| 1102 | card->s0num[2][0] = '\0'; | 1104 | card->s0num[2][0] = '\0'; |
| 1103 | break; | 1105 | break; |
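The isdnloop numbers arrive from userspace via sdef, so the old strcpy() calls could overrun the fixed-size card->s0num[] buffers; strlcpy() bounds the copy and guarantees NUL termination. A standalone demonstration of the semantics (buffer size and input are arbitrary, and my_strlcpy is a local stand-in for the kernel helper):

    #include <stdio.h>
    #include <string.h>

    /* Minimal strlcpy with BSD/kernel semantics: copy at most size-1
     * bytes, always NUL-terminate, return strlen(src). */
    static size_t my_strlcpy(char *dst, const char *src, size_t size)
    {
        size_t len = strlen(src);

        if (size) {
            size_t n = len < size - 1 ? len : size - 1;

            memcpy(dst, src, n);
            dst[n] = '\0';
        }
        return len;
    }

    int main(void)
    {
        char s0num[8];
        const char *from_user = "012345678901234";  /* longer than the buffer */

        if (my_strlcpy(s0num, from_user, sizeof(s0num)) >= sizeof(s0num))
            printf("input truncated to \"%s\"\n", s0num);
        return 0;
    }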
diff --git a/drivers/md/md.c b/drivers/md/md.c index 8766eabb0014..b6b7a2866c9e 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
| @@ -112,7 +112,7 @@ static inline int speed_max(struct mddev *mddev) | |||
| 112 | 112 | ||
| 113 | static struct ctl_table_header *raid_table_header; | 113 | static struct ctl_table_header *raid_table_header; |
| 114 | 114 | ||
| 115 | static ctl_table raid_table[] = { | 115 | static struct ctl_table raid_table[] = { |
| 116 | { | 116 | { |
| 117 | .procname = "speed_limit_min", | 117 | .procname = "speed_limit_min", |
| 118 | .data = &sysctl_speed_limit_min, | 118 | .data = &sysctl_speed_limit_min, |
| @@ -130,7 +130,7 @@ static ctl_table raid_table[] = { | |||
| 130 | { } | 130 | { } |
| 131 | }; | 131 | }; |
| 132 | 132 | ||
| 133 | static ctl_table raid_dir_table[] = { | 133 | static struct ctl_table raid_dir_table[] = { |
| 134 | { | 134 | { |
| 135 | .procname = "raid", | 135 | .procname = "raid", |
| 136 | .maxlen = 0, | 136 | .maxlen = 0, |
| @@ -140,7 +140,7 @@ static ctl_table raid_dir_table[] = { | |||
| 140 | { } | 140 | { } |
| 141 | }; | 141 | }; |
| 142 | 142 | ||
| 143 | static ctl_table raid_root_table[] = { | 143 | static struct ctl_table raid_root_table[] = { |
| 144 | { | 144 | { |
| 145 | .procname = "dev", | 145 | .procname = "dev", |
| 146 | .maxlen = 0, | 146 | .maxlen = 0, |
| @@ -562,11 +562,19 @@ static struct mddev * mddev_find(dev_t unit) | |||
| 562 | goto retry; | 562 | goto retry; |
| 563 | } | 563 | } |
| 564 | 564 | ||
| 565 | static inline int mddev_lock(struct mddev * mddev) | 565 | static inline int __must_check mddev_lock(struct mddev * mddev) |
| 566 | { | 566 | { |
| 567 | return mutex_lock_interruptible(&mddev->reconfig_mutex); | 567 | return mutex_lock_interruptible(&mddev->reconfig_mutex); |
| 568 | } | 568 | } |
| 569 | 569 | ||
| 570 | /* Sometimes we need to take the lock in a situation where | ||
| 571 | * failure due to interrupts is not acceptable. | ||
| 572 | */ | ||
| 573 | static inline void mddev_lock_nointr(struct mddev * mddev) | ||
| 574 | { | ||
| 575 | mutex_lock(&mddev->reconfig_mutex); | ||
| 576 | } | ||
| 577 | |||
| 570 | static inline int mddev_is_locked(struct mddev *mddev) | 578 | static inline int mddev_is_locked(struct mddev *mddev) |
| 571 | { | 579 | { |
| 572 | return mutex_is_locked(&mddev->reconfig_mutex); | 580 | return mutex_is_locked(&mddev->reconfig_mutex); |
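mddev_lock() uses mutex_lock_interruptible(), so it can fail with -EINTR; the new __must_check forces callers to handle that, and mddev_lock_nointr() serves the call sites converted below, where backing out on a signal is not acceptable. A pseudocode-level sketch of the two caller patterns (mddev_lock(), mddev_lock_nointr() and mddev_unlock() are all local to md.c, so this is illustrative only):

    /* Path driven by a user action: a signal may legitimately abort it. */
    static int foo_user_triggered(struct mddev *mddev)
    {
        int err = mddev_lock(mddev);    /* __must_check: may return -EINTR */

        if (err)
            return err;                 /* a signal arrived; back out cleanly */
        /* ... reconfigure under reconfig_mutex ... */
        mddev_unlock(mddev);
        return 0;
    }

    /* Internal path: the work must complete, signals notwithstanding. */
    static void foo_internal_path(struct mddev *mddev)
    {
        mddev_lock_nointr(mddev);       /* never fails with -EINTR */
        /* ... work that cannot tolerate interruption ... */
        mddev_unlock(mddev);
    }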
| @@ -2978,7 +2986,7 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len) | |||
| 2978 | for_each_mddev(mddev, tmp) { | 2986 | for_each_mddev(mddev, tmp) { |
| 2979 | struct md_rdev *rdev2; | 2987 | struct md_rdev *rdev2; |
| 2980 | 2988 | ||
| 2981 | mddev_lock(mddev); | 2989 | mddev_lock_nointr(mddev); |
| 2982 | rdev_for_each(rdev2, mddev) | 2990 | rdev_for_each(rdev2, mddev) |
| 2983 | if (rdev->bdev == rdev2->bdev && | 2991 | if (rdev->bdev == rdev2->bdev && |
| 2984 | rdev != rdev2 && | 2992 | rdev != rdev2 && |
| @@ -2994,7 +3002,7 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len) | |||
| 2994 | break; | 3002 | break; |
| 2995 | } | 3003 | } |
| 2996 | } | 3004 | } |
| 2997 | mddev_lock(my_mddev); | 3005 | mddev_lock_nointr(my_mddev); |
| 2998 | if (overlap) { | 3006 | if (overlap) { |
| 2999 | /* Someone else could have slipped in a size | 3007 | /* Someone else could have slipped in a size |
| 3000 | * change here, but doing so is just silly. | 3008 | * change here, but doing so is just silly. |
| @@ -3580,6 +3588,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len) | |||
| 3580 | mddev->in_sync = 1; | 3588 | mddev->in_sync = 1; |
| 3581 | del_timer_sync(&mddev->safemode_timer); | 3589 | del_timer_sync(&mddev->safemode_timer); |
| 3582 | } | 3590 | } |
| 3591 | blk_set_stacking_limits(&mddev->queue->limits); | ||
| 3583 | pers->run(mddev); | 3592 | pers->run(mddev); |
| 3584 | set_bit(MD_CHANGE_DEVS, &mddev->flags); | 3593 | set_bit(MD_CHANGE_DEVS, &mddev->flags); |
| 3585 | mddev_resume(mddev); | 3594 | mddev_resume(mddev); |
| @@ -5258,7 +5267,7 @@ static void __md_stop_writes(struct mddev *mddev) | |||
| 5258 | 5267 | ||
| 5259 | void md_stop_writes(struct mddev *mddev) | 5268 | void md_stop_writes(struct mddev *mddev) |
| 5260 | { | 5269 | { |
| 5261 | mddev_lock(mddev); | 5270 | mddev_lock_nointr(mddev); |
| 5262 | __md_stop_writes(mddev); | 5271 | __md_stop_writes(mddev); |
| 5263 | mddev_unlock(mddev); | 5272 | mddev_unlock(mddev); |
| 5264 | } | 5273 | } |
| @@ -5291,20 +5300,35 @@ EXPORT_SYMBOL_GPL(md_stop); | |||
| 5291 | static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) | 5300 | static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) |
| 5292 | { | 5301 | { |
| 5293 | int err = 0; | 5302 | int err = 0; |
| 5303 | int did_freeze = 0; | ||
| 5304 | |||
| 5305 | if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { | ||
| 5306 | did_freeze = 1; | ||
| 5307 | set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); | ||
| 5308 | md_wakeup_thread(mddev->thread); | ||
| 5309 | } | ||
| 5310 | if (mddev->sync_thread) { | ||
| 5311 | set_bit(MD_RECOVERY_INTR, &mddev->recovery); | ||
| 5312 | /* Thread might be blocked waiting for metadata update | ||
| 5313 | * which will now never happen */ | ||
| 5314 | wake_up_process(mddev->sync_thread->tsk); | ||
| 5315 | } | ||
| 5316 | mddev_unlock(mddev); | ||
| 5317 | wait_event(resync_wait, mddev->sync_thread == NULL); | ||
| 5318 | mddev_lock_nointr(mddev); | ||
| 5319 | |||
| 5294 | mutex_lock(&mddev->open_mutex); | 5320 | mutex_lock(&mddev->open_mutex); |
| 5295 | if (atomic_read(&mddev->openers) > !!bdev) { | 5321 | if (atomic_read(&mddev->openers) > !!bdev || |
| 5322 | mddev->sync_thread || | ||
| 5323 | (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) { | ||
| 5296 | printk("md: %s still in use.\n",mdname(mddev)); | 5324 | printk("md: %s still in use.\n",mdname(mddev)); |
| 5325 | if (did_freeze) { | ||
| 5326 | clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); | ||
| 5327 | md_wakeup_thread(mddev->thread); | ||
| 5328 | } | ||
| 5297 | err = -EBUSY; | 5329 | err = -EBUSY; |
| 5298 | goto out; | 5330 | goto out; |
| 5299 | } | 5331 | } |
| 5300 | if (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags)) { | ||
| 5301 | /* Someone opened the device since we flushed it | ||
| 5302 | * so page cache could be dirty and it is too late | ||
| 5303 | * to flush. So abort | ||
| 5304 | */ | ||
| 5305 | mutex_unlock(&mddev->open_mutex); | ||
| 5306 | return -EBUSY; | ||
| 5307 | } | ||
| 5308 | if (mddev->pers) { | 5332 | if (mddev->pers) { |
| 5309 | __md_stop_writes(mddev); | 5333 | __md_stop_writes(mddev); |
| 5310 | 5334 | ||
| @@ -5315,7 +5339,7 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) | |||
| 5315 | set_disk_ro(mddev->gendisk, 1); | 5339 | set_disk_ro(mddev->gendisk, 1); |
| 5316 | clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); | 5340 | clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); |
| 5317 | sysfs_notify_dirent_safe(mddev->sysfs_state); | 5341 | sysfs_notify_dirent_safe(mddev->sysfs_state); |
| 5318 | err = 0; | 5342 | err = 0; |
| 5319 | } | 5343 | } |
| 5320 | out: | 5344 | out: |
| 5321 | mutex_unlock(&mddev->open_mutex); | 5345 | mutex_unlock(&mddev->open_mutex); |
| @@ -5331,20 +5355,34 @@ static int do_md_stop(struct mddev * mddev, int mode, | |||
| 5331 | { | 5355 | { |
| 5332 | struct gendisk *disk = mddev->gendisk; | 5356 | struct gendisk *disk = mddev->gendisk; |
| 5333 | struct md_rdev *rdev; | 5357 | struct md_rdev *rdev; |
| 5358 | int did_freeze = 0; | ||
| 5359 | |||
| 5360 | if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { | ||
| 5361 | did_freeze = 1; | ||
| 5362 | set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); | ||
| 5363 | md_wakeup_thread(mddev->thread); | ||
| 5364 | } | ||
| 5365 | if (mddev->sync_thread) { | ||
| 5366 | set_bit(MD_RECOVERY_INTR, &mddev->recovery); | ||
| 5367 | /* Thread might be blocked waiting for metadata update | ||
| 5368 | * which will now never happen */ | ||
| 5369 | wake_up_process(mddev->sync_thread->tsk); | ||
| 5370 | } | ||
| 5371 | mddev_unlock(mddev); | ||
| 5372 | wait_event(resync_wait, mddev->sync_thread == NULL); | ||
| 5373 | mddev_lock_nointr(mddev); | ||
| 5334 | 5374 | ||
| 5335 | mutex_lock(&mddev->open_mutex); | 5375 | mutex_lock(&mddev->open_mutex); |
| 5336 | if (atomic_read(&mddev->openers) > !!bdev || | 5376 | if (atomic_read(&mddev->openers) > !!bdev || |
| 5337 | mddev->sysfs_active) { | 5377 | mddev->sysfs_active || |
| 5378 | mddev->sync_thread || | ||
| 5379 | (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) { | ||
| 5338 | printk("md: %s still in use.\n",mdname(mddev)); | 5380 | printk("md: %s still in use.\n",mdname(mddev)); |
| 5339 | mutex_unlock(&mddev->open_mutex); | 5381 | mutex_unlock(&mddev->open_mutex); |
| 5340 | return -EBUSY; | 5382 | if (did_freeze) { |
| 5341 | } | 5383 | clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); |
| 5342 | if (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags)) { | 5384 | md_wakeup_thread(mddev->thread); |
| 5343 | /* Someone opened the device since we flushed it | 5385 | } |
| 5344 | * so page cache could be dirty and it is too late | ||
| 5345 | * to flush. So abort | ||
| 5346 | */ | ||
| 5347 | mutex_unlock(&mddev->open_mutex); | ||
| 5348 | return -EBUSY; | 5386 | return -EBUSY; |
| 5349 | } | 5387 | } |
| 5350 | if (mddev->pers) { | 5388 | if (mddev->pers) { |
| @@ -6551,7 +6589,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode, | |||
| 6551 | wait_event(mddev->sb_wait, | 6589 | wait_event(mddev->sb_wait, |
| 6552 | !test_bit(MD_CHANGE_DEVS, &mddev->flags) && | 6590 | !test_bit(MD_CHANGE_DEVS, &mddev->flags) && |
| 6553 | !test_bit(MD_CHANGE_PENDING, &mddev->flags)); | 6591 | !test_bit(MD_CHANGE_PENDING, &mddev->flags)); |
| 6554 | mddev_lock(mddev); | 6592 | mddev_lock_nointr(mddev); |
| 6555 | } | 6593 | } |
| 6556 | } else { | 6594 | } else { |
| 6557 | err = -EROFS; | 6595 | err = -EROFS; |
| @@ -7361,9 +7399,6 @@ void md_do_sync(struct md_thread *thread) | |||
| 7361 | mddev->curr_resync = 2; | 7399 | mddev->curr_resync = 2; |
| 7362 | 7400 | ||
| 7363 | try_again: | 7401 | try_again: |
| 7364 | if (kthread_should_stop()) | ||
| 7365 | set_bit(MD_RECOVERY_INTR, &mddev->recovery); | ||
| 7366 | |||
| 7367 | if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) | 7402 | if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) |
| 7368 | goto skip; | 7403 | goto skip; |
| 7369 | for_each_mddev(mddev2, tmp) { | 7404 | for_each_mddev(mddev2, tmp) { |
| @@ -7388,7 +7423,7 @@ void md_do_sync(struct md_thread *thread) | |||
| 7388 | * be caught by 'softlockup' | 7423 | * be caught by 'softlockup' |
| 7389 | */ | 7424 | */ |
| 7390 | prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE); | 7425 | prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE); |
| 7391 | if (!kthread_should_stop() && | 7426 | if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && |
| 7392 | mddev2->curr_resync >= mddev->curr_resync) { | 7427 | mddev2->curr_resync >= mddev->curr_resync) { |
| 7393 | printk(KERN_INFO "md: delaying %s of %s" | 7428 | printk(KERN_INFO "md: delaying %s of %s" |
| 7394 | " until %s has finished (they" | 7429 | " until %s has finished (they" |
| @@ -7464,7 +7499,7 @@ void md_do_sync(struct md_thread *thread) | |||
| 7464 | last_check = 0; | 7499 | last_check = 0; |
| 7465 | 7500 | ||
| 7466 | if (j>2) { | 7501 | if (j>2) { |
| 7467 | printk(KERN_INFO | 7502 | printk(KERN_INFO |
| 7468 | "md: resuming %s of %s from checkpoint.\n", | 7503 | "md: resuming %s of %s from checkpoint.\n", |
| 7469 | desc, mdname(mddev)); | 7504 | desc, mdname(mddev)); |
| 7470 | mddev->curr_resync = j; | 7505 | mddev->curr_resync = j; |
| @@ -7501,7 +7536,8 @@ void md_do_sync(struct md_thread *thread) | |||
| 7501 | sysfs_notify(&mddev->kobj, NULL, "sync_completed"); | 7536 | sysfs_notify(&mddev->kobj, NULL, "sync_completed"); |
| 7502 | } | 7537 | } |
| 7503 | 7538 | ||
| 7504 | while (j >= mddev->resync_max && !kthread_should_stop()) { | 7539 | while (j >= mddev->resync_max && |
| 7540 | !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { | ||
| 7505 | /* As this condition is controlled by user-space, | 7541 | /* As this condition is controlled by user-space, |
| 7506 | * we can block indefinitely, so use '_interruptible' | 7542 | * we can block indefinitely, so use '_interruptible' |
| 7507 | * to avoid triggering warnings. | 7543 | * to avoid triggering warnings. |
| @@ -7509,17 +7545,18 @@ void md_do_sync(struct md_thread *thread) | |||
| 7509 | flush_signals(current); /* just in case */ | 7545 | flush_signals(current); /* just in case */ |
| 7510 | wait_event_interruptible(mddev->recovery_wait, | 7546 | wait_event_interruptible(mddev->recovery_wait, |
| 7511 | mddev->resync_max > j | 7547 | mddev->resync_max > j |
| 7512 | || kthread_should_stop()); | 7548 | || test_bit(MD_RECOVERY_INTR, |
| 7549 | &mddev->recovery)); | ||
| 7513 | } | 7550 | } |
| 7514 | 7551 | ||
| 7515 | if (kthread_should_stop()) | 7552 | if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) |
| 7516 | goto interrupted; | 7553 | break; |
| 7517 | 7554 | ||
| 7518 | sectors = mddev->pers->sync_request(mddev, j, &skipped, | 7555 | sectors = mddev->pers->sync_request(mddev, j, &skipped, |
| 7519 | currspeed < speed_min(mddev)); | 7556 | currspeed < speed_min(mddev)); |
| 7520 | if (sectors == 0) { | 7557 | if (sectors == 0) { |
| 7521 | set_bit(MD_RECOVERY_INTR, &mddev->recovery); | 7558 | set_bit(MD_RECOVERY_INTR, &mddev->recovery); |
| 7522 | goto out; | 7559 | break; |
| 7523 | } | 7560 | } |
| 7524 | 7561 | ||
| 7525 | if (!skipped) { /* actual IO requested */ | 7562 | if (!skipped) { /* actual IO requested */ |
| @@ -7556,10 +7593,8 @@ void md_do_sync(struct md_thread *thread) | |||
| 7556 | last_mark = next; | 7593 | last_mark = next; |
| 7557 | } | 7594 | } |
| 7558 | 7595 | ||
| 7559 | 7596 | if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) | |
| 7560 | if (kthread_should_stop()) | 7597 | break; |
| 7561 | goto interrupted; | ||
| 7562 | |||
| 7563 | 7598 | ||
| 7564 | /* | 7599 | /* |
| 7565 | * this loop exits only if either we are slower than | 7600 | * this loop exits only if either we are slower than |
| @@ -7582,11 +7617,12 @@ void md_do_sync(struct md_thread *thread) | |||
| 7582 | } | 7617 | } |
| 7583 | } | 7618 | } |
| 7584 | } | 7619 | } |
| 7585 | printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc); | 7620 | printk(KERN_INFO "md: %s: %s %s.\n",mdname(mddev), desc, |
| 7621 | test_bit(MD_RECOVERY_INTR, &mddev->recovery) | ||
| 7622 | ? "interrupted" : "done"); | ||
| 7586 | /* | 7623 | /* |
| 7587 | * this also signals 'finished resyncing' to md_stop | 7624 | * this also signals 'finished resyncing' to md_stop |
| 7588 | */ | 7625 | */ |
| 7589 | out: | ||
| 7590 | blk_finish_plug(&plug); | 7626 | blk_finish_plug(&plug); |
| 7591 | wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); | 7627 | wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); |
| 7592 | 7628 | ||
| @@ -7640,16 +7676,6 @@ void md_do_sync(struct md_thread *thread) | |||
| 7640 | set_bit(MD_RECOVERY_DONE, &mddev->recovery); | 7676 | set_bit(MD_RECOVERY_DONE, &mddev->recovery); |
| 7641 | md_wakeup_thread(mddev->thread); | 7677 | md_wakeup_thread(mddev->thread); |
| 7642 | return; | 7678 | return; |
| 7643 | |||
| 7644 | interrupted: | ||
| 7645 | /* | ||
| 7646 | * got a signal, exit. | ||
| 7647 | */ | ||
| 7648 | printk(KERN_INFO | ||
| 7649 | "md: md_do_sync() got signal ... exiting\n"); | ||
| 7650 | set_bit(MD_RECOVERY_INTR, &mddev->recovery); | ||
| 7651 | goto out; | ||
| 7652 | |||
| 7653 | } | 7679 | } |
| 7654 | EXPORT_SYMBOL_GPL(md_do_sync); | 7680 | EXPORT_SYMBOL_GPL(md_do_sync); |
| 7655 | 7681 | ||
| @@ -7894,6 +7920,7 @@ void md_reap_sync_thread(struct mddev *mddev) | |||
| 7894 | 7920 | ||
| 7895 | /* resync has finished, collect result */ | 7921 | /* resync has finished, collect result */ |
| 7896 | md_unregister_thread(&mddev->sync_thread); | 7922 | md_unregister_thread(&mddev->sync_thread); |
| 7923 | wake_up(&resync_wait); | ||
| 7897 | if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && | 7924 | if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && |
| 7898 | !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { | 7925 | !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { |
| 7899 | /* success...*/ | 7926 | /* success...*/ |
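The md.c hunks above replace every kthread_should_stop() test in md_do_sync() with a check of the MD_RECOVERY_INTR bit and fold the old "interrupted:" label into the normal exit path, so the loop has a single cleanup route whether it finishes or is cancelled. Below is a minimal userspace sketch of that cancellation pattern, using pthreads and C11 atomics as stand-ins; all names are illustrative, not the kernel API.

    /* Sketch only: models MD_RECOVERY_INTR-style cancellation in userspace. */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define RECOVERY_INTR (1u << 0)     /* stands in for MD_RECOVERY_INTR */

    static atomic_uint recovery_flags;

    static void *do_sync(void *arg)
    {
        unsigned long j;

        for (j = 0; j < 1000000; j++) {
            /* interruption is a flag test, not kthread_should_stop() */
            if (atomic_load(&recovery_flags) & RECOVERY_INTR)
                break;
            /* ... sync one chunk ... */
        }
        /* single exit path: report status, then do common cleanup */
        printf("sync %s at chunk %lu\n",
               (atomic_load(&recovery_flags) & RECOVERY_INTR)
               ? "interrupted" : "done", j);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, do_sync, NULL);
        atomic_fetch_or(&recovery_flags, RECOVERY_INTR); /* request stop */
        pthread_join(&t, NULL);
        return 0;
    }

Because any thread can set the flag, interruption no longer depends on who owns the kthread, which is what lets md_reap_sync_thread() wake resync_wait after unregistering the sync thread.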
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index af6681b19776..1e5a540995e9 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
| @@ -66,7 +66,8 @@ | |||
| 66 | */ | 66 | */ |
| 67 | static int max_queued_requests = 1024; | 67 | static int max_queued_requests = 1024; |
| 68 | 68 | ||
| 69 | static void allow_barrier(struct r1conf *conf); | 69 | static void allow_barrier(struct r1conf *conf, sector_t start_next_window, |
| 70 | sector_t bi_sector); | ||
| 70 | static void lower_barrier(struct r1conf *conf); | 71 | static void lower_barrier(struct r1conf *conf); |
| 71 | 72 | ||
| 72 | static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) | 73 | static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) |
| @@ -84,10 +85,12 @@ static void r1bio_pool_free(void *r1_bio, void *data) | |||
| 84 | } | 85 | } |
| 85 | 86 | ||
| 86 | #define RESYNC_BLOCK_SIZE (64*1024) | 87 | #define RESYNC_BLOCK_SIZE (64*1024) |
| 87 | //#define RESYNC_BLOCK_SIZE PAGE_SIZE | 88 | #define RESYNC_DEPTH 32 |
| 88 | #define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9) | 89 | #define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9) |
| 89 | #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE) | 90 | #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE) |
| 90 | #define RESYNC_WINDOW (2048*1024) | 91 | #define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH) |
| 92 | #define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9) | ||
| 93 | #define NEXT_NORMALIO_DISTANCE (3 * RESYNC_WINDOW_SECTORS) | ||
| 91 | 94 | ||
| 92 | static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) | 95 | static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) |
| 93 | { | 96 | { |
| @@ -225,6 +228,8 @@ static void call_bio_endio(struct r1bio *r1_bio) | |||
| 225 | struct bio *bio = r1_bio->master_bio; | 228 | struct bio *bio = r1_bio->master_bio; |
| 226 | int done; | 229 | int done; |
| 227 | struct r1conf *conf = r1_bio->mddev->private; | 230 | struct r1conf *conf = r1_bio->mddev->private; |
| 231 | sector_t start_next_window = r1_bio->start_next_window; | ||
| 232 | sector_t bi_sector = bio->bi_sector; | ||
| 228 | 233 | ||
| 229 | if (bio->bi_phys_segments) { | 234 | if (bio->bi_phys_segments) { |
| 230 | unsigned long flags; | 235 | unsigned long flags; |
| @@ -232,6 +237,11 @@ static void call_bio_endio(struct r1bio *r1_bio) | |||
| 232 | bio->bi_phys_segments--; | 237 | bio->bi_phys_segments--; |
| 233 | done = (bio->bi_phys_segments == 0); | 238 | done = (bio->bi_phys_segments == 0); |
| 234 | spin_unlock_irqrestore(&conf->device_lock, flags); | 239 | spin_unlock_irqrestore(&conf->device_lock, flags); |
| 240 | /* | ||
| 241 | * make_request() might be waiting for | ||
| 242 | * bi_phys_segments to decrease | ||
| 243 | */ | ||
| 244 | wake_up(&conf->wait_barrier); | ||
| 235 | } else | 245 | } else |
| 236 | done = 1; | 246 | done = 1; |
| 237 | 247 | ||
| @@ -243,7 +253,7 @@ static void call_bio_endio(struct r1bio *r1_bio) | |||
| 243 | * Wake up any possible resync thread that waits for the device | 253 | * Wake up any possible resync thread that waits for the device |
| 244 | * to go idle. | 254 | * to go idle. |
| 245 | */ | 255 | */ |
| 246 | allow_barrier(conf); | 256 | allow_barrier(conf, start_next_window, bi_sector); |
| 247 | } | 257 | } |
| 248 | } | 258 | } |
| 249 | 259 | ||
| @@ -814,8 +824,6 @@ static void flush_pending_writes(struct r1conf *conf) | |||
| 814 | * there is no normal IO happening. It must arrange to call | 824 | * there is no normal IO happening. It must arrange to call |
| 815 | * lower_barrier when the particular background IO completes. | 825 | * lower_barrier when the particular background IO completes. |
| 816 | */ | 826 | */ |
| 817 | #define RESYNC_DEPTH 32 | ||
| 818 | |||
| 819 | static void raise_barrier(struct r1conf *conf) | 827 | static void raise_barrier(struct r1conf *conf) |
| 820 | { | 828 | { |
| 821 | spin_lock_irq(&conf->resync_lock); | 829 | spin_lock_irq(&conf->resync_lock); |
| @@ -827,9 +835,19 @@ static void raise_barrier(struct r1conf *conf) | |||
| 827 | /* block any new IO from starting */ | 835 | /* block any new IO from starting */ |
| 828 | conf->barrier++; | 836 | conf->barrier++; |
| 829 | 837 | ||
| 830 | /* Now wait for all pending IO to complete */ | 838 | /* For these conditions we must wait: |
| 839 | * A: while the array is frozen | ||
| 840 | * B: while barrier >= RESYNC_DEPTH, meaning resync has | ||
| 841 | * reached the maximum allowed depth. | ||
| 842 | * C: next_resync + RESYNC_SECTORS > start_next_window, meaning | ||
| 843 | * the next resync would reach into the window that normal bios | ||
| 844 | * are handling. | ||
| 845 | */ | ||
| 831 | wait_event_lock_irq(conf->wait_barrier, | 846 | wait_event_lock_irq(conf->wait_barrier, |
| 832 | !conf->nr_pending && conf->barrier < RESYNC_DEPTH, | 847 | !conf->array_frozen && |
| 848 | conf->barrier < RESYNC_DEPTH && | ||
| 849 | (conf->start_next_window >= | ||
| 850 | conf->next_resync + RESYNC_SECTORS), | ||
| 833 | conf->resync_lock); | 851 | conf->resync_lock); |
| 834 | 852 | ||
| 835 | spin_unlock_irq(&conf->resync_lock); | 853 | spin_unlock_irq(&conf->resync_lock); |
| @@ -845,10 +863,33 @@ static void lower_barrier(struct r1conf *conf) | |||
| 845 | wake_up(&conf->wait_barrier); | 863 | wake_up(&conf->wait_barrier); |
| 846 | } | 864 | } |
| 847 | 865 | ||
| 848 | static void wait_barrier(struct r1conf *conf) | 866 | static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio) |
| 849 | { | 867 | { |
| 868 | bool wait = false; | ||
| 869 | |||
| 870 | if (conf->array_frozen || !bio) | ||
| 871 | wait = true; | ||
| 872 | else if (conf->barrier && bio_data_dir(bio) == WRITE) { | ||
| 873 | if (conf->next_resync < RESYNC_WINDOW_SECTORS) | ||
| 874 | wait = true; | ||
| 875 | else if ((conf->next_resync - RESYNC_WINDOW_SECTORS | ||
| 876 | >= bio_end_sector(bio)) || | ||
| 877 | (conf->next_resync + NEXT_NORMALIO_DISTANCE | ||
| 878 | <= bio->bi_sector)) | ||
| 879 | wait = false; | ||
| 880 | else | ||
| 881 | wait = true; | ||
| 882 | } | ||
| 883 | |||
| 884 | return wait; | ||
| 885 | } | ||
| 886 | |||
| 887 | static sector_t wait_barrier(struct r1conf *conf, struct bio *bio) | ||
| 888 | { | ||
| 889 | sector_t sector = 0; | ||
| 890 | |||
| 850 | spin_lock_irq(&conf->resync_lock); | 891 | spin_lock_irq(&conf->resync_lock); |
| 851 | if (conf->barrier) { | 892 | if (need_to_wait_for_sync(conf, bio)) { |
| 852 | conf->nr_waiting++; | 893 | conf->nr_waiting++; |
| 853 | /* Wait for the barrier to drop. | 894 | /* Wait for the barrier to drop. |
| 854 | * However if there are already pending | 895 | * However if there are already pending |
| @@ -860,22 +901,67 @@ static void wait_barrier(struct r1conf *conf) | |||
| 860 | * count down. | 901 | * count down. |
| 861 | */ | 902 | */ |
| 862 | wait_event_lock_irq(conf->wait_barrier, | 903 | wait_event_lock_irq(conf->wait_barrier, |
| 863 | !conf->barrier || | 904 | !conf->array_frozen && |
| 864 | (conf->nr_pending && | 905 | (!conf->barrier || |
| 906 | ((conf->start_next_window < | ||
| 907 | conf->next_resync + RESYNC_SECTORS) && | ||
| 865 | current->bio_list && | 908 | current->bio_list && |
| 866 | !bio_list_empty(current->bio_list)), | 909 | !bio_list_empty(current->bio_list))), |
| 867 | conf->resync_lock); | 910 | conf->resync_lock); |
| 868 | conf->nr_waiting--; | 911 | conf->nr_waiting--; |
| 869 | } | 912 | } |
| 913 | |||
| 914 | if (bio && bio_data_dir(bio) == WRITE) { | ||
| 915 | if (conf->next_resync + NEXT_NORMALIO_DISTANCE | ||
| 916 | <= bio->bi_sector) { | ||
| 917 | if (conf->start_next_window == MaxSector) | ||
| 918 | conf->start_next_window = | ||
| 919 | conf->next_resync + | ||
| 920 | NEXT_NORMALIO_DISTANCE; | ||
| 921 | |||
| 922 | if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE) | ||
| 923 | <= bio->bi_sector) | ||
| 924 | conf->next_window_requests++; | ||
| 925 | else | ||
| 926 | conf->current_window_requests++; | ||
| 927 | } | ||
| 928 | if (bio->bi_sector >= conf->start_next_window) | ||
| 929 | sector = conf->start_next_window; | ||
| 930 | } | ||
| 931 | |||
| 870 | conf->nr_pending++; | 932 | conf->nr_pending++; |
| 871 | spin_unlock_irq(&conf->resync_lock); | 933 | spin_unlock_irq(&conf->resync_lock); |
| 934 | return sector; | ||
| 872 | } | 935 | } |
| 873 | 936 | ||
| 874 | static void allow_barrier(struct r1conf *conf) | 937 | static void allow_barrier(struct r1conf *conf, sector_t start_next_window, |
| 938 | sector_t bi_sector) | ||
| 875 | { | 939 | { |
| 876 | unsigned long flags; | 940 | unsigned long flags; |
| 941 | |||
| 877 | spin_lock_irqsave(&conf->resync_lock, flags); | 942 | spin_lock_irqsave(&conf->resync_lock, flags); |
| 878 | conf->nr_pending--; | 943 | conf->nr_pending--; |
| 944 | if (start_next_window) { | ||
| 945 | if (start_next_window == conf->start_next_window) { | ||
| 946 | if (conf->start_next_window + NEXT_NORMALIO_DISTANCE | ||
| 947 | <= bi_sector) | ||
| 948 | conf->next_window_requests--; | ||
| 949 | else | ||
| 950 | conf->current_window_requests--; | ||
| 951 | } else | ||
| 952 | conf->current_window_requests--; | ||
| 953 | |||
| 954 | if (!conf->current_window_requests) { | ||
| 955 | if (conf->next_window_requests) { | ||
| 956 | conf->current_window_requests = | ||
| 957 | conf->next_window_requests; | ||
| 958 | conf->next_window_requests = 0; | ||
| 959 | conf->start_next_window += | ||
| 960 | NEXT_NORMALIO_DISTANCE; | ||
| 961 | } else | ||
| 962 | conf->start_next_window = MaxSector; | ||
| 963 | } | ||
| 964 | } | ||
| 879 | spin_unlock_irqrestore(&conf->resync_lock, flags); | 965 | spin_unlock_irqrestore(&conf->resync_lock, flags); |
| 880 | wake_up(&conf->wait_barrier); | 966 | wake_up(&conf->wait_barrier); |
| 881 | } | 967 | } |
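The wait_barrier()/allow_barrier() pair above counts in-flight writes in two regions beyond start_next_window and slides the window forward only once the current region drains. A userspace sketch of just that bookkeeping follows, with the locking stripped out; the constants and names are illustrative, and the real code does all of this under resync_lock.

    #include <stdint.h>
    #include <stdio.h>

    #define NEXT_NORMALIO_DISTANCE (3 * 4096)  /* sectors; illustrative */
    #define MAX_SECTOR UINT64_MAX              /* stands in for MaxSector */

    static uint64_t start_next_window = MAX_SECTOR;
    static int current_window_requests;
    static int next_window_requests;

    /* mirror of the allow_barrier() bookkeeping, minus the locking */
    static void window_write_done(uint64_t start_window, uint64_t sector)
    {
        if (start_window == start_next_window &&
            sector >= start_window + NEXT_NORMALIO_DISTANCE)
            next_window_requests--;
        else
            current_window_requests--;

        if (current_window_requests == 0) {
            if (next_window_requests) {
                /* promote the far region and slide the window forward */
                current_window_requests = next_window_requests;
                next_window_requests = 0;
                start_next_window += NEXT_NORMALIO_DISTANCE;
            } else {
                start_next_window = MAX_SECTOR; /* nothing in flight */
            }
        }
    }

    int main(void)
    {
        /* one write admitted to the current region, then retired */
        start_next_window = 0;
        current_window_requests = 1;
        window_write_done(0, 100);
        printf("start_next_window=%llu\n",
               (unsigned long long)start_next_window);
        return 0;
    }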
| @@ -884,8 +970,7 @@ static void freeze_array(struct r1conf *conf, int extra) | |||
| 884 | { | 970 | { |
| 885 | /* stop sync IO and normal IO and wait for everything to | 971 | /* stop sync IO and normal IO and wait for everything to |
| 886 | * go quiet. | 972 | * go quiet. |
| 887 | * We increment barrier and nr_waiting, and then | 973 | * We wait until nr_pending matches nr_queued+extra |
| 888 | * wait until nr_pending match nr_queued+extra | ||
| 889 | * This is called in the context of one normal IO request | 974 | * This is called in the context of one normal IO request |
| 890 | * that has failed. Thus any sync request that might be pending | 975 | * that has failed. Thus any sync request that might be pending |
| 891 | * will be blocked by nr_pending, and we need to wait for | 976 | * will be blocked by nr_pending, and we need to wait for |
| @@ -895,8 +980,7 @@ static void freeze_array(struct r1conf *conf, int extra) | |||
| 895 | * we continue. | 980 | * we continue. |
| 896 | */ | 981 | */ |
| 897 | spin_lock_irq(&conf->resync_lock); | 982 | spin_lock_irq(&conf->resync_lock); |
| 898 | conf->barrier++; | 983 | conf->array_frozen = 1; |
| 899 | conf->nr_waiting++; | ||
| 900 | wait_event_lock_irq_cmd(conf->wait_barrier, | 984 | wait_event_lock_irq_cmd(conf->wait_barrier, |
| 901 | conf->nr_pending == conf->nr_queued+extra, | 985 | conf->nr_pending == conf->nr_queued+extra, |
| 902 | conf->resync_lock, | 986 | conf->resync_lock, |
| @@ -907,8 +991,7 @@ static void unfreeze_array(struct r1conf *conf) | |||
| 907 | { | 991 | { |
| 908 | /* reverse the effect of the freeze */ | 992 | /* reverse the effect of the freeze */ |
| 909 | spin_lock_irq(&conf->resync_lock); | 993 | spin_lock_irq(&conf->resync_lock); |
| 910 | conf->barrier--; | 994 | conf->array_frozen = 0; |
| 911 | conf->nr_waiting--; | ||
| 912 | wake_up(&conf->wait_barrier); | 995 | wake_up(&conf->wait_barrier); |
| 913 | spin_unlock_irq(&conf->resync_lock); | 996 | spin_unlock_irq(&conf->resync_lock); |
| 914 | } | 997 | } |
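freeze_array()/unfreeze_array() above now hinge on a single array_frozen flag instead of artificially bumping barrier and nr_waiting. A condition-variable model of the same idea, as a userspace sketch with illustrative counters and names:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t resync_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t wait_barrier = PTHREAD_COND_INITIALIZER;
    static bool array_frozen;
    static int nr_pending, nr_queued;

    static void freeze_array(int extra)
    {
        pthread_mutex_lock(&resync_lock);
        array_frozen = true; /* one flag replaces barrier++/nr_waiting++ */
        /* wait until every pending request is accounted for as queued */
        while (nr_pending != nr_queued + extra)
            pthread_cond_wait(&wait_barrier, &resync_lock);
        pthread_mutex_unlock(&resync_lock);
    }

    static void unfreeze_array(void)
    {
        pthread_mutex_lock(&resync_lock);
        array_frozen = false;
        pthread_cond_broadcast(&wait_barrier); /* let waiters re-test */
        pthread_mutex_unlock(&resync_lock);
    }

    int main(void)
    {
        freeze_array(0);  /* nr_pending == nr_queued == 0, returns at once */
        unfreeze_array();
        return 0;
    }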
| @@ -1013,6 +1096,7 @@ static void make_request(struct mddev *mddev, struct bio * bio) | |||
| 1013 | int first_clone; | 1096 | int first_clone; |
| 1014 | int sectors_handled; | 1097 | int sectors_handled; |
| 1015 | int max_sectors; | 1098 | int max_sectors; |
| 1099 | sector_t start_next_window; | ||
| 1016 | 1100 | ||
| 1017 | /* | 1101 | /* |
| 1018 | * Register the new request and wait if the reconstruction | 1102 | * Register the new request and wait if the reconstruction |
| @@ -1042,7 +1126,7 @@ static void make_request(struct mddev *mddev, struct bio * bio) | |||
| 1042 | finish_wait(&conf->wait_barrier, &w); | 1126 | finish_wait(&conf->wait_barrier, &w); |
| 1043 | } | 1127 | } |
| 1044 | 1128 | ||
| 1045 | wait_barrier(conf); | 1129 | start_next_window = wait_barrier(conf, bio); |
| 1046 | 1130 | ||
| 1047 | bitmap = mddev->bitmap; | 1131 | bitmap = mddev->bitmap; |
| 1048 | 1132 | ||
| @@ -1163,6 +1247,7 @@ read_again: | |||
| 1163 | 1247 | ||
| 1164 | disks = conf->raid_disks * 2; | 1248 | disks = conf->raid_disks * 2; |
| 1165 | retry_write: | 1249 | retry_write: |
| 1250 | r1_bio->start_next_window = start_next_window; | ||
| 1166 | blocked_rdev = NULL; | 1251 | blocked_rdev = NULL; |
| 1167 | rcu_read_lock(); | 1252 | rcu_read_lock(); |
| 1168 | max_sectors = r1_bio->sectors; | 1253 | max_sectors = r1_bio->sectors; |
| @@ -1231,14 +1316,24 @@ read_again: | |||
| 1231 | if (unlikely(blocked_rdev)) { | 1316 | if (unlikely(blocked_rdev)) { |
| 1232 | /* Wait for this device to become unblocked */ | 1317 | /* Wait for this device to become unblocked */ |
| 1233 | int j; | 1318 | int j; |
| 1319 | sector_t old = start_next_window; | ||
| 1234 | 1320 | ||
| 1235 | for (j = 0; j < i; j++) | 1321 | for (j = 0; j < i; j++) |
| 1236 | if (r1_bio->bios[j]) | 1322 | if (r1_bio->bios[j]) |
| 1237 | rdev_dec_pending(conf->mirrors[j].rdev, mddev); | 1323 | rdev_dec_pending(conf->mirrors[j].rdev, mddev); |
| 1238 | r1_bio->state = 0; | 1324 | r1_bio->state = 0; |
| 1239 | allow_barrier(conf); | 1325 | allow_barrier(conf, start_next_window, bio->bi_sector); |
| 1240 | md_wait_for_blocked_rdev(blocked_rdev, mddev); | 1326 | md_wait_for_blocked_rdev(blocked_rdev, mddev); |
| 1241 | wait_barrier(conf); | 1327 | start_next_window = wait_barrier(conf, bio); |
| 1328 | /* | ||
| 1329 | * We must make sure that all r1bios of this bio have | ||
| 1330 | * the same value of bi_phys_segments | ||
| 1331 | */ | ||
| 1332 | if (bio->bi_phys_segments && old && | ||
| 1333 | old != start_next_window) | ||
| 1334 | /* Wait for the former r1bio(s) to complete */ | ||
| 1335 | wait_event(conf->wait_barrier, | ||
| 1336 | bio->bi_phys_segments == 1); | ||
| 1242 | goto retry_write; | 1337 | goto retry_write; |
| 1243 | } | 1338 | } |
| 1244 | 1339 | ||
| @@ -1438,11 +1533,14 @@ static void print_conf(struct r1conf *conf) | |||
| 1438 | 1533 | ||
| 1439 | static void close_sync(struct r1conf *conf) | 1534 | static void close_sync(struct r1conf *conf) |
| 1440 | { | 1535 | { |
| 1441 | wait_barrier(conf); | 1536 | wait_barrier(conf, NULL); |
| 1442 | allow_barrier(conf); | 1537 | allow_barrier(conf, 0, 0); |
| 1443 | 1538 | ||
| 1444 | mempool_destroy(conf->r1buf_pool); | 1539 | mempool_destroy(conf->r1buf_pool); |
| 1445 | conf->r1buf_pool = NULL; | 1540 | conf->r1buf_pool = NULL; |
| 1541 | |||
| 1542 | conf->next_resync = 0; | ||
| 1543 | conf->start_next_window = MaxSector; | ||
| 1446 | } | 1544 | } |
| 1447 | 1545 | ||
| 1448 | static int raid1_spare_active(struct mddev *mddev) | 1546 | static int raid1_spare_active(struct mddev *mddev) |
| @@ -2714,6 +2812,9 @@ static struct r1conf *setup_conf(struct mddev *mddev) | |||
| 2714 | conf->pending_count = 0; | 2812 | conf->pending_count = 0; |
| 2715 | conf->recovery_disabled = mddev->recovery_disabled - 1; | 2813 | conf->recovery_disabled = mddev->recovery_disabled - 1; |
| 2716 | 2814 | ||
| 2815 | conf->start_next_window = MaxSector; | ||
| 2816 | conf->current_window_requests = conf->next_window_requests = 0; | ||
| 2817 | |||
| 2717 | err = -EIO; | 2818 | err = -EIO; |
| 2718 | for (i = 0; i < conf->raid_disks * 2; i++) { | 2819 | for (i = 0; i < conf->raid_disks * 2; i++) { |
| 2719 | 2820 | ||
| @@ -2871,8 +2972,8 @@ static int stop(struct mddev *mddev) | |||
| 2871 | atomic_read(&bitmap->behind_writes) == 0); | 2972 | atomic_read(&bitmap->behind_writes) == 0); |
| 2872 | } | 2973 | } |
| 2873 | 2974 | ||
| 2874 | raise_barrier(conf); | 2975 | freeze_array(conf, 0); |
| 2875 | lower_barrier(conf); | 2976 | unfreeze_array(conf); |
| 2876 | 2977 | ||
| 2877 | md_unregister_thread(&mddev->thread); | 2978 | md_unregister_thread(&mddev->thread); |
| 2878 | if (conf->r1bio_pool) | 2979 | if (conf->r1bio_pool) |
| @@ -3031,10 +3132,10 @@ static void raid1_quiesce(struct mddev *mddev, int state) | |||
| 3031 | wake_up(&conf->wait_barrier); | 3132 | wake_up(&conf->wait_barrier); |
| 3032 | break; | 3133 | break; |
| 3033 | case 1: | 3134 | case 1: |
| 3034 | raise_barrier(conf); | 3135 | freeze_array(conf, 0); |
| 3035 | break; | 3136 | break; |
| 3036 | case 0: | 3137 | case 0: |
| 3037 | lower_barrier(conf); | 3138 | unfreeze_array(conf); |
| 3038 | break; | 3139 | break; |
| 3039 | } | 3140 | } |
| 3040 | } | 3141 | } |
| @@ -3051,7 +3152,8 @@ static void *raid1_takeover(struct mddev *mddev) | |||
| 3051 | mddev->new_chunk_sectors = 0; | 3152 | mddev->new_chunk_sectors = 0; |
| 3052 | conf = setup_conf(mddev); | 3153 | conf = setup_conf(mddev); |
| 3053 | if (!IS_ERR(conf)) | 3154 | if (!IS_ERR(conf)) |
| 3054 | conf->barrier = 1; | 3155 | /* Array must appear to be quiesced */ |
| 3156 | conf->array_frozen = 1; | ||
| 3055 | return conf; | 3157 | return conf; |
| 3056 | } | 3158 | } |
| 3057 | return ERR_PTR(-EINVAL); | 3159 | return ERR_PTR(-EINVAL); |
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h index 0ff3715fb7eb..9bebca7bff2f 100644 --- a/drivers/md/raid1.h +++ b/drivers/md/raid1.h | |||
| @@ -41,6 +41,19 @@ struct r1conf { | |||
| 41 | */ | 41 | */ |
| 42 | sector_t next_resync; | 42 | sector_t next_resync; |
| 43 | 43 | ||
| 44 | /* When raid1 starts resync, we divide the array into four partitions | ||
| 45 | * |---------|--------------|---------------------|-------------| | ||
| 46 | * next_resync start_next_window end_window | ||
| 47 | * start_next_window = next_resync + NEXT_NORMALIO_DISTANCE | ||
| 48 | * end_window = start_next_window + NEXT_NORMALIO_DISTANCE | ||
| 49 | * current_window_requests is the count of normal IO between | ||
| 50 | * start_next_window and end_window. | ||
| 51 | * next_window_requests is the count of normal IO after end_window. | ||
| 52 | */ | ||
| 53 | sector_t start_next_window; | ||
| 54 | int current_window_requests; | ||
| 55 | int next_window_requests; | ||
| 56 | |||
| 44 | spinlock_t device_lock; | 57 | spinlock_t device_lock; |
| 45 | 58 | ||
| 46 | /* list of 'struct r1bio' that need to be processed by raid1d, | 59 | /* list of 'struct r1bio' that need to be processed by raid1d, |
| @@ -65,6 +78,7 @@ struct r1conf { | |||
| 65 | int nr_waiting; | 78 | int nr_waiting; |
| 66 | int nr_queued; | 79 | int nr_queued; |
| 67 | int barrier; | 80 | int barrier; |
| 81 | int array_frozen; | ||
| 68 | 82 | ||
| 69 | /* Set to 1 if a full sync is needed, (fresh device added). | 83 | /* Set to 1 if a full sync is needed, (fresh device added). |
| 70 | * Cleared when a sync completes. | 84 | * Cleared when a sync completes. |
| @@ -111,6 +125,7 @@ struct r1bio { | |||
| 111 | * in this BehindIO request | 125 | * in this BehindIO request |
| 112 | */ | 126 | */ |
| 113 | sector_t sector; | 127 | sector_t sector; |
| 128 | sector_t start_next_window; | ||
| 114 | int sectors; | 129 | int sectors; |
| 115 | unsigned long state; | 130 | unsigned long state; |
| 116 | struct mddev *mddev; | 131 | struct mddev *mddev; |
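The fields and comment above define where a normal write may proceed while resync runs. A pure-function sketch of that decision follows; it covers only the geometry half of need_to_wait_for_sync(), omitting the array_frozen and barrier checks, and the constants are illustrative.

    #include <stdbool.h>
    #include <stdint.h>

    #define RESYNC_WINDOW_SECTORS  4096                        /* illustrative */
    #define NEXT_NORMALIO_DISTANCE (3 * RESYNC_WINDOW_SECTORS)

    /* geometry half of need_to_wait_for_sync(): must this write wait? */
    static bool write_must_wait(uint64_t next_resync,
                                uint64_t bi_sector, uint64_t bi_end_sector)
    {
        if (next_resync < RESYNC_WINDOW_SECTORS)
            return true;              /* resync is still near the start */
        if (next_resync - RESYNC_WINDOW_SECTORS >= bi_end_sector)
            return false;             /* write lies entirely behind it */
        if (next_resync + NEXT_NORMALIO_DISTANCE <= bi_sector)
            return false;             /* write is far enough ahead */
        return true;                  /* write overlaps the active window */
    }

    int main(void)
    {
        /* a write well ahead of the resync point does not have to wait */
        return write_must_wait(2 * RESYNC_WINDOW_SECTORS,
                               20 * RESYNC_WINDOW_SECTORS,
                               20 * RESYNC_WINDOW_SECTORS + 8) ? 1 : 0;
    }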
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 7c3508abb5e1..c504e8389e69 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
| @@ -4384,7 +4384,11 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, | |||
| 4384 | set_bit(MD_CHANGE_DEVS, &mddev->flags); | 4384 | set_bit(MD_CHANGE_DEVS, &mddev->flags); |
| 4385 | md_wakeup_thread(mddev->thread); | 4385 | md_wakeup_thread(mddev->thread); |
| 4386 | wait_event(mddev->sb_wait, mddev->flags == 0 || | 4386 | wait_event(mddev->sb_wait, mddev->flags == 0 || |
| 4387 | kthread_should_stop()); | 4387 | test_bit(MD_RECOVERY_INTR, &mddev->recovery)); |
| 4388 | if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { | ||
| 4389 | allow_barrier(conf); | ||
| 4390 | return sectors_done; | ||
| 4391 | } | ||
| 4388 | conf->reshape_safe = mddev->reshape_position; | 4392 | conf->reshape_safe = mddev->reshape_position; |
| 4389 | allow_barrier(conf); | 4393 | allow_barrier(conf); |
| 4390 | } | 4394 | } |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 7f0e17a27aeb..47da0af6322b 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
| @@ -85,6 +85,42 @@ static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect) | |||
| 85 | return &conf->stripe_hashtbl[hash]; | 85 | return &conf->stripe_hashtbl[hash]; |
| 86 | } | 86 | } |
| 87 | 87 | ||
| 88 | static inline int stripe_hash_locks_hash(sector_t sect) | ||
| 89 | { | ||
| 90 | return (sect >> STRIPE_SHIFT) & STRIPE_HASH_LOCKS_MASK; | ||
| 91 | } | ||
| 92 | |||
| 93 | static inline void lock_device_hash_lock(struct r5conf *conf, int hash) | ||
| 94 | { | ||
| 95 | spin_lock_irq(conf->hash_locks + hash); | ||
| 96 | spin_lock(&conf->device_lock); | ||
| 97 | } | ||
| 98 | |||
| 99 | static inline void unlock_device_hash_lock(struct r5conf *conf, int hash) | ||
| 100 | { | ||
| 101 | spin_unlock(&conf->device_lock); | ||
| 102 | spin_unlock_irq(conf->hash_locks + hash); | ||
| 103 | } | ||
| 104 | |||
| 105 | static inline void lock_all_device_hash_locks_irq(struct r5conf *conf) | ||
| 106 | { | ||
| 107 | int i; | ||
| 108 | local_irq_disable(); | ||
| 109 | spin_lock(conf->hash_locks); | ||
| 110 | for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++) | ||
| 111 | spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks); | ||
| 112 | spin_lock(&conf->device_lock); | ||
| 113 | } | ||
| 114 | |||
| 115 | static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf) | ||
| 116 | { | ||
| 117 | int i; | ||
| 118 | spin_unlock(&conf->device_lock); | ||
| 119 | for (i = NR_STRIPE_HASH_LOCKS; i; i--) | ||
| 120 | spin_unlock(conf->hash_locks + i - 1); | ||
| 121 | local_irq_enable(); | ||
| 122 | } | ||
| 123 | |||
| 88 | /* bio's attached to a stripe+device for I/O are linked together in bi_sector | 124 | /* bio's attached to a stripe+device for I/O are linked together in bi_sector |
| 89 | * order without overlap. There may be several bio's per stripe+device, and | 125 | * order without overlap. There may be several bio's per stripe+device, and |
| 90 | * a bio could span several devices. | 126 | * a bio could span several devices. |
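The helpers above always take the per-hash locks in ascending index order with device_lock innermost, and release in exactly the reverse order; holding to one fixed order is what keeps the scheme deadlock-free. A pthread model of the same discipline, with illustrative names rather than the kernel API:

    #include <pthread.h>

    #define NR_STRIPE_HASH_LOCKS 8  /* illustrative bucket count */

    static pthread_mutex_t hash_locks[NR_STRIPE_HASH_LOCKS];
    static pthread_mutex_t device_lock = PTHREAD_MUTEX_INITIALIZER;

    static void lock_all_device_hash_locks(void)
    {
        int i;

        for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
            pthread_mutex_lock(&hash_locks[i]); /* always ascending order */
        pthread_mutex_lock(&device_lock);       /* innermost lock taken last */
    }

    static void unlock_all_device_hash_locks(void)
    {
        int i;

        pthread_mutex_unlock(&device_lock);
        for (i = NR_STRIPE_HASH_LOCKS; i; i--)  /* strict reverse order */
            pthread_mutex_unlock(&hash_locks[i - 1]);
    }

    int main(void)
    {
        int i;

        for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
            pthread_mutex_init(&hash_locks[i], NULL);
        lock_all_device_hash_locks();
        /* ... quiesce/resize-style critical section ... */
        unlock_all_device_hash_locks();
        return 0;
    }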
| @@ -249,7 +285,8 @@ static void raid5_wakeup_stripe_thread(struct stripe_head *sh) | |||
| 249 | } | 285 | } |
| 250 | } | 286 | } |
| 251 | 287 | ||
| 252 | static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh) | 288 | static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh, |
| 289 | struct list_head *temp_inactive_list) | ||
| 253 | { | 290 | { |
| 254 | BUG_ON(!list_empty(&sh->lru)); | 291 | BUG_ON(!list_empty(&sh->lru)); |
| 255 | BUG_ON(atomic_read(&conf->active_stripes)==0); | 292 | BUG_ON(atomic_read(&conf->active_stripes)==0); |
| @@ -278,23 +315,68 @@ static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh) | |||
| 278 | < IO_THRESHOLD) | 315 | < IO_THRESHOLD) |
| 279 | md_wakeup_thread(conf->mddev->thread); | 316 | md_wakeup_thread(conf->mddev->thread); |
| 280 | atomic_dec(&conf->active_stripes); | 317 | atomic_dec(&conf->active_stripes); |
| 281 | if (!test_bit(STRIPE_EXPANDING, &sh->state)) { | 318 | if (!test_bit(STRIPE_EXPANDING, &sh->state)) |
| 282 | list_add_tail(&sh->lru, &conf->inactive_list); | 319 | list_add_tail(&sh->lru, temp_inactive_list); |
| 283 | wake_up(&conf->wait_for_stripe); | ||
| 284 | if (conf->retry_read_aligned) | ||
| 285 | md_wakeup_thread(conf->mddev->thread); | ||
| 286 | } | ||
| 287 | } | 320 | } |
| 288 | } | 321 | } |
| 289 | 322 | ||
| 290 | static void __release_stripe(struct r5conf *conf, struct stripe_head *sh) | 323 | static void __release_stripe(struct r5conf *conf, struct stripe_head *sh, |
| 324 | struct list_head *temp_inactive_list) | ||
| 291 | { | 325 | { |
| 292 | if (atomic_dec_and_test(&sh->count)) | 326 | if (atomic_dec_and_test(&sh->count)) |
| 293 | do_release_stripe(conf, sh); | 327 | do_release_stripe(conf, sh, temp_inactive_list); |
| 328 | } | ||
| 329 | |||
| 330 | /* | ||
| 331 | * @hash may be NR_STRIPE_HASH_LOCKS, in which case temp_inactive_list has one list per hash lock | ||
| 332 | * | ||
| 333 | * Be careful: Only one task can add/delete stripes from temp_inactive_list at | ||
| 334 | * a given time. Adding stripes only takes the device lock, while deleting | ||
| 335 | * stripes only takes the hash lock. | ||
| 336 | */ | ||
| 337 | static void release_inactive_stripe_list(struct r5conf *conf, | ||
| 338 | struct list_head *temp_inactive_list, | ||
| 339 | int hash) | ||
| 340 | { | ||
| 341 | int size; | ||
| 342 | bool do_wakeup = false; | ||
| 343 | unsigned long flags; | ||
| 344 | |||
| 345 | if (hash == NR_STRIPE_HASH_LOCKS) { | ||
| 346 | size = NR_STRIPE_HASH_LOCKS; | ||
| 347 | hash = NR_STRIPE_HASH_LOCKS - 1; | ||
| 348 | } else | ||
| 349 | size = 1; | ||
| 350 | while (size) { | ||
| 351 | struct list_head *list = &temp_inactive_list[size - 1]; | ||
| 352 | |||
| 353 | /* | ||
| 354 | * We don't hold any lock here yet, get_active_stripe() might | ||
| 355 | * remove stripes from the list | ||
| 356 | */ | ||
| 357 | if (!list_empty_careful(list)) { | ||
| 358 | spin_lock_irqsave(conf->hash_locks + hash, flags); | ||
| 359 | if (list_empty(conf->inactive_list + hash) && | ||
| 360 | !list_empty(list)) | ||
| 361 | atomic_dec(&conf->empty_inactive_list_nr); | ||
| 362 | list_splice_tail_init(list, conf->inactive_list + hash); | ||
| 363 | do_wakeup = true; | ||
| 364 | spin_unlock_irqrestore(conf->hash_locks + hash, flags); | ||
| 365 | } | ||
| 366 | size--; | ||
| 367 | hash--; | ||
| 368 | } | ||
| 369 | |||
| 370 | if (do_wakeup) { | ||
| 371 | wake_up(&conf->wait_for_stripe); | ||
| 372 | if (conf->retry_read_aligned) | ||
| 373 | md_wakeup_thread(conf->mddev->thread); | ||
| 374 | } | ||
| 294 | } | 375 | } |
| 295 | 376 | ||
| 296 | /* should hold conf->device_lock already */ | 377 | /* should hold conf->device_lock already */ |
| 297 | static int release_stripe_list(struct r5conf *conf) | 378 | static int release_stripe_list(struct r5conf *conf, |
| 379 | struct list_head *temp_inactive_list) | ||
| 298 | { | 380 | { |
| 299 | struct stripe_head *sh; | 381 | struct stripe_head *sh; |
| 300 | int count = 0; | 382 | int count = 0; |
| @@ -303,6 +385,8 @@ static int release_stripe_list(struct r5conf *conf) | |||
| 303 | head = llist_del_all(&conf->released_stripes); | 385 | head = llist_del_all(&conf->released_stripes); |
| 304 | head = llist_reverse_order(head); | 386 | head = llist_reverse_order(head); |
| 305 | while (head) { | 387 | while (head) { |
| 388 | int hash; | ||
| 389 | |||
| 306 | sh = llist_entry(head, struct stripe_head, release_list); | 390 | sh = llist_entry(head, struct stripe_head, release_list); |
| 307 | head = llist_next(head); | 391 | head = llist_next(head); |
| 308 | /* sh could be re-added after STRIPE_ON_RELEASE_LIST is cleared */ | 392 | /* sh could be re-added after STRIPE_ON_RELEASE_LIST is cleared */ |
| @@ -313,7 +397,8 @@ static int release_stripe_list(struct r5conf *conf) | |||
| 313 | * again, the count is always > 1. This is true for | 397 | * again, the count is always > 1. This is true for |
| 314 | * STRIPE_ON_UNPLUG_LIST bit too. | 398 | * STRIPE_ON_UNPLUG_LIST bit too. |
| 315 | */ | 399 | */ |
| 316 | __release_stripe(conf, sh); | 400 | hash = sh->hash_lock_index; |
| 401 | __release_stripe(conf, sh, &temp_inactive_list[hash]); | ||
| 317 | count++; | 402 | count++; |
| 318 | } | 403 | } |
| 319 | 404 | ||
| @@ -324,9 +409,12 @@ static void release_stripe(struct stripe_head *sh) | |||
| 324 | { | 409 | { |
| 325 | struct r5conf *conf = sh->raid_conf; | 410 | struct r5conf *conf = sh->raid_conf; |
| 326 | unsigned long flags; | 411 | unsigned long flags; |
| 412 | struct list_head list; | ||
| 413 | int hash; | ||
| 327 | bool wakeup; | 414 | bool wakeup; |
| 328 | 415 | ||
| 329 | if (test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state)) | 416 | if (unlikely(!conf->mddev->thread) || |
| 417 | test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state)) | ||
| 330 | goto slow_path; | 418 | goto slow_path; |
| 331 | wakeup = llist_add(&sh->release_list, &conf->released_stripes); | 419 | wakeup = llist_add(&sh->release_list, &conf->released_stripes); |
| 332 | if (wakeup) | 420 | if (wakeup) |
| @@ -336,8 +424,11 @@ slow_path: | |||
| 336 | local_irq_save(flags); | 424 | local_irq_save(flags); |
| 337 | /* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */ | 425 | /* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */ |
| 338 | if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) { | 426 | if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) { |
| 339 | do_release_stripe(conf, sh); | 427 | INIT_LIST_HEAD(&list); |
| 428 | hash = sh->hash_lock_index; | ||
| 429 | do_release_stripe(conf, sh, &list); | ||
| 340 | spin_unlock(&conf->device_lock); | 430 | spin_unlock(&conf->device_lock); |
| 431 | release_inactive_stripe_list(conf, &list, hash); | ||
| 341 | } | 432 | } |
| 342 | local_irq_restore(flags); | 433 | local_irq_restore(flags); |
| 343 | } | 434 | } |
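release_stripe() above stages stripes on a private list while device_lock is held and only later splices them into the shared inactive list under the hash lock, so no path holds both locks across the hand-off. A userspace sketch of that two-phase pattern; the singly linked list and all names are illustrative.

    #include <pthread.h>
    #include <stddef.h>

    struct stripe { struct stripe *next; };

    static pthread_mutex_t device_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t hash_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct stripe *done_list;     /* guarded by device_lock */
    static struct stripe *inactive_list; /* guarded by hash_lock */

    static void release_batch(void)
    {
        struct stripe *temp = NULL, *sh;

        /* phase 1: detach onto a private list under the coarse lock */
        pthread_mutex_lock(&device_lock);
        while ((sh = done_list) != NULL) {
            done_list = sh->next;
            sh->next = temp;
            temp = sh;
        }
        pthread_mutex_unlock(&device_lock);

        /* phase 2: publish under the per-hash lock only */
        pthread_mutex_lock(&hash_lock);
        while ((sh = temp) != NULL) {
            temp = sh->next;
            sh->next = inactive_list;
            inactive_list = sh;
        }
        pthread_mutex_unlock(&hash_lock);
    }

    int main(void)
    {
        struct stripe a = { NULL }, b = { NULL };

        done_list = &a;
        a.next = &b;
        release_batch();
        return (inactive_list == &a && a.next == &b) ? 0 : 1;
    }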
| @@ -362,18 +453,21 @@ static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh) | |||
| 362 | 453 | ||
| 363 | 454 | ||
| 364 | /* find an idle stripe, make sure it is unhashed, and return it. */ | 455 | /* find an idle stripe, make sure it is unhashed, and return it. */ |
| 365 | static struct stripe_head *get_free_stripe(struct r5conf *conf) | 456 | static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash) |
| 366 | { | 457 | { |
| 367 | struct stripe_head *sh = NULL; | 458 | struct stripe_head *sh = NULL; |
| 368 | struct list_head *first; | 459 | struct list_head *first; |
| 369 | 460 | ||
| 370 | if (list_empty(&conf->inactive_list)) | 461 | if (list_empty(conf->inactive_list + hash)) |
| 371 | goto out; | 462 | goto out; |
| 372 | first = conf->inactive_list.next; | 463 | first = (conf->inactive_list + hash)->next; |
| 373 | sh = list_entry(first, struct stripe_head, lru); | 464 | sh = list_entry(first, struct stripe_head, lru); |
| 374 | list_del_init(first); | 465 | list_del_init(first); |
| 375 | remove_hash(sh); | 466 | remove_hash(sh); |
| 376 | atomic_inc(&conf->active_stripes); | 467 | atomic_inc(&conf->active_stripes); |
| 468 | BUG_ON(hash != sh->hash_lock_index); | ||
| 469 | if (list_empty(conf->inactive_list + hash)) | ||
| 470 | atomic_inc(&conf->empty_inactive_list_nr); | ||
| 377 | out: | 471 | out: |
| 378 | return sh; | 472 | return sh; |
| 379 | } | 473 | } |
| @@ -416,7 +510,7 @@ static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous, | |||
| 416 | static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) | 510 | static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) |
| 417 | { | 511 | { |
| 418 | struct r5conf *conf = sh->raid_conf; | 512 | struct r5conf *conf = sh->raid_conf; |
| 419 | int i; | 513 | int i, seq; |
| 420 | 514 | ||
| 421 | BUG_ON(atomic_read(&sh->count) != 0); | 515 | BUG_ON(atomic_read(&sh->count) != 0); |
| 422 | BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); | 516 | BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); |
| @@ -426,7 +520,8 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) | |||
| 426 | (unsigned long long)sh->sector); | 520 | (unsigned long long)sh->sector); |
| 427 | 521 | ||
| 428 | remove_hash(sh); | 522 | remove_hash(sh); |
| 429 | 523 | retry: | |
| 524 | seq = read_seqcount_begin(&conf->gen_lock); | ||
| 430 | sh->generation = conf->generation - previous; | 525 | sh->generation = conf->generation - previous; |
| 431 | sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks; | 526 | sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks; |
| 432 | sh->sector = sector; | 527 | sh->sector = sector; |
| @@ -448,6 +543,8 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) | |||
| 448 | dev->flags = 0; | 543 | dev->flags = 0; |
| 449 | raid5_build_block(sh, i, previous); | 544 | raid5_build_block(sh, i, previous); |
| 450 | } | 545 | } |
| 546 | if (read_seqcount_retry(&conf->gen_lock, seq)) | ||
| 547 | goto retry; | ||
| 451 | insert_hash(conf, sh); | 548 | insert_hash(conf, sh); |
| 452 | sh->cpu = smp_processor_id(); | 549 | sh->cpu = smp_processor_id(); |
| 453 | } | 550 | } |
| @@ -552,29 +649,31 @@ get_active_stripe(struct r5conf *conf, sector_t sector, | |||
| 552 | int previous, int noblock, int noquiesce) | 649 | int previous, int noblock, int noquiesce) |
| 553 | { | 650 | { |
| 554 | struct stripe_head *sh; | 651 | struct stripe_head *sh; |
| 652 | int hash = stripe_hash_locks_hash(sector); | ||
| 555 | 653 | ||
| 556 | pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector); | 654 | pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector); |
| 557 | 655 | ||
| 558 | spin_lock_irq(&conf->device_lock); | 656 | spin_lock_irq(conf->hash_locks + hash); |
| 559 | 657 | ||
| 560 | do { | 658 | do { |
| 561 | wait_event_lock_irq(conf->wait_for_stripe, | 659 | wait_event_lock_irq(conf->wait_for_stripe, |
| 562 | conf->quiesce == 0 || noquiesce, | 660 | conf->quiesce == 0 || noquiesce, |
| 563 | conf->device_lock); | 661 | *(conf->hash_locks + hash)); |
| 564 | sh = __find_stripe(conf, sector, conf->generation - previous); | 662 | sh = __find_stripe(conf, sector, conf->generation - previous); |
| 565 | if (!sh) { | 663 | if (!sh) { |
| 566 | if (!conf->inactive_blocked) | 664 | if (!conf->inactive_blocked) |
| 567 | sh = get_free_stripe(conf); | 665 | sh = get_free_stripe(conf, hash); |
| 568 | if (noblock && sh == NULL) | 666 | if (noblock && sh == NULL) |
| 569 | break; | 667 | break; |
| 570 | if (!sh) { | 668 | if (!sh) { |
| 571 | conf->inactive_blocked = 1; | 669 | conf->inactive_blocked = 1; |
| 572 | wait_event_lock_irq(conf->wait_for_stripe, | 670 | wait_event_lock_irq( |
| 573 | !list_empty(&conf->inactive_list) && | 671 | conf->wait_for_stripe, |
| 574 | (atomic_read(&conf->active_stripes) | 672 | !list_empty(conf->inactive_list + hash) && |
| 575 | < (conf->max_nr_stripes *3/4) | 673 | (atomic_read(&conf->active_stripes) |
| 576 | || !conf->inactive_blocked), | 674 | < (conf->max_nr_stripes * 3 / 4) |
| 577 | conf->device_lock); | 675 | || !conf->inactive_blocked), |
| 676 | *(conf->hash_locks + hash)); | ||
| 578 | conf->inactive_blocked = 0; | 677 | conf->inactive_blocked = 0; |
| 579 | } else | 678 | } else |
| 580 | init_stripe(sh, sector, previous); | 679 | init_stripe(sh, sector, previous); |
| @@ -585,9 +684,11 @@ get_active_stripe(struct r5conf *conf, sector_t sector, | |||
| 585 | && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state) | 684 | && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state) |
| 586 | && !test_bit(STRIPE_ON_RELEASE_LIST, &sh->state)); | 685 | && !test_bit(STRIPE_ON_RELEASE_LIST, &sh->state)); |
| 587 | } else { | 686 | } else { |
| 687 | spin_lock(&conf->device_lock); | ||
| 588 | if (!test_bit(STRIPE_HANDLE, &sh->state)) | 688 | if (!test_bit(STRIPE_HANDLE, &sh->state)) |
| 589 | atomic_inc(&conf->active_stripes); | 689 | atomic_inc(&conf->active_stripes); |
| 590 | if (list_empty(&sh->lru) && | 690 | if (list_empty(&sh->lru) && |
| 691 | !test_bit(STRIPE_ON_RELEASE_LIST, &sh->state) && | ||
| 591 | !test_bit(STRIPE_EXPANDING, &sh->state)) | 692 | !test_bit(STRIPE_EXPANDING, &sh->state)) |
| 592 | BUG(); | 693 | BUG(); |
| 593 | list_del_init(&sh->lru); | 694 | list_del_init(&sh->lru); |
| @@ -595,6 +696,7 @@ get_active_stripe(struct r5conf *conf, sector_t sector, | |||
| 595 | sh->group->stripes_cnt--; | 696 | sh->group->stripes_cnt--; |
| 596 | sh->group = NULL; | 697 | sh->group = NULL; |
| 597 | } | 698 | } |
| 699 | spin_unlock(&conf->device_lock); | ||
| 598 | } | 700 | } |
| 599 | } | 701 | } |
| 600 | } while (sh == NULL); | 702 | } while (sh == NULL); |
| @@ -602,7 +704,7 @@ get_active_stripe(struct r5conf *conf, sector_t sector, | |||
| 602 | if (sh) | 704 | if (sh) |
| 603 | atomic_inc(&sh->count); | 705 | atomic_inc(&sh->count); |
| 604 | 706 | ||
| 605 | spin_unlock_irq(&conf->device_lock); | 707 | spin_unlock_irq(conf->hash_locks + hash); |
| 606 | return sh; | 708 | return sh; |
| 607 | } | 709 | } |
| 608 | 710 | ||
| @@ -758,7 +860,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) | |||
| 758 | bi->bi_sector = (sh->sector | 860 | bi->bi_sector = (sh->sector |
| 759 | + rdev->data_offset); | 861 | + rdev->data_offset); |
| 760 | if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) | 862 | if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) |
| 761 | bi->bi_rw |= REQ_FLUSH; | 863 | bi->bi_rw |= REQ_NOMERGE; |
| 762 | 864 | ||
| 763 | bi->bi_vcnt = 1; | 865 | bi->bi_vcnt = 1; |
| 764 | bi->bi_io_vec[0].bv_len = STRIPE_SIZE; | 866 | bi->bi_io_vec[0].bv_len = STRIPE_SIZE; |
| @@ -1582,7 +1684,7 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) | |||
| 1582 | put_cpu(); | 1684 | put_cpu(); |
| 1583 | } | 1685 | } |
| 1584 | 1686 | ||
| 1585 | static int grow_one_stripe(struct r5conf *conf) | 1687 | static int grow_one_stripe(struct r5conf *conf, int hash) |
| 1586 | { | 1688 | { |
| 1587 | struct stripe_head *sh; | 1689 | struct stripe_head *sh; |
| 1588 | sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL); | 1690 | sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL); |
| @@ -1598,6 +1700,7 @@ static int grow_one_stripe(struct r5conf *conf) | |||
| 1598 | kmem_cache_free(conf->slab_cache, sh); | 1700 | kmem_cache_free(conf->slab_cache, sh); |
| 1599 | return 0; | 1701 | return 0; |
| 1600 | } | 1702 | } |
| 1703 | sh->hash_lock_index = hash; | ||
| 1601 | /* we just created an active stripe so... */ | 1704 | /* we just created an active stripe so... */ |
| 1602 | atomic_set(&sh->count, 1); | 1705 | atomic_set(&sh->count, 1); |
| 1603 | atomic_inc(&conf->active_stripes); | 1706 | atomic_inc(&conf->active_stripes); |
| @@ -1610,6 +1713,7 @@ static int grow_stripes(struct r5conf *conf, int num) | |||
| 1610 | { | 1713 | { |
| 1611 | struct kmem_cache *sc; | 1714 | struct kmem_cache *sc; |
| 1612 | int devs = max(conf->raid_disks, conf->previous_raid_disks); | 1715 | int devs = max(conf->raid_disks, conf->previous_raid_disks); |
| 1716 | int hash; | ||
| 1613 | 1717 | ||
| 1614 | if (conf->mddev->gendisk) | 1718 | if (conf->mddev->gendisk) |
| 1615 | sprintf(conf->cache_name[0], | 1719 | sprintf(conf->cache_name[0], |
| @@ -1627,9 +1731,13 @@ static int grow_stripes(struct r5conf *conf, int num) | |||
| 1627 | return 1; | 1731 | return 1; |
| 1628 | conf->slab_cache = sc; | 1732 | conf->slab_cache = sc; |
| 1629 | conf->pool_size = devs; | 1733 | conf->pool_size = devs; |
| 1630 | while (num--) | 1734 | hash = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS; |
| 1631 | if (!grow_one_stripe(conf)) | 1735 | while (num--) { |
| 1736 | if (!grow_one_stripe(conf, hash)) | ||
| 1632 | return 1; | 1737 | return 1; |
| 1738 | conf->max_nr_stripes++; | ||
| 1739 | hash = (hash + 1) % NR_STRIPE_HASH_LOCKS; | ||
| 1740 | } | ||
| 1633 | return 0; | 1741 | return 0; |
| 1634 | } | 1742 | } |
| 1635 | 1743 | ||
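grow_stripes() above assigns each new stripe to the next hash bucket in round-robin order, keeping the per-hash inactive lists balanced. A quick standalone check of that distribution; the bucket and stripe counts here are illustrative only:

    #include <stdio.h>

    #define NR_STRIPE_HASH_LOCKS 8  /* illustrative bucket count */

    int main(void)
    {
        int counts[NR_STRIPE_HASH_LOCKS] = { 0 };
        int max_nr_stripes = 0, num = 256, hash;

        hash = max_nr_stripes % NR_STRIPE_HASH_LOCKS;
        while (num--) {
            counts[hash]++;          /* stands in for grow_one_stripe() */
            max_nr_stripes++;
            hash = (hash + 1) % NR_STRIPE_HASH_LOCKS;
        }
        for (hash = 0; hash < NR_STRIPE_HASH_LOCKS; hash++)
            printf("bucket %d: %d stripes\n", hash, counts[hash]);
        return 0;
    }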
| @@ -1687,6 +1795,7 @@ static int resize_stripes(struct r5conf *conf, int newsize) | |||
| 1687 | int err; | 1795 | int err; |
| 1688 | struct kmem_cache *sc; | 1796 | struct kmem_cache *sc; |
| 1689 | int i; | 1797 | int i; |
| 1798 | int hash, cnt; | ||
| 1690 | 1799 | ||
| 1691 | if (newsize <= conf->pool_size) | 1800 | if (newsize <= conf->pool_size) |
| 1692 | return 0; /* never bother to shrink */ | 1801 | return 0; /* never bother to shrink */ |
| @@ -1726,19 +1835,29 @@ static int resize_stripes(struct r5conf *conf, int newsize) | |||
| 1726 | * OK, we have enough stripes, start collecting inactive | 1835 | * OK, we have enough stripes, start collecting inactive |
| 1727 | * stripes and copying them over | 1836 | * stripes and copying them over |
| 1728 | */ | 1837 | */ |
| 1838 | hash = 0; | ||
| 1839 | cnt = 0; | ||
| 1729 | list_for_each_entry(nsh, &newstripes, lru) { | 1840 | list_for_each_entry(nsh, &newstripes, lru) { |
| 1730 | spin_lock_irq(&conf->device_lock); | 1841 | lock_device_hash_lock(conf, hash); |
| 1731 | wait_event_lock_irq(conf->wait_for_stripe, | 1842 | wait_event_cmd(conf->wait_for_stripe, |
| 1732 | !list_empty(&conf->inactive_list), | 1843 | !list_empty(conf->inactive_list + hash), |
| 1733 | conf->device_lock); | 1844 | unlock_device_hash_lock(conf, hash), |
| 1734 | osh = get_free_stripe(conf); | 1845 | lock_device_hash_lock(conf, hash)); |
| 1735 | spin_unlock_irq(&conf->device_lock); | 1846 | osh = get_free_stripe(conf, hash); |
| 1847 | unlock_device_hash_lock(conf, hash); | ||
| 1736 | atomic_set(&nsh->count, 1); | 1848 | atomic_set(&nsh->count, 1); |
| 1737 | for(i=0; i<conf->pool_size; i++) | 1849 | for(i=0; i<conf->pool_size; i++) |
| 1738 | nsh->dev[i].page = osh->dev[i].page; | 1850 | nsh->dev[i].page = osh->dev[i].page; |
| 1739 | for( ; i<newsize; i++) | 1851 | for( ; i<newsize; i++) |
| 1740 | nsh->dev[i].page = NULL; | 1852 | nsh->dev[i].page = NULL; |
| 1853 | nsh->hash_lock_index = hash; | ||
| 1741 | kmem_cache_free(conf->slab_cache, osh); | 1854 | kmem_cache_free(conf->slab_cache, osh); |
| 1855 | cnt++; | ||
| 1856 | if (cnt >= conf->max_nr_stripes / NR_STRIPE_HASH_LOCKS + | ||
| 1857 | !!((conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS) > hash)) { | ||
| 1858 | hash++; | ||
| 1859 | cnt = 0; | ||
| 1860 | } | ||
| 1742 | } | 1861 | } |
| 1743 | kmem_cache_destroy(conf->slab_cache); | 1862 | kmem_cache_destroy(conf->slab_cache); |
| 1744 | 1863 | ||
| @@ -1797,13 +1916,13 @@ static int resize_stripes(struct r5conf *conf, int newsize) | |||
| 1797 | return err; | 1916 | return err; |
| 1798 | } | 1917 | } |
| 1799 | 1918 | ||
| 1800 | static int drop_one_stripe(struct r5conf *conf) | 1919 | static int drop_one_stripe(struct r5conf *conf, int hash) |
| 1801 | { | 1920 | { |
| 1802 | struct stripe_head *sh; | 1921 | struct stripe_head *sh; |
| 1803 | 1922 | ||
| 1804 | spin_lock_irq(&conf->device_lock); | 1923 | spin_lock_irq(conf->hash_locks + hash); |
| 1805 | sh = get_free_stripe(conf); | 1924 | sh = get_free_stripe(conf, hash); |
| 1806 | spin_unlock_irq(&conf->device_lock); | 1925 | spin_unlock_irq(conf->hash_locks + hash); |
| 1807 | if (!sh) | 1926 | if (!sh) |
| 1808 | return 0; | 1927 | return 0; |
| 1809 | BUG_ON(atomic_read(&sh->count)); | 1928 | BUG_ON(atomic_read(&sh->count)); |
| @@ -1815,8 +1934,10 @@ static int drop_one_stripe(struct r5conf *conf) | |||
| 1815 | 1934 | ||
| 1816 | static void shrink_stripes(struct r5conf *conf) | 1935 | static void shrink_stripes(struct r5conf *conf) |
| 1817 | { | 1936 | { |
| 1818 | while (drop_one_stripe(conf)) | 1937 | int hash; |
| 1819 | ; | 1938 | for (hash = 0; hash < NR_STRIPE_HASH_LOCKS; hash++) |
| 1939 | while (drop_one_stripe(conf, hash)) | ||
| 1940 | ; | ||
| 1820 | 1941 | ||
| 1821 | if (conf->slab_cache) | 1942 | if (conf->slab_cache) |
| 1822 | kmem_cache_destroy(conf->slab_cache); | 1943 | kmem_cache_destroy(conf->slab_cache); |
| @@ -1921,6 +2042,9 @@ static void raid5_end_read_request(struct bio * bi, int error) | |||
| 1921 | mdname(conf->mddev), bdn); | 2042 | mdname(conf->mddev), bdn); |
| 1922 | else | 2043 | else |
| 1923 | retry = 1; | 2044 | retry = 1; |
| 2045 | if (set_bad && test_bit(In_sync, &rdev->flags) | ||
| 2046 | && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) | ||
| 2047 | retry = 1; | ||
| 1924 | if (retry) | 2048 | if (retry) |
| 1925 | if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) { | 2049 | if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) { |
| 1926 | set_bit(R5_ReadError, &sh->dev[i].flags); | 2050 | set_bit(R5_ReadError, &sh->dev[i].flags); |
| @@ -3900,7 +4024,8 @@ static void raid5_activate_delayed(struct r5conf *conf) | |||
| 3900 | } | 4024 | } |
| 3901 | } | 4025 | } |
| 3902 | 4026 | ||
| 3903 | static void activate_bit_delay(struct r5conf *conf) | 4027 | static void activate_bit_delay(struct r5conf *conf, |
| 4028 | struct list_head *temp_inactive_list) | ||
| 3904 | { | 4029 | { |
| 3905 | /* device_lock is held */ | 4030 | /* device_lock is held */ |
| 3906 | struct list_head head; | 4031 | struct list_head head; |
| @@ -3908,9 +4033,11 @@ static void activate_bit_delay(struct r5conf *conf) | |||
| 3908 | list_del_init(&conf->bitmap_list); | 4033 | list_del_init(&conf->bitmap_list); |
| 3909 | while (!list_empty(&head)) { | 4034 | while (!list_empty(&head)) { |
| 3910 | struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); | 4035 | struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); |
| 4036 | int hash; | ||
| 3911 | list_del_init(&sh->lru); | 4037 | list_del_init(&sh->lru); |
| 3912 | atomic_inc(&sh->count); | 4038 | atomic_inc(&sh->count); |
| 3913 | __release_stripe(conf, sh); | 4039 | hash = sh->hash_lock_index; |
| 4040 | __release_stripe(conf, sh, &temp_inactive_list[hash]); | ||
| 3914 | } | 4041 | } |
| 3915 | } | 4042 | } |
| 3916 | 4043 | ||
| @@ -3926,7 +4053,7 @@ int md_raid5_congested(struct mddev *mddev, int bits) | |||
| 3926 | return 1; | 4053 | return 1; |
| 3927 | if (conf->quiesce) | 4054 | if (conf->quiesce) |
| 3928 | return 1; | 4055 | return 1; |
| 3929 | if (list_empty_careful(&conf->inactive_list)) | 4056 | if (atomic_read(&conf->empty_inactive_list_nr)) |
| 3930 | return 1; | 4057 | return 1; |
| 3931 | 4058 | ||
| 3932 | return 0; | 4059 | return 0; |
| @@ -4256,6 +4383,7 @@ static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group) | |||
| 4256 | struct raid5_plug_cb { | 4383 | struct raid5_plug_cb { |
| 4257 | struct blk_plug_cb cb; | 4384 | struct blk_plug_cb cb; |
| 4258 | struct list_head list; | 4385 | struct list_head list; |
| 4386 | struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS]; | ||
| 4259 | }; | 4387 | }; |
| 4260 | 4388 | ||
| 4261 | static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule) | 4389 | static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule) |
| @@ -4266,6 +4394,7 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule) | |||
| 4266 | struct mddev *mddev = cb->cb.data; | 4394 | struct mddev *mddev = cb->cb.data; |
| 4267 | struct r5conf *conf = mddev->private; | 4395 | struct r5conf *conf = mddev->private; |
| 4268 | int cnt = 0; | 4396 | int cnt = 0; |
| 4397 | int hash; | ||
| 4269 | 4398 | ||
| 4270 | if (cb->list.next && !list_empty(&cb->list)) { | 4399 | if (cb->list.next && !list_empty(&cb->list)) { |
| 4271 | spin_lock_irq(&conf->device_lock); | 4400 | spin_lock_irq(&conf->device_lock); |
| @@ -4283,11 +4412,14 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule) | |||
| 4283 | * STRIPE_ON_RELEASE_LIST could be set here. In that | 4412 | * STRIPE_ON_RELEASE_LIST could be set here. In that |
| 4284 | * case, the count is always > 1 here | 4413 | * case, the count is always > 1 here |
| 4285 | */ | 4414 | */ |
| 4286 | __release_stripe(conf, sh); | 4415 | hash = sh->hash_lock_index; |
| 4416 | __release_stripe(conf, sh, &cb->temp_inactive_list[hash]); | ||
| 4287 | cnt++; | 4417 | cnt++; |
| 4288 | } | 4418 | } |
| 4289 | spin_unlock_irq(&conf->device_lock); | 4419 | spin_unlock_irq(&conf->device_lock); |
| 4290 | } | 4420 | } |
| 4421 | release_inactive_stripe_list(conf, cb->temp_inactive_list, | ||
| 4422 | NR_STRIPE_HASH_LOCKS); | ||
| 4291 | if (mddev->queue) | 4423 | if (mddev->queue) |
| 4292 | trace_block_unplug(mddev->queue, cnt, !from_schedule); | 4424 | trace_block_unplug(mddev->queue, cnt, !from_schedule); |
| 4293 | kfree(cb); | 4425 | kfree(cb); |
| @@ -4308,8 +4440,12 @@ static void release_stripe_plug(struct mddev *mddev, | |||
| 4308 | 4440 | ||
| 4309 | cb = container_of(blk_cb, struct raid5_plug_cb, cb); | 4441 | cb = container_of(blk_cb, struct raid5_plug_cb, cb); |
| 4310 | 4442 | ||
| 4311 | if (cb->list.next == NULL) | 4443 | if (cb->list.next == NULL) { |
| 4444 | int i; | ||
| 4312 | INIT_LIST_HEAD(&cb->list); | 4445 | INIT_LIST_HEAD(&cb->list); |
| 4446 | for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) | ||
| 4447 | INIT_LIST_HEAD(cb->temp_inactive_list + i); | ||
| 4448 | } | ||
| 4313 | 4449 | ||
| 4314 | if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)) | 4450 | if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)) |
| 4315 | list_add_tail(&sh->lru, &cb->list); | 4451 | list_add_tail(&sh->lru, &cb->list); |
| @@ -4692,14 +4828,19 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk | |||
| 4692 | time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { | 4828 | time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { |
| 4693 | /* Cannot proceed until we've updated the superblock... */ | 4829 | /* Cannot proceed until we've updated the superblock... */ |
| 4694 | wait_event(conf->wait_for_overlap, | 4830 | wait_event(conf->wait_for_overlap, |
| 4695 | atomic_read(&conf->reshape_stripes)==0); | 4831 | atomic_read(&conf->reshape_stripes)==0 |
| 4832 | || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); | ||
| 4833 | if (atomic_read(&conf->reshape_stripes) != 0) | ||
| 4834 | return 0; | ||
| 4696 | mddev->reshape_position = conf->reshape_progress; | 4835 | mddev->reshape_position = conf->reshape_progress; |
| 4697 | mddev->curr_resync_completed = sector_nr; | 4836 | mddev->curr_resync_completed = sector_nr; |
| 4698 | conf->reshape_checkpoint = jiffies; | 4837 | conf->reshape_checkpoint = jiffies; |
| 4699 | set_bit(MD_CHANGE_DEVS, &mddev->flags); | 4838 | set_bit(MD_CHANGE_DEVS, &mddev->flags); |
| 4700 | md_wakeup_thread(mddev->thread); | 4839 | md_wakeup_thread(mddev->thread); |
| 4701 | wait_event(mddev->sb_wait, mddev->flags == 0 || | 4840 | wait_event(mddev->sb_wait, mddev->flags == 0 || |
| 4702 | kthread_should_stop()); | 4841 | test_bit(MD_RECOVERY_INTR, &mddev->recovery)); |
| 4842 | if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) | ||
| 4843 | return 0; | ||
| 4703 | spin_lock_irq(&conf->device_lock); | 4844 | spin_lock_irq(&conf->device_lock); |
| 4704 | conf->reshape_safe = mddev->reshape_position; | 4845 | conf->reshape_safe = mddev->reshape_position; |
| 4705 | spin_unlock_irq(&conf->device_lock); | 4846 | spin_unlock_irq(&conf->device_lock); |
| @@ -4782,7 +4923,10 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk | |||
| 4782 | >= mddev->resync_max - mddev->curr_resync_completed) { | 4923 | >= mddev->resync_max - mddev->curr_resync_completed) { |
| 4783 | /* Cannot proceed until we've updated the superblock... */ | 4924 | /* Cannot proceed until we've updated the superblock... */ |
| 4784 | wait_event(conf->wait_for_overlap, | 4925 | wait_event(conf->wait_for_overlap, |
| 4785 | atomic_read(&conf->reshape_stripes) == 0); | 4926 | atomic_read(&conf->reshape_stripes) == 0 |
| 4927 | || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); | ||
| 4928 | if (atomic_read(&conf->reshape_stripes) != 0) | ||
| 4929 | goto ret; | ||
| 4786 | mddev->reshape_position = conf->reshape_progress; | 4930 | mddev->reshape_position = conf->reshape_progress; |
| 4787 | mddev->curr_resync_completed = sector_nr; | 4931 | mddev->curr_resync_completed = sector_nr; |
| 4788 | conf->reshape_checkpoint = jiffies; | 4932 | conf->reshape_checkpoint = jiffies; |
| @@ -4790,13 +4934,16 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk | |||
| 4790 | md_wakeup_thread(mddev->thread); | 4934 | md_wakeup_thread(mddev->thread); |
| 4791 | wait_event(mddev->sb_wait, | 4935 | wait_event(mddev->sb_wait, |
| 4792 | !test_bit(MD_CHANGE_DEVS, &mddev->flags) | 4936 | !test_bit(MD_CHANGE_DEVS, &mddev->flags) |
| 4793 | || kthread_should_stop()); | 4937 | || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); |
| 4938 | if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) | ||
| 4939 | goto ret; | ||
| 4794 | spin_lock_irq(&conf->device_lock); | 4940 | spin_lock_irq(&conf->device_lock); |
| 4795 | conf->reshape_safe = mddev->reshape_position; | 4941 | conf->reshape_safe = mddev->reshape_position; |
| 4796 | spin_unlock_irq(&conf->device_lock); | 4942 | spin_unlock_irq(&conf->device_lock); |
| 4797 | wake_up(&conf->wait_for_overlap); | 4943 | wake_up(&conf->wait_for_overlap); |
| 4798 | sysfs_notify(&mddev->kobj, NULL, "sync_completed"); | 4944 | sysfs_notify(&mddev->kobj, NULL, "sync_completed"); |
| 4799 | } | 4945 | } |
| 4946 | ret: | ||
| 4800 | return reshape_sectors; | 4947 | return reshape_sectors; |
| 4801 | } | 4948 | } |
| 4802 | 4949 | ||
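Both checkpoint waits in this function gain test_bit(MD_RECOVERY_INTR, ...) as a second wake condition, so an interrupted recovery can no longer leave reshape_request() asleep forever; the code then re-checks which condition fired and abandons the checkpoint. The idiom in isolation, with the caveat that whichever path sets MD_RECOVERY_INTR must also wake the queue:

    /* sketch: wake on "drained" or "interrupted", then decide */
    wait_event(conf->wait_for_overlap,
               atomic_read(&conf->reshape_stripes) == 0 ||
               test_bit(MD_RECOVERY_INTR, &mddev->recovery));
    if (atomic_read(&conf->reshape_stripes) != 0)
            return 0;       /* interrupted: skip the superblock update */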
| @@ -4954,27 +5101,45 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) | |||
| 4954 | } | 5101 | } |
| 4955 | 5102 | ||
| 4956 | static int handle_active_stripes(struct r5conf *conf, int group, | 5103 | static int handle_active_stripes(struct r5conf *conf, int group, |
| 4957 | struct r5worker *worker) | 5104 | struct r5worker *worker, |
| 5105 | struct list_head *temp_inactive_list) | ||
| 4958 | { | 5106 | { |
| 4959 | struct stripe_head *batch[MAX_STRIPE_BATCH], *sh; | 5107 | struct stripe_head *batch[MAX_STRIPE_BATCH], *sh; |
| 4960 | int i, batch_size = 0; | 5108 | int i, batch_size = 0, hash; |
| 5109 | bool release_inactive = false; | ||
| 4961 | 5110 | ||
| 4962 | while (batch_size < MAX_STRIPE_BATCH && | 5111 | while (batch_size < MAX_STRIPE_BATCH && |
| 4963 | (sh = __get_priority_stripe(conf, group)) != NULL) | 5112 | (sh = __get_priority_stripe(conf, group)) != NULL) |
| 4964 | batch[batch_size++] = sh; | 5113 | batch[batch_size++] = sh; |
| 4965 | 5114 | ||
| 4966 | if (batch_size == 0) | 5115 | if (batch_size == 0) { |
| 4967 | return batch_size; | 5116 | for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) |
| 5117 | if (!list_empty(temp_inactive_list + i)) | ||
| 5118 | break; | ||
| 5119 | if (i == NR_STRIPE_HASH_LOCKS) | ||
| 5120 | return batch_size; | ||
| 5121 | release_inactive = true; | ||
| 5122 | } | ||
| 4968 | spin_unlock_irq(&conf->device_lock); | 5123 | spin_unlock_irq(&conf->device_lock); |
| 4969 | 5124 | ||
| 5125 | release_inactive_stripe_list(conf, temp_inactive_list, | ||
| 5126 | NR_STRIPE_HASH_LOCKS); | ||
| 5127 | |||
| 5128 | if (release_inactive) { | ||
| 5129 | spin_lock_irq(&conf->device_lock); | ||
| 5130 | return 0; | ||
| 5131 | } | ||
| 5132 | |||
| 4970 | for (i = 0; i < batch_size; i++) | 5133 | for (i = 0; i < batch_size; i++) |
| 4971 | handle_stripe(batch[i]); | 5134 | handle_stripe(batch[i]); |
| 4972 | 5135 | ||
| 4973 | cond_resched(); | 5136 | cond_resched(); |
| 4974 | 5137 | ||
| 4975 | spin_lock_irq(&conf->device_lock); | 5138 | spin_lock_irq(&conf->device_lock); |
| 4976 | for (i = 0; i < batch_size; i++) | 5139 | for (i = 0; i < batch_size; i++) { |
| 4977 | __release_stripe(conf, batch[i]); | 5140 | hash = batch[i]->hash_lock_index; |
| 5141 | __release_stripe(conf, batch[i], &temp_inactive_list[hash]); | ||
| 5142 | } | ||
| 4978 | return batch_size; | 5143 | return batch_size; |
| 4979 | } | 5144 | } |
| 4980 | 5145 | ||
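handle_active_stripes() is still entered and exited with conf->device_lock held by its callers, but it now also drains the caller's temp_inactive_list while the lock is dropped, even when no stripes were batched. The overall batch-under-lock shape, as a sketch (grab_next() is a hypothetical stand-in for __get_priority_stripe()):

    while (batch_size < MAX_STRIPE_BATCH &&
           (sh = grab_next(conf)) != NULL)
            batch[batch_size++] = sh;
    spin_unlock_irq(&conf->device_lock);

    for (i = 0; i < batch_size; i++)
            handle_stripe(batch[i]);        /* heavy work, lock dropped */

    spin_lock_irq(&conf->device_lock);
    for (i = 0; i < batch_size; i++)        /* hand back per hash bucket */
            __release_stripe(conf, batch[i],
                             temp_inactive_list + batch[i]->hash_lock_index);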
| @@ -4995,9 +5160,10 @@ static void raid5_do_work(struct work_struct *work) | |||
| 4995 | while (1) { | 5160 | while (1) { |
| 4996 | int batch_size, released; | 5161 | int batch_size, released; |
| 4997 | 5162 | ||
| 4998 | released = release_stripe_list(conf); | 5163 | released = release_stripe_list(conf, worker->temp_inactive_list); |
| 4999 | 5164 | ||
| 5000 | batch_size = handle_active_stripes(conf, group_id, worker); | 5165 | batch_size = handle_active_stripes(conf, group_id, worker, |
| 5166 | worker->temp_inactive_list); | ||
| 5001 | worker->working = false; | 5167 | worker->working = false; |
| 5002 | if (!batch_size && !released) | 5168 | if (!batch_size && !released) |
| 5003 | break; | 5169 | break; |
| @@ -5036,7 +5202,7 @@ static void raid5d(struct md_thread *thread) | |||
| 5036 | struct bio *bio; | 5202 | struct bio *bio; |
| 5037 | int batch_size, released; | 5203 | int batch_size, released; |
| 5038 | 5204 | ||
| 5039 | released = release_stripe_list(conf); | 5205 | released = release_stripe_list(conf, conf->temp_inactive_list); |
| 5040 | 5206 | ||
| 5041 | if (!list_empty(&conf->bitmap_list)) { | 5207 | if (!list_empty(&conf->bitmap_list)) { |
| @@ -5046,7 +5212,7 @@ static void raid5d(struct md_thread *thread) | |||
| 5046 | bitmap_unplug(mddev->bitmap); | 5212 | bitmap_unplug(mddev->bitmap); |
| 5047 | spin_lock_irq(&conf->device_lock); | 5213 | spin_lock_irq(&conf->device_lock); |
| 5048 | conf->seq_write = conf->seq_flush; | 5214 | conf->seq_write = conf->seq_flush; |
| 5049 | activate_bit_delay(conf); | 5215 | activate_bit_delay(conf, conf->temp_inactive_list); |
| 5050 | } | 5216 | } |
| 5051 | raid5_activate_delayed(conf); | 5217 | raid5_activate_delayed(conf); |
| 5052 | 5218 | ||
| @@ -5060,7 +5226,8 @@ static void raid5d(struct md_thread *thread) | |||
| 5060 | handled++; | 5226 | handled++; |
| 5061 | } | 5227 | } |
| 5062 | 5228 | ||
| 5063 | batch_size = handle_active_stripes(conf, ANY_GROUP, NULL); | 5229 | batch_size = handle_active_stripes(conf, ANY_GROUP, NULL, |
| 5230 | conf->temp_inactive_list); | ||
| 5064 | if (!batch_size && !released) | 5231 | if (!batch_size && !released) |
| 5065 | break; | 5232 | break; |
| 5066 | handled += batch_size; | 5233 | handled += batch_size; |
| @@ -5096,22 +5263,29 @@ raid5_set_cache_size(struct mddev *mddev, int size) | |||
| 5096 | { | 5263 | { |
| 5097 | struct r5conf *conf = mddev->private; | 5264 | struct r5conf *conf = mddev->private; |
| 5098 | int err; | 5265 | int err; |
| 5266 | int hash; | ||
| 5099 | 5267 | ||
| 5100 | if (size <= 16 || size > 32768) | 5268 | if (size <= 16 || size > 32768) |
| 5101 | return -EINVAL; | 5269 | return -EINVAL; |
| 5270 | hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS; | ||
| 5102 | while (size < conf->max_nr_stripes) { | 5271 | while (size < conf->max_nr_stripes) { |
| 5103 | if (drop_one_stripe(conf)) | 5272 | if (drop_one_stripe(conf, hash)) |
| 5104 | conf->max_nr_stripes--; | 5273 | conf->max_nr_stripes--; |
| 5105 | else | 5274 | else |
| 5106 | break; | 5275 | break; |
| 5276 | hash--; | ||
| 5277 | if (hash < 0) | ||
| 5278 | hash = NR_STRIPE_HASH_LOCKS - 1; | ||
| 5107 | } | 5279 | } |
| 5108 | err = md_allow_write(mddev); | 5280 | err = md_allow_write(mddev); |
| 5109 | if (err) | 5281 | if (err) |
| 5110 | return err; | 5282 | return err; |
| 5283 | hash = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS; | ||
| 5111 | while (size > conf->max_nr_stripes) { | 5284 | while (size > conf->max_nr_stripes) { |
| 5112 | if (grow_one_stripe(conf)) | 5285 | if (grow_one_stripe(conf, hash)) |
| 5113 | conf->max_nr_stripes++; | 5286 | conf->max_nr_stripes++; |
| 5114 | else break; | 5287 | else break; |
| 5288 | hash = (hash + 1) % NR_STRIPE_HASH_LOCKS; | ||
| 5115 | } | 5289 | } |
| 5116 | return 0; | 5290 | return 0; |
| 5117 | } | 5291 | } |
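With the stripe pool split across NR_STRIPE_HASH_LOCKS inactive lists, the resize paths walk the bucket index round-robin so the buckets stay balanced: shrinking starts at the bucket the newest stripe landed in, (max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS, and counts down with wrap-around, while growing starts at max_nr_stripes % NR_STRIPE_HASH_LOCKS and counts up. A worked example, assuming 8 buckets:

    /* 256 stripes over 8 buckets -> 32 each. Growing to 260 starts at
     * 256 % 8 == 0 and fills buckets 0,1,2,3; shrinking back starts at
     * (260 - 1) % 8 == 3 and drains 3,2,1,0, restoring 32 per bucket.
     */
    int hash = (nr_stripes - 1) % NR_STRIPE_HASH_LOCKS;     /* shrink path */
    while (nr_stripes > target) {
            drop_one_stripe(conf, hash);    /* frees one from that bucket */
            nr_stripes--;
            hash = (hash + NR_STRIPE_HASH_LOCKS - 1) % NR_STRIPE_HASH_LOCKS;
    }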
| @@ -5199,15 +5373,18 @@ raid5_show_group_thread_cnt(struct mddev *mddev, char *page) | |||
| 5199 | return 0; | 5373 | return 0; |
| 5200 | } | 5374 | } |
| 5201 | 5375 | ||
| 5202 | static int alloc_thread_groups(struct r5conf *conf, int cnt); | 5376 | static int alloc_thread_groups(struct r5conf *conf, int cnt, |
| 5377 | int *group_cnt, | ||
| 5378 | int *worker_cnt_per_group, | ||
| 5379 | struct r5worker_group **worker_groups); | ||
| 5203 | static ssize_t | 5380 | static ssize_t |
| 5204 | raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) | 5381 | raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) |
| 5205 | { | 5382 | { |
| 5206 | struct r5conf *conf = mddev->private; | 5383 | struct r5conf *conf = mddev->private; |
| 5207 | unsigned long new; | 5384 | unsigned long new; |
| 5208 | int err; | 5385 | int err; |
| 5209 | struct r5worker_group *old_groups; | 5386 | struct r5worker_group *new_groups, *old_groups; |
| 5210 | int old_group_cnt; | 5387 | int group_cnt, worker_cnt_per_group; |
| 5211 | 5388 | ||
| 5212 | if (len >= PAGE_SIZE) | 5389 | if (len >= PAGE_SIZE) |
| 5213 | return -EINVAL; | 5390 | return -EINVAL; |
| @@ -5223,14 +5400,19 @@ raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) | |||
| 5223 | mddev_suspend(mddev); | 5400 | mddev_suspend(mddev); |
| 5224 | 5401 | ||
| 5225 | old_groups = conf->worker_groups; | 5402 | old_groups = conf->worker_groups; |
| 5226 | old_group_cnt = conf->worker_cnt_per_group; | 5403 | if (old_groups) |
| 5404 | flush_workqueue(raid5_wq); | ||
| 5405 | |||
| 5406 | err = alloc_thread_groups(conf, new, | ||
| 5407 | &group_cnt, &worker_cnt_per_group, | ||
| 5408 | &new_groups); | ||
| 5409 | if (!err) { | ||
| 5410 | spin_lock_irq(&conf->device_lock); | ||
| 5411 | conf->group_cnt = group_cnt; | ||
| 5412 | conf->worker_cnt_per_group = worker_cnt_per_group; | ||
| 5413 | conf->worker_groups = new_groups; | ||
| 5414 | spin_unlock_irq(&conf->device_lock); | ||
| 5227 | 5415 | ||
| 5228 | conf->worker_groups = NULL; | ||
| 5229 | err = alloc_thread_groups(conf, new); | ||
| 5230 | if (err) { | ||
| 5231 | conf->worker_groups = old_groups; | ||
| 5232 | conf->worker_cnt_per_group = old_group_cnt; | ||
| 5233 | } else { | ||
| 5234 | if (old_groups) | 5416 | if (old_groups) |
| 5235 | kfree(old_groups[0].workers); | 5417 | kfree(old_groups[0].workers); |
| 5236 | kfree(old_groups); | 5418 | kfree(old_groups); |
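The rewritten store path never leaves conf->worker_groups half-valid while workers can still run: it flushes raid5_wq first, builds the replacement arrays off to the side, and publishes all three fields in one device_lock critical section before freeing the old ones. A sketch of the reader this ordering protects (cpu_to_group() assumed to map a CPU to its worker group, as on the stripe-thread wakeup path):

    int cpu;

    spin_lock_irq(&conf->device_lock);
    cpu = smp_processor_id();               /* stable: irqs are off */
    if (conf->worker_groups) {              /* sees old or new, never mixed */
            struct r5worker_group *group =
                    &conf->worker_groups[cpu_to_group(cpu)];
            queue_work_on(cpu, raid5_wq, &group->workers[0].work);
    }
    spin_unlock_irq(&conf->device_lock);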
| @@ -5260,40 +5442,47 @@ static struct attribute_group raid5_attrs_group = { | |||
| 5260 | .attrs = raid5_attrs, | 5442 | .attrs = raid5_attrs, |
| 5261 | }; | 5443 | }; |
| 5262 | 5444 | ||
| 5263 | static int alloc_thread_groups(struct r5conf *conf, int cnt) | 5445 | static int alloc_thread_groups(struct r5conf *conf, int cnt, |
| 5446 | int *group_cnt, | ||
| 5447 | int *worker_cnt_per_group, | ||
| 5448 | struct r5worker_group **worker_groups) | ||
| 5264 | { | 5449 | { |
| 5265 | int i, j; | 5450 | int i, j, k; |
| 5266 | ssize_t size; | 5451 | ssize_t size; |
| 5267 | struct r5worker *workers; | 5452 | struct r5worker *workers; |
| 5268 | 5453 | ||
| 5269 | conf->worker_cnt_per_group = cnt; | 5454 | *worker_cnt_per_group = cnt; |
| 5270 | if (cnt == 0) { | 5455 | if (cnt == 0) { |
| 5271 | conf->worker_groups = NULL; | 5456 | *group_cnt = 0; |
| 5457 | *worker_groups = NULL; | ||
| 5272 | return 0; | 5458 | return 0; |
| 5273 | } | 5459 | } |
| 5274 | conf->group_cnt = num_possible_nodes(); | 5460 | *group_cnt = num_possible_nodes(); |
| 5275 | size = sizeof(struct r5worker) * cnt; | 5461 | size = sizeof(struct r5worker) * cnt; |
| 5276 | workers = kzalloc(size * conf->group_cnt, GFP_NOIO); | 5462 | workers = kzalloc(size * *group_cnt, GFP_NOIO); |
| 5277 | conf->worker_groups = kzalloc(sizeof(struct r5worker_group) * | 5463 | *worker_groups = kzalloc(sizeof(struct r5worker_group) * |
| 5278 | conf->group_cnt, GFP_NOIO); | 5464 | *group_cnt, GFP_NOIO); |
| 5279 | if (!conf->worker_groups || !workers) { | 5465 | if (!*worker_groups || !workers) { |
| 5280 | kfree(workers); | 5466 | kfree(workers); |
| 5281 | kfree(conf->worker_groups); | 5467 | kfree(*worker_groups); |
| 5282 | conf->worker_groups = NULL; | ||
| 5283 | return -ENOMEM; | 5468 | return -ENOMEM; |
| 5284 | } | 5469 | } |
| 5285 | 5470 | ||
| 5286 | for (i = 0; i < conf->group_cnt; i++) { | 5471 | for (i = 0; i < *group_cnt; i++) { |
| 5287 | struct r5worker_group *group; | 5472 | struct r5worker_group *group; |
| 5288 | 5473 | ||
| 5289 | group = &conf->worker_groups[i]; | 5474 | group = &(*worker_groups)[i]; |
| 5290 | INIT_LIST_HEAD(&group->handle_list); | 5475 | INIT_LIST_HEAD(&group->handle_list); |
| 5291 | group->conf = conf; | 5476 | group->conf = conf; |
| 5292 | group->workers = workers + i * cnt; | 5477 | group->workers = workers + i * cnt; |
| 5293 | 5478 | ||
| 5294 | for (j = 0; j < cnt; j++) { | 5479 | for (j = 0; j < cnt; j++) { |
| 5295 | group->workers[j].group = group; | 5480 | struct r5worker *worker = group->workers + j; |
| 5296 | INIT_WORK(&group->workers[j].work, raid5_do_work); | 5481 | worker->group = group; |
| 5482 | INIT_WORK(&worker->work, raid5_do_work); | ||
| 5483 | |||
| 5484 | for (k = 0; k < NR_STRIPE_HASH_LOCKS; k++) | ||
| 5485 | INIT_LIST_HEAD(worker->temp_inactive_list + k); | ||
| 5297 | } | 5486 | } |
| 5298 | } | 5487 | } |
| 5299 | 5488 | ||
| @@ -5444,6 +5633,9 @@ static struct r5conf *setup_conf(struct mddev *mddev) | |||
| 5444 | struct md_rdev *rdev; | 5633 | struct md_rdev *rdev; |
| 5445 | struct disk_info *disk; | 5634 | struct disk_info *disk; |
| 5446 | char pers_name[6]; | 5635 | char pers_name[6]; |
| 5636 | int i; | ||
| 5637 | int group_cnt, worker_cnt_per_group; | ||
| 5638 | struct r5worker_group *new_group; | ||
| 5447 | 5639 | ||
| 5448 | if (mddev->new_level != 5 | 5640 | if (mddev->new_level != 5 |
| 5449 | && mddev->new_level != 4 | 5641 | && mddev->new_level != 4 |
| @@ -5478,7 +5670,12 @@ static struct r5conf *setup_conf(struct mddev *mddev) | |||
| 5478 | if (conf == NULL) | 5670 | if (conf == NULL) |
| 5479 | goto abort; | 5671 | goto abort; |
| 5480 | /* Don't enable multi-threading by default*/ | 5672 | /* Don't enable multi-threading by default*/ |
| 5481 | if (alloc_thread_groups(conf, 0)) | 5673 | if (!alloc_thread_groups(conf, 0, &group_cnt, &worker_cnt_per_group, |
| 5674 | &new_group)) { | ||
| 5675 | conf->group_cnt = group_cnt; | ||
| 5676 | conf->worker_cnt_per_group = worker_cnt_per_group; | ||
| 5677 | conf->worker_groups = new_group; | ||
| 5678 | } else | ||
| 5482 | goto abort; | 5679 | goto abort; |
| 5483 | spin_lock_init(&conf->device_lock); | 5680 | spin_lock_init(&conf->device_lock); |
| 5484 | seqcount_init(&conf->gen_lock); | 5681 | seqcount_init(&conf->gen_lock); |
| @@ -5488,7 +5685,6 @@ static struct r5conf *setup_conf(struct mddev *mddev) | |||
| 5488 | INIT_LIST_HEAD(&conf->hold_list); | 5685 | INIT_LIST_HEAD(&conf->hold_list); |
| 5489 | INIT_LIST_HEAD(&conf->delayed_list); | 5686 | INIT_LIST_HEAD(&conf->delayed_list); |
| 5490 | INIT_LIST_HEAD(&conf->bitmap_list); | 5687 | INIT_LIST_HEAD(&conf->bitmap_list); |
| 5491 | INIT_LIST_HEAD(&conf->inactive_list); | ||
| 5492 | init_llist_head(&conf->released_stripes); | 5688 | init_llist_head(&conf->released_stripes); |
| 5493 | atomic_set(&conf->active_stripes, 0); | 5689 | atomic_set(&conf->active_stripes, 0); |
| 5494 | atomic_set(&conf->preread_active_stripes, 0); | 5690 | atomic_set(&conf->preread_active_stripes, 0); |
| @@ -5514,6 +5710,21 @@ static struct r5conf *setup_conf(struct mddev *mddev) | |||
| 5514 | if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) | 5710 | if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) |
| 5515 | goto abort; | 5711 | goto abort; |
| 5516 | 5712 | ||
| 5713 | /* We init hash_locks[0] separately so that it can be used | ||
| 5714 | * as the reference lock in the spin_lock_nest_lock() call | ||
| 5715 | * in lock_all_device_hash_locks_irq in order to convince | ||
| 5716 | * lockdep that we know what we are doing. | ||
| 5717 | */ | ||
| 5718 | spin_lock_init(conf->hash_locks); | ||
| 5719 | for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++) | ||
| 5720 | spin_lock_init(conf->hash_locks + i); | ||
| 5721 | |||
| 5722 | for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) | ||
| 5723 | INIT_LIST_HEAD(conf->inactive_list + i); | ||
| 5724 | |||
| 5725 | for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) | ||
| 5726 | INIT_LIST_HEAD(conf->temp_inactive_list + i); | ||
| 5727 | |||
| 5517 | conf->level = mddev->new_level; | 5728 | conf->level = mddev->new_level; |
| 5518 | if (raid5_alloc_percpu(conf) != 0) | 5729 | if (raid5_alloc_percpu(conf) != 0) |
| 5519 | goto abort; | 5730 | goto abort; |
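The helper the comment refers to is not in this hunk; presumably it takes hash_locks[0] first and nests the remaining seven against it, which is the documented way to convince lockdep that acquiring several locks of one class is intentional:

    /* sketch, consistent with the comment above */
    static void lock_all_device_hash_locks_irq(struct r5conf *conf)
    {
            int i;

            local_irq_disable();
            spin_lock(conf->hash_locks);    /* the reference (outer) lock */
            for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
                    spin_lock_nest_lock(conf->hash_locks + i,
                                        conf->hash_locks);
            spin_lock(&conf->device_lock);
    }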
| @@ -5554,7 +5765,6 @@ static struct r5conf *setup_conf(struct mddev *mddev) | |||
| 5554 | else | 5765 | else |
| 5555 | conf->max_degraded = 1; | 5766 | conf->max_degraded = 1; |
| 5556 | conf->algorithm = mddev->new_layout; | 5767 | conf->algorithm = mddev->new_layout; |
| 5557 | conf->max_nr_stripes = NR_STRIPES; | ||
| 5558 | conf->reshape_progress = mddev->reshape_position; | 5768 | conf->reshape_progress = mddev->reshape_position; |
| 5559 | if (conf->reshape_progress != MaxSector) { | 5769 | if (conf->reshape_progress != MaxSector) { |
| 5560 | conf->prev_chunk_sectors = mddev->chunk_sectors; | 5770 | conf->prev_chunk_sectors = mddev->chunk_sectors; |
| @@ -5563,7 +5773,8 @@ static struct r5conf *setup_conf(struct mddev *mddev) | |||
| 5563 | 5773 | ||
| 5564 | memory = conf->max_nr_stripes * (sizeof(struct stripe_head) + | 5774 | memory = conf->max_nr_stripes * (sizeof(struct stripe_head) + |
| 5565 | max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; | 5775 | max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; |
| 5566 | if (grow_stripes(conf, conf->max_nr_stripes)) { | 5776 | atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS); |
| 5777 | if (grow_stripes(conf, NR_STRIPES)) { | ||
| 5567 | printk(KERN_ERR | 5778 | printk(KERN_ERR |
| 5568 | "md/raid:%s: couldn't allocate %dkB for buffers\n", | 5779 | "md/raid:%s: couldn't allocate %dkB for buffers\n", |
| 5569 | mdname(mddev), memory); | 5780 | mdname(mddev), memory); |
| @@ -6369,12 +6580,18 @@ static int raid5_start_reshape(struct mddev *mddev) | |||
| 6369 | if (!mddev->sync_thread) { | 6580 | if (!mddev->sync_thread) { |
| 6370 | mddev->recovery = 0; | 6581 | mddev->recovery = 0; |
| 6371 | spin_lock_irq(&conf->device_lock); | 6582 | spin_lock_irq(&conf->device_lock); |
| 6583 | write_seqcount_begin(&conf->gen_lock); | ||
| 6372 | mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; | 6584 | mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; |
| 6585 | mddev->new_chunk_sectors = | ||
| 6586 | conf->chunk_sectors = conf->prev_chunk_sectors; | ||
| 6587 | mddev->new_layout = conf->algorithm = conf->prev_algo; | ||
| 6373 | rdev_for_each(rdev, mddev) | 6588 | rdev_for_each(rdev, mddev) |
| 6374 | rdev->new_data_offset = rdev->data_offset; | 6589 | rdev->new_data_offset = rdev->data_offset; |
| 6375 | smp_wmb(); | 6590 | smp_wmb(); |
| 6591 | conf->generation--; | ||
| 6376 | conf->reshape_progress = MaxSector; | 6592 | conf->reshape_progress = MaxSector; |
| 6377 | mddev->reshape_position = MaxSector; | 6593 | mddev->reshape_position = MaxSector; |
| 6594 | write_seqcount_end(&conf->gen_lock); | ||
| 6378 | spin_unlock_irq(&conf->device_lock); | 6595 | spin_unlock_irq(&conf->device_lock); |
| 6379 | return -EAGAIN; | 6596 | return -EAGAIN; |
| 6380 | } | 6597 | } |
| @@ -6462,27 +6679,28 @@ static void raid5_quiesce(struct mddev *mddev, int state) | |||
| 6462 | break; | 6679 | break; |
| 6463 | 6680 | ||
| 6464 | case 1: /* stop all writes */ | 6681 | case 1: /* stop all writes */ |
| 6465 | spin_lock_irq(&conf->device_lock); | 6682 | lock_all_device_hash_locks_irq(conf); |
| 6466 | /* '2' tells resync/reshape to pause so that all | 6683 | /* '2' tells resync/reshape to pause so that all |
| 6467 | * active stripes can drain | 6684 | * active stripes can drain |
| 6468 | */ | 6685 | */ |
| 6469 | conf->quiesce = 2; | 6686 | conf->quiesce = 2; |
| 6470 | wait_event_lock_irq(conf->wait_for_stripe, | 6687 | wait_event_cmd(conf->wait_for_stripe, |
| 6471 | atomic_read(&conf->active_stripes) == 0 && | 6688 | atomic_read(&conf->active_stripes) == 0 && |
| 6472 | atomic_read(&conf->active_aligned_reads) == 0, | 6689 | atomic_read(&conf->active_aligned_reads) == 0, |
| 6473 | conf->device_lock); | 6690 | unlock_all_device_hash_locks_irq(conf), |
| 6691 | lock_all_device_hash_locks_irq(conf)); | ||
| 6474 | conf->quiesce = 1; | 6692 | conf->quiesce = 1; |
| 6475 | spin_unlock_irq(&conf->device_lock); | 6693 | unlock_all_device_hash_locks_irq(conf); |
| 6476 | /* allow reshape to continue */ | 6694 | /* allow reshape to continue */ |
| 6477 | wake_up(&conf->wait_for_overlap); | 6695 | wake_up(&conf->wait_for_overlap); |
| 6478 | break; | 6696 | break; |
| 6479 | 6697 | ||
| 6480 | case 0: /* re-enable writes */ | 6698 | case 0: /* re-enable writes */ |
| 6481 | spin_lock_irq(&conf->device_lock); | 6699 | lock_all_device_hash_locks_irq(conf); |
| 6482 | conf->quiesce = 0; | 6700 | conf->quiesce = 0; |
| 6483 | wake_up(&conf->wait_for_stripe); | 6701 | wake_up(&conf->wait_for_stripe); |
| 6484 | wake_up(&conf->wait_for_overlap); | 6702 | wake_up(&conf->wait_for_overlap); |
| 6485 | spin_unlock_irq(&conf->device_lock); | 6703 | unlock_all_device_hash_locks_irq(conf); |
| 6486 | break; | 6704 | break; |
| 6487 | } | 6705 | } |
| 6488 | } | 6706 | } |
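wait_event_cmd(wq, cond, cmd1, cmd2) executes cmd1 before every sleep and cmd2 after every wakeup, which is what lets the quiesce path above sleep without sitting on all NR_STRIPE_HASH_LOCKS hash locks; the replaced wait_event_lock_irq() could only juggle a single lock. The call pattern in isolation:

    wait_event_cmd(conf->wait_for_stripe,
                   atomic_read(&conf->active_stripes) == 0 &&
                   atomic_read(&conf->active_aligned_reads) == 0,
                   unlock_all_device_hash_locks_irq(conf),  /* before sleep */
                   lock_all_device_hash_locks_irq(conf));   /* after wake  */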
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index b42e6b462eda..01ad8ae8f578 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h | |||
| @@ -205,6 +205,7 @@ struct stripe_head { | |||
| 205 | short pd_idx; /* parity disk index */ | 205 | short pd_idx; /* parity disk index */ |
| 206 | short qd_idx; /* 'Q' disk index for raid6 */ | 206 | short qd_idx; /* 'Q' disk index for raid6 */ |
| 207 | short ddf_layout;/* use DDF ordering to calculate Q */ | 207 | short ddf_layout;/* use DDF ordering to calculate Q */ |
| 208 | short hash_lock_index; | ||
| 208 | unsigned long state; /* state flags */ | 209 | unsigned long state; /* state flags */ |
| 209 | atomic_t count; /* nr of active thread/requests */ | 210 | atomic_t count; /* nr of active thread/requests */ |
| 210 | int bm_seq; /* sequence number for bitmap flushes */ | 211 | int bm_seq; /* sequence number for bitmap flushes */ |
| @@ -367,9 +368,18 @@ struct disk_info { | |||
| 367 | struct md_rdev *rdev, *replacement; | 368 | struct md_rdev *rdev, *replacement; |
| 368 | }; | 369 | }; |
| 369 | 370 | ||
| 371 | /* NOTE NR_STRIPE_HASH_LOCKS must remain below 64. | ||
| 372 | * This is because we sometimes take all the spinlocks | ||
| 373 | * and creating that much locking depth can cause | ||
| 374 | * problems. | ||
| 375 | */ | ||
| 376 | #define NR_STRIPE_HASH_LOCKS 8 | ||
| 377 | #define STRIPE_HASH_LOCKS_MASK (NR_STRIPE_HASH_LOCKS - 1) | ||
| 378 | |||
| 370 | struct r5worker { | 379 | struct r5worker { |
| 371 | struct work_struct work; | 380 | struct work_struct work; |
| 372 | struct r5worker_group *group; | 381 | struct r5worker_group *group; |
| 382 | struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS]; | ||
| 373 | bool working; | 383 | bool working; |
| 374 | }; | 384 | }; |
| 375 | 385 | ||
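Both macros assume NR_STRIPE_HASH_LOCKS is a power of two, so a stripe's hash_lock_index is cheap to derive from its sector with the same shift stripe_hash() already uses. A sketch of the presumable helper:

    static inline int stripe_hash_locks_hash(sector_t sect)
    {
            /* fold the stripe-hash bucketing down to 8 lock classes */
            return (sect >> STRIPE_SHIFT) & STRIPE_HASH_LOCKS_MASK;
    }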
| @@ -382,6 +392,8 @@ struct r5worker_group { | |||
| 382 | 392 | ||
| 383 | struct r5conf { | 393 | struct r5conf { |
| 384 | struct hlist_head *stripe_hashtbl; | 394 | struct hlist_head *stripe_hashtbl; |
| 395 | /* only protect corresponding hash list and inactive_list */ | ||
| 396 | spinlock_t hash_locks[NR_STRIPE_HASH_LOCKS]; | ||
| 385 | struct mddev *mddev; | 397 | struct mddev *mddev; |
| 386 | int chunk_sectors; | 398 | int chunk_sectors; |
| 387 | int level, algorithm; | 399 | int level, algorithm; |
| @@ -462,7 +474,8 @@ struct r5conf { | |||
| 462 | * Free stripes pool | 474 | * Free stripes pool |
| 463 | */ | 475 | */ |
| 464 | atomic_t active_stripes; | 476 | atomic_t active_stripes; |
| 465 | struct list_head inactive_list; | 477 | struct list_head inactive_list[NR_STRIPE_HASH_LOCKS]; |
| 478 | atomic_t empty_inactive_list_nr; | ||
| 466 | struct llist_head released_stripes; | 479 | struct llist_head released_stripes; |
| 467 | wait_queue_head_t wait_for_stripe; | 480 | wait_queue_head_t wait_for_stripe; |
| 468 | wait_queue_head_t wait_for_overlap; | 481 | wait_queue_head_t wait_for_overlap; |
| @@ -477,6 +490,7 @@ struct r5conf { | |||
| 477 | * the new thread here until we fully activate the array. | 490 | * the new thread here until we fully activate the array. |
| 478 | */ | 491 | */ |
| 479 | struct md_thread *thread; | 492 | struct md_thread *thread; |
| 493 | struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS]; | ||
| 480 | struct r5worker_group *worker_groups; | 494 | struct r5worker_group *worker_groups; |
| 481 | int group_cnt; | 495 | int group_cnt; |
| 482 | int worker_cnt_per_group; | 496 | int worker_cnt_per_group; |
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c index 36513e896413..65cab70fefcb 100644 --- a/drivers/media/platform/m2m-deinterlace.c +++ b/drivers/media/platform/m2m-deinterlace.c | |||
| @@ -341,8 +341,7 @@ static void deinterlace_issue_dma(struct deinterlace_ctx *ctx, int op, | |||
| 341 | ctx->xt->dir = DMA_MEM_TO_MEM; | 341 | ctx->xt->dir = DMA_MEM_TO_MEM; |
| 342 | ctx->xt->src_sgl = false; | 342 | ctx->xt->src_sgl = false; |
| 343 | ctx->xt->dst_sgl = true; | 343 | ctx->xt->dst_sgl = true; |
| 344 | flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | | 344 | flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; |
| 345 | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SKIP_SRC_UNMAP; | ||
| 346 | 345 | ||
| 347 | tx = dmadev->device_prep_interleaved_dma(chan, ctx->xt, flags); | 346 | tx = dmadev->device_prep_interleaved_dma(chan, ctx->xt, flags); |
| 348 | if (tx == NULL) { | 347 | if (tx == NULL) { |
diff --git a/drivers/media/platform/timblogiw.c b/drivers/media/platform/timblogiw.c index 6a74ce040d28..ccdadd623a3a 100644 --- a/drivers/media/platform/timblogiw.c +++ b/drivers/media/platform/timblogiw.c | |||
| @@ -565,7 +565,7 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb) | |||
| 565 | 565 | ||
| 566 | desc = dmaengine_prep_slave_sg(fh->chan, | 566 | desc = dmaengine_prep_slave_sg(fh->chan, |
| 567 | buf->sg, sg_elems, DMA_DEV_TO_MEM, | 567 | buf->sg, sg_elems, DMA_DEV_TO_MEM, |
| 568 | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); | 568 | DMA_PREP_INTERRUPT); |
| 569 | if (!desc) { | 569 | if (!desc) { |
| 570 | spin_lock_irq(&fh->queue_lock); | 570 | spin_lock_irq(&fh->queue_lock); |
| 571 | list_del_init(&vb->queue); | 571 | list_del_init(&vb->queue); |
diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c index 08b18f3f5264..9e2b985293fc 100644 --- a/drivers/misc/carma/carma-fpga.c +++ b/drivers/misc/carma/carma-fpga.c | |||
| @@ -633,8 +633,7 @@ static int data_submit_dma(struct fpga_device *priv, struct data_buf *buf) | |||
| 633 | struct dma_async_tx_descriptor *tx; | 633 | struct dma_async_tx_descriptor *tx; |
| 634 | dma_cookie_t cookie; | 634 | dma_cookie_t cookie; |
| 635 | dma_addr_t dst, src; | 635 | dma_addr_t dst, src; |
| 636 | unsigned long dma_flags = DMA_COMPL_SKIP_DEST_UNMAP | | 636 | unsigned long dma_flags = 0; |
| 637 | DMA_COMPL_SKIP_SRC_UNMAP; | ||
| 638 | 637 | ||
| 639 | dst_sg = buf->vb.sglist; | 638 | dst_sg = buf->vb.sglist; |
| 640 | dst_nents = buf->vb.sglen; | 639 | dst_nents = buf->vb.sglen; |
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c index ef8956568c3a..157b570ba343 100644 --- a/drivers/mmc/core/sdio_bus.c +++ b/drivers/mmc/core/sdio_bus.c | |||
| @@ -308,8 +308,7 @@ static void sdio_acpi_set_handle(struct sdio_func *func) | |||
| 308 | struct mmc_host *host = func->card->host; | 308 | struct mmc_host *host = func->card->host; |
| 309 | u64 addr = (host->slotno << 16) | func->num; | 309 | u64 addr = (host->slotno << 16) | func->num; |
| 310 | 310 | ||
| 311 | ACPI_HANDLE_SET(&func->dev, | 311 | acpi_preset_companion(&func->dev, ACPI_HANDLE(host->parent), addr); |
| 312 | acpi_get_child(ACPI_HANDLE(host->parent), addr)); | ||
| 313 | } | 312 | } |
| 314 | #else | 313 | #else |
| 315 | static inline void sdio_acpi_set_handle(struct sdio_func *func) {} | 314 | static inline void sdio_acpi_set_handle(struct sdio_func *func) {} |
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c index d78a97d4153a..59f08c44abdb 100644 --- a/drivers/mtd/nand/atmel_nand.c +++ b/drivers/mtd/nand/atmel_nand.c | |||
| @@ -375,8 +375,7 @@ static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len, | |||
| 375 | 375 | ||
| 376 | dma_dev = host->dma_chan->device; | 376 | dma_dev = host->dma_chan->device; |
| 377 | 377 | ||
| 378 | flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP | | 378 | flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; |
| 379 | DMA_COMPL_SKIP_DEST_UNMAP; | ||
| 380 | 379 | ||
| 381 | phys_addr = dma_map_single(dma_dev->dev, p, len, dir); | 380 | phys_addr = dma_map_single(dma_dev->dev, p, len, dir); |
| 382 | if (dma_mapping_error(dma_dev->dev, phys_addr)) { | 381 | if (dma_mapping_error(dma_dev->dev, phys_addr)) { |
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c index 3dc1a7564d87..8b2752263db9 100644 --- a/drivers/mtd/nand/fsmc_nand.c +++ b/drivers/mtd/nand/fsmc_nand.c | |||
| @@ -573,8 +573,6 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len, | |||
| 573 | dma_dev = chan->device; | 573 | dma_dev = chan->device; |
| 574 | dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction); | 574 | dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction); |
| 575 | 575 | ||
| 576 | flags |= DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP; | ||
| 577 | |||
| 578 | if (direction == DMA_TO_DEVICE) { | 576 | if (direction == DMA_TO_DEVICE) { |
| 579 | dma_src = dma_addr; | 577 | dma_src = dma_addr; |
| 580 | dma_dst = host->data_pa; | 578 | dma_dst = host->data_pa; |
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index bc8fd362a5aa..0ec2a7e8c8a9 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c | |||
| @@ -524,8 +524,9 @@ static ssize_t bonding_store_arp_interval(struct device *d, | |||
| 524 | goto out; | 524 | goto out; |
| 525 | } | 525 | } |
| 526 | if (bond->params.mode == BOND_MODE_ALB || | 526 | if (bond->params.mode == BOND_MODE_ALB || |
| 527 | bond->params.mode == BOND_MODE_TLB) { | 527 | bond->params.mode == BOND_MODE_TLB || |
| 528 | pr_info("%s: ARP monitoring cannot be used with ALB/TLB. Only MII monitoring is supported on %s.\n", | 528 | bond->params.mode == BOND_MODE_8023AD) { |
| 529 | pr_info("%s: ARP monitoring cannot be used with ALB/TLB/802.3ad. Only MII monitoring is supported on %s.\n", | ||
| 529 | bond->dev->name, bond->dev->name); | 530 | bond->dev->name, bond->dev->name); |
| 530 | ret = -EINVAL; | 531 | ret = -EINVAL; |
| 531 | goto out; | 532 | goto out; |
| @@ -603,15 +604,14 @@ static ssize_t bonding_store_arp_targets(struct device *d, | |||
| 603 | return restart_syscall(); | 604 | return restart_syscall(); |
| 604 | 605 | ||
| 605 | targets = bond->params.arp_targets; | 606 | targets = bond->params.arp_targets; |
| 606 | newtarget = in_aton(buf + 1); | 607 | if (!in4_pton(buf + 1, -1, (u8 *)&newtarget, -1, NULL) || |
| 608 | IS_IP_TARGET_UNUSABLE_ADDRESS(newtarget)) { | ||
| 609 | pr_err("%s: invalid ARP target %pI4 specified for addition\n", | ||
| 610 | bond->dev->name, &newtarget); | ||
| 611 | goto out; | ||
| 612 | } | ||
| 607 | /* look for adds */ | 613 | /* look for adds */ |
| 608 | if (buf[0] == '+') { | 614 | if (buf[0] == '+') { |
| 609 | if ((newtarget == 0) || (newtarget == htonl(INADDR_BROADCAST))) { | ||
| 610 | pr_err("%s: invalid ARP target %pI4 specified for addition\n", | ||
| 611 | bond->dev->name, &newtarget); | ||
| 612 | goto out; | ||
| 613 | } | ||
| 614 | |||
| 615 | if (bond_get_targets_ip(targets, newtarget) != -1) { /* dup */ | 615 | if (bond_get_targets_ip(targets, newtarget) != -1) { /* dup */ |
| 616 | pr_err("%s: ARP target %pI4 is already present\n", | 616 | pr_err("%s: ARP target %pI4 is already present\n", |
| 617 | bond->dev->name, &newtarget); | 617 | bond->dev->name, &newtarget); |
| @@ -634,12 +634,6 @@ static ssize_t bonding_store_arp_targets(struct device *d, | |||
| 634 | targets[ind] = newtarget; | 634 | targets[ind] = newtarget; |
| 635 | write_unlock_bh(&bond->lock); | 635 | write_unlock_bh(&bond->lock); |
| 636 | } else if (buf[0] == '-') { | 636 | } else if (buf[0] == '-') { |
| 637 | if ((newtarget == 0) || (newtarget == htonl(INADDR_BROADCAST))) { | ||
| 638 | pr_err("%s: invalid ARP target %pI4 specified for removal\n", | ||
| 639 | bond->dev->name, &newtarget); | ||
| 640 | goto out; | ||
| 641 | } | ||
| 642 | |||
| 643 | ind = bond_get_targets_ip(targets, newtarget); | 637 | ind = bond_get_targets_ip(targets, newtarget); |
| 644 | if (ind == -1) { | 638 | if (ind == -1) { |
| 645 | pr_err("%s: unable to remove nonexistent ARP target %pI4.\n", | 639 | pr_err("%s: unable to remove nonexistent ARP target %pI4.\n", |
| @@ -701,6 +695,8 @@ static ssize_t bonding_store_downdelay(struct device *d, | |||
| 701 | int new_value, ret = count; | 695 | int new_value, ret = count; |
| 702 | struct bonding *bond = to_bond(d); | 696 | struct bonding *bond = to_bond(d); |
| 703 | 697 | ||
| 698 | if (!rtnl_trylock()) | ||
| 699 | return restart_syscall(); | ||
| 704 | if (!(bond->params.miimon)) { | 700 | if (!(bond->params.miimon)) { |
| 705 | pr_err("%s: Unable to set down delay as MII monitoring is disabled\n", | 701 | pr_err("%s: Unable to set down delay as MII monitoring is disabled\n", |
| 706 | bond->dev->name); | 702 | bond->dev->name); |
| @@ -734,6 +730,7 @@ static ssize_t bonding_store_downdelay(struct device *d, | |||
| 734 | } | 730 | } |
| 735 | 731 | ||
| 736 | out: | 732 | out: |
| 733 | rtnl_unlock(); | ||
| 737 | return ret; | 734 | return ret; |
| 738 | } | 735 | } |
| 739 | static DEVICE_ATTR(downdelay, S_IRUGO | S_IWUSR, | 736 | static DEVICE_ATTR(downdelay, S_IRUGO | S_IWUSR, |
| @@ -756,6 +753,8 @@ static ssize_t bonding_store_updelay(struct device *d, | |||
| 756 | int new_value, ret = count; | 753 | int new_value, ret = count; |
| 757 | struct bonding *bond = to_bond(d); | 754 | struct bonding *bond = to_bond(d); |
| 758 | 755 | ||
| 756 | if (!rtnl_trylock()) | ||
| 757 | return restart_syscall(); | ||
| 759 | if (!(bond->params.miimon)) { | 758 | if (!(bond->params.miimon)) { |
| 760 | pr_err("%s: Unable to set up delay as MII monitoring is disabled\n", | 759 | pr_err("%s: Unable to set up delay as MII monitoring is disabled\n", |
| 761 | bond->dev->name); | 760 | bond->dev->name); |
| @@ -789,6 +788,7 @@ static ssize_t bonding_store_updelay(struct device *d, | |||
| 789 | } | 788 | } |
| 790 | 789 | ||
| 791 | out: | 790 | out: |
| 791 | rtnl_unlock(); | ||
| 792 | return ret; | 792 | return ret; |
| 793 | } | 793 | } |
| 794 | static DEVICE_ATTR(updelay, S_IRUGO | S_IWUSR, | 794 | static DEVICE_ATTR(updelay, S_IRUGO | S_IWUSR, |
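Taking RTNL unconditionally inside a sysfs store can deadlock against the teardown path, which removes the attribute while already holding RTNL; rtnl_trylock() plus restart_syscall() sidesteps that by replaying the write() instead of blocking. The idiom both stores above now follow, sketched with a hypothetical attribute:

    static ssize_t bonding_store_example(struct device *d,
                                         struct device_attribute *attr,
                                         const char *buf, size_t count)
    {
            ssize_t ret = count;

            if (!rtnl_trylock())
                    return restart_syscall();
            /* ... parse buf and update bond->params under RTNL ... */
            rtnl_unlock();
            return ret;
    }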
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index 77a07a12e77f..ca31286aa028 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h | |||
| @@ -63,6 +63,9 @@ | |||
| 63 | (((mode) == BOND_MODE_TLB) || \ | 63 | (((mode) == BOND_MODE_TLB) || \ |
| 64 | ((mode) == BOND_MODE_ALB)) | 64 | ((mode) == BOND_MODE_ALB)) |
| 65 | 65 | ||
| 66 | #define IS_IP_TARGET_UNUSABLE_ADDRESS(a) \ | ||
| 67 | ((htonl(INADDR_BROADCAST) == a) || \ | ||
| 68 | ipv4_is_zeronet(a)) | ||
| 66 | /* | 69 | /* |
| 67 | * Less bad way to call ioctl from within the kernel; this needs to be | 70 | * Less bad way to call ioctl from within the kernel; this needs to be |
| 68 | * done some other way to get the call out of interrupt context. | 71 | * done some other way to get the call out of interrupt context. |
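in4_pton() with srclen == -1 parses a NUL-terminated dotted quad and, unlike in_aton(), reports malformed input, so one call now covers the parsing and validity checks that the '+' and '-' branches used to duplicate. The combined check in isolation:

    #include <linux/inet.h>

    __be32 target;

    if (!in4_pton(buf + 1, -1, (u8 *)&target, -1, NULL) ||
        IS_IP_TARGET_UNUSABLE_ADDRESS(target))
            return -EINVAL;     /* zeronet, broadcast, or unparsable */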
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 5aa5e8146496..c3c4c266b846 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c | |||
| @@ -1388,6 +1388,9 @@ static int alx_resume(struct device *dev) | |||
| 1388 | { | 1388 | { |
| 1389 | struct pci_dev *pdev = to_pci_dev(dev); | 1389 | struct pci_dev *pdev = to_pci_dev(dev); |
| 1390 | struct alx_priv *alx = pci_get_drvdata(pdev); | 1390 | struct alx_priv *alx = pci_get_drvdata(pdev); |
| 1391 | struct alx_hw *hw = &alx->hw; | ||
| 1392 | |||
| 1393 | alx_reset_phy(hw); | ||
| 1391 | 1394 | ||
| 1392 | if (!netif_running(alx->dev)) | 1395 | if (!netif_running(alx->dev)) |
| 1393 | return 0; | 1396 | return 0; |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 4e01c57d8c8d..a1f66e2c9a86 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | |||
| @@ -1376,7 +1376,6 @@ enum { | |||
| 1376 | BNX2X_SP_RTNL_RX_MODE, | 1376 | BNX2X_SP_RTNL_RX_MODE, |
| 1377 | BNX2X_SP_RTNL_HYPERVISOR_VLAN, | 1377 | BNX2X_SP_RTNL_HYPERVISOR_VLAN, |
| 1378 | BNX2X_SP_RTNL_TX_STOP, | 1378 | BNX2X_SP_RTNL_TX_STOP, |
| 1379 | BNX2X_SP_RTNL_TX_RESUME, | ||
| 1380 | }; | 1379 | }; |
| 1381 | 1380 | ||
| 1382 | struct bnx2x_prev_path_list { | 1381 | struct bnx2x_prev_path_list { |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index dcafbda3e5be..ec96130533cc 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | |||
| @@ -2959,6 +2959,10 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) | |||
| 2959 | 2959 | ||
| 2960 | bp->port.pmf = 0; | 2960 | bp->port.pmf = 0; |
| 2961 | 2961 | ||
| 2962 | /* clear pending work in rtnl task */ | ||
| 2963 | bp->sp_rtnl_state = 0; | ||
| 2964 | smp_mb(); | ||
| 2965 | |||
| 2962 | /* Free SKBs, SGEs, TPA pool and driver internals */ | 2966 | /* Free SKBs, SGEs, TPA pool and driver internals */ |
| 2963 | bnx2x_free_skbs(bp); | 2967 | bnx2x_free_skbs(bp); |
| 2964 | if (CNIC_LOADED(bp)) | 2968 | if (CNIC_LOADED(bp)) |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index fcf2761d8828..fdace204b054 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | |||
| @@ -778,11 +778,6 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) | |||
| 778 | 778 | ||
| 779 | /* ets may affect cmng configuration: reinit it in hw */ | 779 | /* ets may affect cmng configuration: reinit it in hw */ |
| 780 | bnx2x_set_local_cmng(bp); | 780 | bnx2x_set_local_cmng(bp); |
| 781 | |||
| 782 | set_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state); | ||
| 783 | |||
| 784 | schedule_delayed_work(&bp->sp_rtnl_task, 0); | ||
| 785 | |||
| 786 | return; | 781 | return; |
| 787 | case BNX2X_DCBX_STATE_TX_RELEASED: | 782 | case BNX2X_DCBX_STATE_TX_RELEASED: |
| 788 | DP(BNX2X_MSG_DCB, "BNX2X_DCBX_STATE_TX_RELEASED\n"); | 783 | DP(BNX2X_MSG_DCB, "BNX2X_DCBX_STATE_TX_RELEASED\n"); |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index e622cc1f96ff..814d0eca9b33 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
| @@ -577,7 +577,9 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, | |||
| 577 | rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp)); | 577 | rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp)); |
| 578 | if (rc) { | 578 | if (rc) { |
| 579 | BNX2X_ERR("DMAE returned failure %d\n", rc); | 579 | BNX2X_ERR("DMAE returned failure %d\n", rc); |
| 580 | #ifdef BNX2X_STOP_ON_ERROR | ||
| 580 | bnx2x_panic(); | 581 | bnx2x_panic(); |
| 582 | #endif | ||
| 581 | } | 583 | } |
| 582 | } | 584 | } |
| 583 | 585 | ||
| @@ -614,7 +616,9 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32) | |||
| 614 | rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp)); | 616 | rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp)); |
| 615 | if (rc) { | 617 | if (rc) { |
| 616 | BNX2X_ERR("DMAE returned failure %d\n", rc); | 618 | BNX2X_ERR("DMAE returned failure %d\n", rc); |
| 619 | #ifdef BNX2X_STOP_ON_ERROR | ||
| 617 | bnx2x_panic(); | 620 | bnx2x_panic(); |
| 621 | #endif | ||
| 618 | } | 622 | } |
| 619 | } | 623 | } |
| 620 | 624 | ||
| @@ -5231,18 +5235,18 @@ static void bnx2x_eq_int(struct bnx2x *bp) | |||
| 5231 | 5235 | ||
| 5232 | case EVENT_RING_OPCODE_STOP_TRAFFIC: | 5236 | case EVENT_RING_OPCODE_STOP_TRAFFIC: |
| 5233 | DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n"); | 5237 | DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n"); |
| 5238 | bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED); | ||
| 5234 | if (f_obj->complete_cmd(bp, f_obj, | 5239 | if (f_obj->complete_cmd(bp, f_obj, |
| 5235 | BNX2X_F_CMD_TX_STOP)) | 5240 | BNX2X_F_CMD_TX_STOP)) |
| 5236 | break; | 5241 | break; |
| 5237 | bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED); | ||
| 5238 | goto next_spqe; | 5242 | goto next_spqe; |
| 5239 | 5243 | ||
| 5240 | case EVENT_RING_OPCODE_START_TRAFFIC: | 5244 | case EVENT_RING_OPCODE_START_TRAFFIC: |
| 5241 | DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n"); | 5245 | DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n"); |
| 5246 | bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); | ||
| 5242 | if (f_obj->complete_cmd(bp, f_obj, | 5247 | if (f_obj->complete_cmd(bp, f_obj, |
| 5243 | BNX2X_F_CMD_TX_START)) | 5248 | BNX2X_F_CMD_TX_START)) |
| 5244 | break; | 5249 | break; |
| 5245 | bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); | ||
| 5246 | goto next_spqe; | 5250 | goto next_spqe; |
| 5247 | 5251 | ||
| 5248 | case EVENT_RING_OPCODE_FUNCTION_UPDATE: | 5252 | case EVENT_RING_OPCODE_FUNCTION_UPDATE: |
| @@ -9352,6 +9356,10 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global) | |||
| 9352 | bnx2x_process_kill_chip_reset(bp, global); | 9356 | bnx2x_process_kill_chip_reset(bp, global); |
| 9353 | barrier(); | 9357 | barrier(); |
| 9354 | 9358 | ||
| 9359 | /* clear errors in PGB */ | ||
| 9360 | if (!CHIP_IS_E1x(bp)) | ||
| 9361 | REG_WR(bp, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f); | ||
| 9362 | |||
| 9355 | /* Recover after reset: */ | 9363 | /* Recover after reset: */ |
| 9356 | /* MCP */ | 9364 | /* MCP */ |
| 9357 | if (global && bnx2x_reset_mcp_comp(bp, val)) | 9365 | if (global && bnx2x_reset_mcp_comp(bp, val)) |
| @@ -9706,11 +9714,10 @@ sp_rtnl_not_reset: | |||
| 9706 | &bp->sp_rtnl_state)) | 9714 | &bp->sp_rtnl_state)) |
| 9707 | bnx2x_pf_set_vfs_vlan(bp); | 9715 | bnx2x_pf_set_vfs_vlan(bp); |
| 9708 | 9716 | ||
| 9709 | if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) | 9717 | if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) { |
| 9710 | bnx2x_dcbx_stop_hw_tx(bp); | 9718 | bnx2x_dcbx_stop_hw_tx(bp); |
| 9711 | |||
| 9712 | if (test_and_clear_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state)) | ||
| 9713 | bnx2x_dcbx_resume_hw_tx(bp); | 9719 | bnx2x_dcbx_resume_hw_tx(bp); |
| 9720 | } | ||
| 9714 | 9721 | ||
| 9715 | /* work which needs rtnl lock not-taken (as it takes the lock itself and | 9722 | /* work which needs rtnl lock not-taken (as it takes the lock itself and |
| 9716 | * can be called from other contexts as well) | 9723 | * can be called from other contexts as well) |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index 5ecf267dc4cc..3efbb35267c8 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | |||
| @@ -2864,6 +2864,17 @@ | |||
| 2864 | #define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ 0x9430 | 2864 | #define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ 0x9430 |
| 2865 | #define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_WRITE 0x9434 | 2865 | #define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_WRITE 0x9434 |
| 2866 | #define PGLUE_B_REG_INTERNAL_VFID_ENABLE 0x9438 | 2866 | #define PGLUE_B_REG_INTERNAL_VFID_ENABLE 0x9438 |
| 2867 | /* [W 7] Writing 1 to each bit in this register clears a corresponding error | ||
| 2868 | * details register and enables logging new error details. Bit 0 - clears | ||
| 2869 | * INCORRECT_RCV_DETAILS; Bit 1 - clears RX_ERR_DETAILS; Bit 2 - clears | ||
| 2870 | * TX_ERR_WR_ADD_31_0 TX_ERR_WR_ADD_63_32 TX_ERR_WR_DETAILS | ||
| 2871 | * TX_ERR_WR_DETAILS2 TX_ERR_RD_ADD_31_0 TX_ERR_RD_ADD_63_32 | ||
| 2872 | * TX_ERR_RD_DETAILS TX_ERR_RD_DETAILS2 TX_ERR_WR_DETAILS_ICPL; Bit 3 - | ||
| 2873 | * clears VF_LENGTH_VIOLATION_DETAILS. Bit 4 - clears | ||
| 2874 | * VF_GRC_SPACE_VIOLATION_DETAILS. Bit 5 - clears RX_TCPL_ERR_DETAILS. Bit 6 | ||
| 2875 | * - clears TCPL_IN_TWO_RCBS_DETAILS. */ | ||
| 2876 | #define PGLUE_B_REG_LATCHED_ERRORS_CLR 0x943c | ||
| 2877 | |||
| 2867 | /* [R 9] Interrupt register #0 read */ | 2878 | /* [R 9] Interrupt register #0 read */ |
| 2868 | #define PGLUE_B_REG_PGLUE_B_INT_STS 0x9298 | 2879 | #define PGLUE_B_REG_PGLUE_B_INT_STS 0x9298 |
| 2869 | /* [RC 9] Interrupt register #0 read clear */ | 2880 | /* [RC 9] Interrupt register #0 read clear */ |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 9199adf32d33..efa8a151d789 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | |||
| @@ -152,7 +152,7 @@ static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping) | |||
| 152 | if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) { | 152 | if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) { |
| 153 | DP(BNX2X_MSG_IOV, "detecting channel down. Aborting message\n"); | 153 | DP(BNX2X_MSG_IOV, "detecting channel down. Aborting message\n"); |
| 154 | *done = PFVF_STATUS_SUCCESS; | 154 | *done = PFVF_STATUS_SUCCESS; |
| 155 | return 0; | 155 | return -EINVAL; |
| 156 | } | 156 | } |
| 157 | 157 | ||
| 158 | /* Write message address */ | 158 | /* Write message address */ |
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 00c5be8c55b8..a9e068423ba0 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
| @@ -13618,16 +13618,9 @@ static int tg3_hwtstamp_ioctl(struct net_device *dev, | |||
| 13618 | if (stmpconf.flags) | 13618 | if (stmpconf.flags) |
| 13619 | return -EINVAL; | 13619 | return -EINVAL; |
| 13620 | 13620 | ||
| 13621 | switch (stmpconf.tx_type) { | 13621 | if (stmpconf.tx_type != HWTSTAMP_TX_ON && |
| 13622 | case HWTSTAMP_TX_ON: | 13622 | stmpconf.tx_type != HWTSTAMP_TX_OFF) |
| 13623 | tg3_flag_set(tp, TX_TSTAMP_EN); | ||
| 13624 | break; | ||
| 13625 | case HWTSTAMP_TX_OFF: | ||
| 13626 | tg3_flag_clear(tp, TX_TSTAMP_EN); | ||
| 13627 | break; | ||
| 13628 | default: | ||
| 13629 | return -ERANGE; | 13623 | return -ERANGE; |
| 13630 | } | ||
| 13631 | 13624 | ||
| 13632 | switch (stmpconf.rx_filter) { | 13625 | switch (stmpconf.rx_filter) { |
| 13633 | case HWTSTAMP_FILTER_NONE: | 13626 | case HWTSTAMP_FILTER_NONE: |
| @@ -13689,6 +13682,11 @@ static int tg3_hwtstamp_ioctl(struct net_device *dev, | |||
| 13689 | tw32(TG3_RX_PTP_CTL, | 13682 | tw32(TG3_RX_PTP_CTL, |
| 13690 | tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK); | 13683 | tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK); |
| 13691 | 13684 | ||
| 13685 | if (stmpconf.tx_type == HWTSTAMP_TX_ON) | ||
| 13686 | tg3_flag_set(tp, TX_TSTAMP_EN); | ||
| 13687 | else | ||
| 13688 | tg3_flag_clear(tp, TX_TSTAMP_EN); | ||
| 13689 | |||
| 13692 | return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? | 13690 | return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? |
| 13693 | -EFAULT : 0; | 13691 | -EFAULT : 0; |
| 13694 | } | 13692 | } |
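The reordering validates tx_type before anything else and commits the TX_TSTAMP_EN flag only after the RX filter has been programmed, so an -ERANGE part-way through can no longer leave TX timestamping toggled while the ioctl reports failure. The validate-early, commit-late shape:

    if (cfg.tx_type != HWTSTAMP_TX_ON && cfg.tx_type != HWTSTAMP_TX_OFF)
            return -ERANGE;                 /* validate before touching hw */
    /* ... map cfg.rx_filter, which may still return -ERANGE ... */
    if (cfg.tx_type == HWTSTAMP_TX_ON)      /* commit only on success */
            tg3_flag_set(tp, TX_TSTAMP_EN);
    else
            tg3_flag_clear(tp, TX_TSTAMP_EN);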
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 7fb0edfe3d24..dbcd5262c016 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c | |||
| @@ -1758,7 +1758,7 @@ err: | |||
| 1758 | 1758 | ||
| 1759 | /* Uses synchronous mcc */ | 1759 | /* Uses synchronous mcc */ |
| 1760 | int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, | 1760 | int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, |
| 1761 | u32 num, bool untagged, bool promiscuous) | 1761 | u32 num, bool promiscuous) |
| 1762 | { | 1762 | { |
| 1763 | struct be_mcc_wrb *wrb; | 1763 | struct be_mcc_wrb *wrb; |
| 1764 | struct be_cmd_req_vlan_config *req; | 1764 | struct be_cmd_req_vlan_config *req; |
| @@ -1778,7 +1778,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, | |||
| 1778 | 1778 | ||
| 1779 | req->interface_id = if_id; | 1779 | req->interface_id = if_id; |
| 1780 | req->promiscuous = promiscuous; | 1780 | req->promiscuous = promiscuous; |
| 1781 | req->untagged = untagged; | 1781 | req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0; |
| 1782 | req->num_vlan = num; | 1782 | req->num_vlan = num; |
| 1783 | if (!promiscuous) { | 1783 | if (!promiscuous) { |
| 1784 | memcpy(req->normal_vlan, vtag_array, | 1784 | memcpy(req->normal_vlan, vtag_array, |
| @@ -1847,7 +1847,19 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value) | |||
| 1847 | memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN); | 1847 | memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN); |
| 1848 | } | 1848 | } |
| 1849 | 1849 | ||
| 1850 | if ((req->if_flags_mask & cpu_to_le32(be_if_cap_flags(adapter))) != | ||
| 1851 | req->if_flags_mask) { | ||
| 1852 | dev_warn(&adapter->pdev->dev, | ||
| 1853 | "Cannot set rx filter flags 0x%x\n", | ||
| 1854 | req->if_flags_mask); | ||
| 1855 | dev_warn(&adapter->pdev->dev, | ||
| 1856 | "Interface is capable of 0x%x flags only\n", | ||
| 1857 | be_if_cap_flags(adapter)); | ||
| 1858 | } | ||
| 1859 | req->if_flags_mask &= cpu_to_le32(be_if_cap_flags(adapter)); | ||
| 1860 | |||
| 1850 | status = be_mcc_notify_wait(adapter); | 1861 | status = be_mcc_notify_wait(adapter); |
| 1862 | |||
| 1851 | err: | 1863 | err: |
| 1852 | spin_unlock_bh(&adapter->mcc_lock); | 1864 | spin_unlock_bh(&adapter->mcc_lock); |
| 1853 | return status; | 1865 | return status; |
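The clamp warns about requested bits the interface never advertised and then masks them off, rather than letting firmware reject the whole RX-filter command. Equivalent logic in host byte order, as a sketch:

    u32 supported = be_if_cap_flags(adapter);
    u32 requested = le32_to_cpu(req->if_flags_mask);

    if (requested & ~supported)
            dev_warn(&adapter->pdev->dev,
                     "dropping unsupported rx filter flags 0x%x\n",
                     requested & ~supported);
    req->if_flags_mask = cpu_to_le32(requested & supported);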
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index edf3e8a0ff83..0075686276aa 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h | |||
| @@ -1984,7 +1984,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver, | |||
| 1984 | char *fw_on_flash); | 1984 | char *fw_on_flash); |
| 1985 | int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num); | 1985 | int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num); |
| 1986 | int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, | 1986 | int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, |
| 1987 | u32 num, bool untagged, bool promiscuous); | 1987 | u32 num, bool promiscuous); |
| 1988 | int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status); | 1988 | int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status); |
| 1989 | int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc); | 1989 | int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc); |
| 1990 | int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc); | 1990 | int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc); |
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index eaecaadfa8c5..abde97471636 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
| @@ -1079,7 +1079,7 @@ static int be_vid_config(struct be_adapter *adapter) | |||
| 1079 | vids[num++] = cpu_to_le16(i); | 1079 | vids[num++] = cpu_to_le16(i); |
| 1080 | 1080 | ||
| 1081 | status = be_cmd_vlan_config(adapter, adapter->if_handle, | 1081 | status = be_cmd_vlan_config(adapter, adapter->if_handle, |
| 1082 | vids, num, 1, 0); | 1082 | vids, num, 0); |
| 1083 | 1083 | ||
| 1084 | if (status) { | 1084 | if (status) { |
| 1085 | /* Set to VLAN promisc mode as setting VLAN filter failed */ | 1085 | /* Set to VLAN promisc mode as setting VLAN filter failed */ |
| @@ -2676,6 +2676,11 @@ static int be_close(struct net_device *netdev) | |||
| 2676 | 2676 | ||
| 2677 | be_rx_qs_destroy(adapter); | 2677 | be_rx_qs_destroy(adapter); |
| 2678 | 2678 | ||
| 2679 | for (i = 1; i < (adapter->uc_macs + 1); i++) | ||
| 2680 | be_cmd_pmac_del(adapter, adapter->if_handle, | ||
| 2681 | adapter->pmac_id[i], 0); | ||
| 2682 | adapter->uc_macs = 0; | ||
| 2683 | |||
| 2679 | for_all_evt_queues(adapter, eqo, i) { | 2684 | for_all_evt_queues(adapter, eqo, i) { |
| 2680 | if (msix_enabled(adapter)) | 2685 | if (msix_enabled(adapter)) |
| 2681 | synchronize_irq(be_msix_vec_get(adapter, eqo)); | 2686 | synchronize_irq(be_msix_vec_get(adapter, eqo)); |
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index b2793b91cc55..4cbebf3d80eb 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
| @@ -386,7 +386,14 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
| 386 | */ | 386 | */ |
| 387 | bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr, | 387 | bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr, |
| 388 | FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE); | 388 | FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE); |
| 389 | | 389 | if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) { |
| 390 | bdp->cbd_bufaddr = 0; | ||
| 391 | fep->tx_skbuff[index] = NULL; | ||
| 392 | dev_kfree_skb_any(skb); | ||
| 393 | if (net_ratelimit()) | ||
| 394 | netdev_err(ndev, "Tx DMA memory map failed\n"); | ||
| 395 | return NETDEV_TX_OK; | ||
| 396 | } | ||
| 390 | /* Send it on its way. Tell FEC it's ready, interrupt when done, | 397 | /* Send it on its way. Tell FEC it's ready, interrupt when done, |
| 391 | * it's the last BD of the frame, and to put the CRC on the end. | 398 | * it's the last BD of the frame, and to put the CRC on the end. |
| 392 | */ | 399 | */ |
| @@ -861,6 +868,7 @@ fec_enet_rx(struct net_device *ndev, int budget) | |||
| 861 | struct bufdesc_ex *ebdp = NULL; | 868 | struct bufdesc_ex *ebdp = NULL; |
| 862 | bool vlan_packet_rcvd = false; | 869 | bool vlan_packet_rcvd = false; |
| 863 | u16 vlan_tag; | 870 | u16 vlan_tag; |
| 871 | int index = 0; | ||
| 864 | 872 | ||
| 865 | #ifdef CONFIG_M532x | 873 | #ifdef CONFIG_M532x |
| 866 | flush_cache_all(); | 874 | flush_cache_all(); |
| @@ -916,10 +924,15 @@ fec_enet_rx(struct net_device *ndev, int budget) | |||
| 916 | ndev->stats.rx_packets++; | 924 | ndev->stats.rx_packets++; |
| 917 | pkt_len = bdp->cbd_datlen; | 925 | pkt_len = bdp->cbd_datlen; |
| 918 | ndev->stats.rx_bytes += pkt_len; | 926 | ndev->stats.rx_bytes += pkt_len; |
| 919 | data = (__u8*)__va(bdp->cbd_bufaddr); | ||
| 920 | 927 | ||
| 921 | dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, | 928 | if (fep->bufdesc_ex) |
| 922 | FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE); | 929 | index = (struct bufdesc_ex *)bdp - |
| 930 | (struct bufdesc_ex *)fep->rx_bd_base; | ||
| 931 | else | ||
| 932 | index = bdp - fep->rx_bd_base; | ||
| 933 | data = fep->rx_skbuff[index]->data; | ||
| 934 | dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr, | ||
| 935 | FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); | ||
| 923 | 936 | ||
| 924 | if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) | 937 | if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) |
| 925 | swap_buffer(data, pkt_len); | 938 | swap_buffer(data, pkt_len); |
| @@ -999,8 +1012,8 @@ fec_enet_rx(struct net_device *ndev, int budget) | |||
| 999 | napi_gro_receive(&fep->napi, skb); | 1012 | napi_gro_receive(&fep->napi, skb); |
| 1000 | } | 1013 | } |
| 1001 | 1014 | ||
| 1002 | bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data, | 1015 | dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr, |
| 1003 | FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE); | 1016 | FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); |
| 1004 | rx_processing_done: | 1017 | rx_processing_done: |
| 1005 | /* Clear the status flags for this buffer */ | 1018 | /* Clear the status flags for this buffer */ |
| 1006 | status &= ~BD_ENET_RX_STATS; | 1019 | status &= ~BD_ENET_RX_STATS; |
| @@ -1719,6 +1732,12 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) | |||
| 1719 | 1732 | ||
| 1720 | bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data, | 1733 | bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data, |
| 1721 | FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); | 1734 | FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); |
| 1735 | if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) { | ||
| 1736 | fec_enet_free_buffers(ndev); | ||
| 1737 | if (net_ratelimit()) | ||
| 1738 | netdev_err(ndev, "Rx DMA memory map failed\n"); | ||
| 1739 | return -ENOMEM; | ||
| 1740 | } | ||
| 1722 | bdp->cbd_sc = BD_ENET_RX_EMPTY; | 1741 | bdp->cbd_sc = BD_ENET_RX_EMPTY; |
| 1723 | 1742 | ||
| 1724 | if (fep->bufdesc_ex) { | 1743 | if (fep->bufdesc_ex) { |
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index aedd5736a87d..8d3945ab7334 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c | |||
| @@ -3482,10 +3482,10 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) | |||
| 3482 | * specified. Matching the kind of event packet is not supported, with the | 3482 | * specified. Matching the kind of event packet is not supported, with the |
| 3483 | * exception of "all V2 events regardless of level 2 or 4". | 3483 | * exception of "all V2 events regardless of level 2 or 4". |
| 3484 | **/ | 3484 | **/ |
| 3485 | static int e1000e_config_hwtstamp(struct e1000_adapter *adapter) | 3485 | static int e1000e_config_hwtstamp(struct e1000_adapter *adapter, |
| 3486 | struct hwtstamp_config *config) | ||
| 3486 | { | 3487 | { |
| 3487 | struct e1000_hw *hw = &adapter->hw; | 3488 | struct e1000_hw *hw = &adapter->hw; |
| 3488 | struct hwtstamp_config *config = &adapter->hwtstamp_config; | ||
| 3489 | u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED; | 3489 | u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED; |
| 3490 | u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; | 3490 | u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; |
| 3491 | u32 rxmtrl = 0; | 3491 | u32 rxmtrl = 0; |
| @@ -3586,6 +3586,8 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter) | |||
| 3586 | return -ERANGE; | 3586 | return -ERANGE; |
| 3587 | } | 3587 | } |
| 3588 | 3588 | ||
| 3589 | adapter->hwtstamp_config = *config; | ||
| 3590 | |||
| 3589 | /* enable/disable Tx h/w time stamping */ | 3591 | /* enable/disable Tx h/w time stamping */ |
| 3590 | regval = er32(TSYNCTXCTL); | 3592 | regval = er32(TSYNCTXCTL); |
| 3591 | regval &= ~E1000_TSYNCTXCTL_ENABLED; | 3593 | regval &= ~E1000_TSYNCTXCTL_ENABLED; |
| @@ -3874,7 +3876,7 @@ void e1000e_reset(struct e1000_adapter *adapter) | |||
| 3874 | e1000e_reset_adaptive(hw); | 3876 | e1000e_reset_adaptive(hw); |
| 3875 | 3877 | ||
| 3876 | /* initialize systim and reset the ns time counter */ | 3878 | /* initialize systim and reset the ns time counter */ |
| 3877 | e1000e_config_hwtstamp(adapter); | 3879 | e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config); |
| 3878 | 3880 | ||
| 3879 | /* Set EEE advertisement as appropriate */ | 3881 | /* Set EEE advertisement as appropriate */ |
| 3880 | if (adapter->flags2 & FLAG2_HAS_EEE) { | 3882 | if (adapter->flags2 & FLAG2_HAS_EEE) { |
| @@ -5797,14 +5799,10 @@ static int e1000e_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) | |||
| 5797 | if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) | 5799 | if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) |
| 5798 | return -EFAULT; | 5800 | return -EFAULT; |
| 5799 | 5801 | ||
| 5800 | adapter->hwtstamp_config = config; | 5802 | ret_val = e1000e_config_hwtstamp(adapter, &config); |
| 5801 | |||
| 5802 | ret_val = e1000e_config_hwtstamp(adapter); | ||
| 5803 | if (ret_val) | 5803 | if (ret_val) |
| 5804 | return ret_val; | 5804 | return ret_val; |
| 5805 | 5805 | ||
| 5806 | config = adapter->hwtstamp_config; | ||
| 5807 | |||
| 5808 | switch (config.rx_filter) { | 5806 | switch (config.rx_filter) { |
| 5809 | case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: | 5807 | case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: |
| 5810 | case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: | 5808 | case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: |
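
The e1000e refactor makes e1000e_config_hwtstamp() take the candidate hwtstamp_config as an explicit parameter and copy it into the adapter only after every tx_type/rx_filter combination has passed validation, so a rejected SIOCSHWTSTAMP can no longer leave half-applied state, and the reset path simply replays the stored configuration. A minimal sketch of the validate-then-commit shape (struct and names hypothetical):

    #include <linux/errno.h>
    #include <linux/net_tstamp.h>

    struct example_adapter {
            struct hwtstamp_config hwtstamp_config;
    };

    static int example_config_hwtstamp(struct example_adapter *adapter,
                                       struct hwtstamp_config *config)
    {
            /* 1. Validate without touching adapter state. */
            switch (config->tx_type) {
            case HWTSTAMP_TX_OFF:
            case HWTSTAMP_TX_ON:
                    break;
            default:
                    return -ERANGE;
            }
            /* ... validate config->rx_filter the same way ... */

            /* 2. Commit only a fully validated request. */
            adapter->hwtstamp_config = *config;

            /* 3. Program the hardware from the committed copy. */
            return 0;
    }
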
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 00cd36e08601..61088a6a9424 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c | |||
| @@ -2890,7 +2890,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev) | |||
| 2890 | PHY_INTERFACE_MODE_GMII); | 2890 | PHY_INTERFACE_MODE_GMII); |
| 2891 | if (!mp->phy) | 2891 | if (!mp->phy) |
| 2892 | err = -ENODEV; | 2892 | err = -ENODEV; |
| 2893 | phy_addr_set(mp, mp->phy->addr); | 2893 | else |
| 2894 | phy_addr_set(mp, mp->phy->addr); | ||
| 2894 | } else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) { | 2895 | } else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) { |
| 2895 | mp->phy = phy_scan(mp, pd->phy_addr); | 2896 | mp->phy = phy_scan(mp, pd->phy_addr); |
| 2896 | 2897 | ||
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c index 0951f7aca1ef..822616e3c375 100644 --- a/drivers/net/ethernet/micrel/ks8842.c +++ b/drivers/net/ethernet/micrel/ks8842.c | |||
| @@ -459,8 +459,7 @@ static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev) | |||
| 459 | sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4; | 459 | sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4; |
| 460 | 460 | ||
| 461 | ctl->adesc = dmaengine_prep_slave_sg(ctl->chan, | 461 | ctl->adesc = dmaengine_prep_slave_sg(ctl->chan, |
| 462 | &ctl->sg, 1, DMA_MEM_TO_DEV, | 462 | &ctl->sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT); |
| 463 | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); | ||
| 464 | if (!ctl->adesc) | 463 | if (!ctl->adesc) |
| 465 | return NETDEV_TX_BUSY; | 464 | return NETDEV_TX_BUSY; |
| 466 | 465 | ||
| @@ -571,8 +570,7 @@ static int __ks8842_start_new_rx_dma(struct net_device *netdev) | |||
| 571 | sg_dma_len(sg) = DMA_BUFFER_SIZE; | 570 | sg_dma_len(sg) = DMA_BUFFER_SIZE; |
| 572 | 571 | ||
| 573 | ctl->adesc = dmaengine_prep_slave_sg(ctl->chan, | 572 | ctl->adesc = dmaengine_prep_slave_sg(ctl->chan, |
| 574 | sg, 1, DMA_DEV_TO_MEM, | 573 | sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT); |
| 575 | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); | ||
| 576 | 574 | ||
| 577 | if (!ctl->adesc) | 575 | if (!ctl->adesc) |
| 578 | goto out; | 576 | goto out; |
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 5a0f04c2c813..27ffe0ebf0a6 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | |||
| @@ -245,16 +245,8 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |||
| 245 | /* Get ieee1588's dev information */ | 245 | /* Get ieee1588's dev information */ |
| 246 | pdev = adapter->ptp_pdev; | 246 | pdev = adapter->ptp_pdev; |
| 247 | 247 | ||
| 248 | switch (cfg.tx_type) { | 248 | if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON) |
| 249 | case HWTSTAMP_TX_OFF: | ||
| 250 | adapter->hwts_tx_en = 0; | ||
| 251 | break; | ||
| 252 | case HWTSTAMP_TX_ON: | ||
| 253 | adapter->hwts_tx_en = 1; | ||
| 254 | break; | ||
| 255 | default: | ||
| 256 | return -ERANGE; | 249 | return -ERANGE; |
| 257 | } | ||
| 258 | 250 | ||
| 259 | switch (cfg.rx_filter) { | 251 | switch (cfg.rx_filter) { |
| 260 | case HWTSTAMP_FILTER_NONE: | 252 | case HWTSTAMP_FILTER_NONE: |
| @@ -284,6 +276,8 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |||
| 284 | return -ERANGE; | 276 | return -ERANGE; |
| 285 | } | 277 | } |
| 286 | 278 | ||
| 279 | adapter->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON; | ||
| 280 | |||
| 287 | /* Clear out any old time stamps. */ | 281 | /* Clear out any old time stamps. */ |
| 288 | pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED); | 282 | pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED); |
| 289 | 283 | ||
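
The pch_gbe hunk, and the stmmac, cpsw and ixp4xx hunks that follow, all make the same two-part change: a switch with only two legal tx_type values collapses into a single range check, and the enable flag is assigned once, after rx_filter validation, so an -ERANGE exit leaves the previous setting intact. The shared shape, as a minimal sketch:

    #include <linux/errno.h>
    #include <linux/net_tstamp.h>
    #include <linux/types.h>

    /* Hypothetical helper: reject a bad tx_type up front, defer the
     * state change until the rest of the request has been checked. */
    static int example_set_hwtstamp(struct hwtstamp_config *cfg,
                                    bool *hwts_tx_en)
    {
            if (cfg->tx_type != HWTSTAMP_TX_OFF &&
                cfg->tx_type != HWTSTAMP_TX_ON)
                    return -ERANGE;

            /* ... rx_filter validation here; an early return leaves
             * *hwts_tx_en untouched ... */

            *hwts_tx_en = cfg->tx_type == HWTSTAMP_TX_ON;
            return 0;
    }
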
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 8d4ccd35a016..8a7a23a84ac5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
| @@ -435,16 +435,9 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
| 435 | if (config.flags) | 435 | if (config.flags) |
| 436 | return -EINVAL; | 436 | return -EINVAL; |
| 437 | 437 | ||
| 438 | switch (config.tx_type) { | 438 | if (config.tx_type != HWTSTAMP_TX_OFF && |
| 439 | case HWTSTAMP_TX_OFF: | 439 | config.tx_type != HWTSTAMP_TX_ON) |
| 440 | priv->hwts_tx_en = 0; | ||
| 441 | break; | ||
| 442 | case HWTSTAMP_TX_ON: | ||
| 443 | priv->hwts_tx_en = 1; | ||
| 444 | break; | ||
| 445 | default: | ||
| 446 | return -ERANGE; | 440 | return -ERANGE; |
| 447 | } | ||
| 448 | 441 | ||
| 449 | if (priv->adv_ts) { | 442 | if (priv->adv_ts) { |
| 450 | switch (config.rx_filter) { | 443 | switch (config.rx_filter) { |
| @@ -576,6 +569,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
| 576 | } | 569 | } |
| 577 | } | 570 | } |
| 578 | priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1); | 571 | priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1); |
| 572 | priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON; | ||
| 579 | 573 | ||
| 580 | if (!priv->hwts_tx_en && !priv->hwts_rx_en) | 574 | if (!priv->hwts_tx_en && !priv->hwts_rx_en) |
| 581 | priv->hw->ptp->config_hw_tstamping(priv->ioaddr, 0); | 575 | priv->hw->ptp->config_hw_tstamping(priv->ioaddr, 0); |
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 90d41d26ec6d..7536a4c01293 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
| @@ -967,14 +967,19 @@ static inline void cpsw_add_dual_emac_def_ale_entries( | |||
| 967 | priv->host_port, ALE_VLAN, slave->port_vlan); | 967 | priv->host_port, ALE_VLAN, slave->port_vlan); |
| 968 | } | 968 | } |
| 969 | 969 | ||
| 970 | static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) | 970 | static void soft_reset_slave(struct cpsw_slave *slave) |
| 971 | { | 971 | { |
| 972 | char name[32]; | 972 | char name[32]; |
| 973 | u32 slave_port; | ||
| 974 | |||
| 975 | sprintf(name, "slave-%d", slave->slave_num); | ||
| 976 | 973 | ||
| 974 | snprintf(name, sizeof(name), "slave-%d", slave->slave_num); | ||
| 977 | soft_reset(name, &slave->sliver->soft_reset); | 975 | soft_reset(name, &slave->sliver->soft_reset); |
| 976 | } | ||
| 977 | |||
| 978 | static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) | ||
| 979 | { | ||
| 980 | u32 slave_port; | ||
| 981 | |||
| 982 | soft_reset_slave(slave); | ||
| 978 | 983 | ||
| 979 | /* setup priority mapping */ | 984 | /* setup priority mapping */ |
| 980 | __raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map); | 985 | __raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map); |
| @@ -1323,6 +1328,10 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
| 1323 | struct cpts *cpts = priv->cpts; | 1328 | struct cpts *cpts = priv->cpts; |
| 1324 | struct hwtstamp_config cfg; | 1329 | struct hwtstamp_config cfg; |
| 1325 | 1330 | ||
| 1331 | if (priv->version != CPSW_VERSION_1 && | ||
| 1332 | priv->version != CPSW_VERSION_2) | ||
| 1333 | return -EOPNOTSUPP; | ||
| 1334 | |||
| 1326 | if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) | 1335 | if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) |
| 1327 | return -EFAULT; | 1336 | return -EFAULT; |
| 1328 | 1337 | ||
| @@ -1330,16 +1339,8 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
| 1330 | if (cfg.flags) | 1339 | if (cfg.flags) |
| 1331 | return -EINVAL; | 1340 | return -EINVAL; |
| 1332 | 1341 | ||
| 1333 | switch (cfg.tx_type) { | 1342 | if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON) |
| 1334 | case HWTSTAMP_TX_OFF: | ||
| 1335 | cpts->tx_enable = 0; | ||
| 1336 | break; | ||
| 1337 | case HWTSTAMP_TX_ON: | ||
| 1338 | cpts->tx_enable = 1; | ||
| 1339 | break; | ||
| 1340 | default: | ||
| 1341 | return -ERANGE; | 1343 | return -ERANGE; |
| 1342 | } | ||
| 1343 | 1344 | ||
| 1344 | switch (cfg.rx_filter) { | 1345 | switch (cfg.rx_filter) { |
| 1345 | case HWTSTAMP_FILTER_NONE: | 1346 | case HWTSTAMP_FILTER_NONE: |
| @@ -1366,6 +1367,8 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
| 1366 | return -ERANGE; | 1367 | return -ERANGE; |
| 1367 | } | 1368 | } |
| 1368 | 1369 | ||
| 1370 | cpts->tx_enable = cfg.tx_type == HWTSTAMP_TX_ON; | ||
| 1371 | |||
| 1369 | switch (priv->version) { | 1372 | switch (priv->version) { |
| 1370 | case CPSW_VERSION_1: | 1373 | case CPSW_VERSION_1: |
| 1371 | cpsw_hwtstamp_v1(priv); | 1374 | cpsw_hwtstamp_v1(priv); |
| @@ -1374,7 +1377,7 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
| 1374 | cpsw_hwtstamp_v2(priv); | 1377 | cpsw_hwtstamp_v2(priv); |
| 1375 | break; | 1378 | break; |
| 1376 | default: | 1379 | default: |
| 1377 | return -ENOTSUPP; | 1380 | WARN_ON(1); |
| 1378 | } | 1381 | } |
| 1379 | 1382 | ||
| 1380 | return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; | 1383 | return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; |
| @@ -2173,8 +2176,9 @@ static int cpsw_suspend(struct device *dev) | |||
| 2173 | 2176 | ||
| 2174 | if (netif_running(ndev)) | 2177 | if (netif_running(ndev)) |
| 2175 | cpsw_ndo_stop(ndev); | 2178 | cpsw_ndo_stop(ndev); |
| 2176 | soft_reset("sliver 0", &priv->slaves[0].sliver->soft_reset); | 2179 | |
| 2177 | soft_reset("sliver 1", &priv->slaves[1].sliver->soft_reset); | 2180 | for_each_slave(priv, soft_reset_slave); |
| 2181 | |||
| 2178 | pm_runtime_put_sync(&pdev->dev); | 2182 | pm_runtime_put_sync(&pdev->dev); |
| 2179 | 2183 | ||
| 2180 | /* Select sleep pin state */ | 2184 | /* Select sleep pin state */ |
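
Three smaller cpsw fixes ride along with the hwtstamp cleanup: the fixed-size name buffer is now filled with snprintf() instead of sprintf(), the unreachable -ENOTSUPP default becomes a WARN_ON(1) (the version is already checked at the top of the ioctl), and suspend iterates over the slaves actually present with for_each_slave() instead of hard-coding entries 0 and 1, which single-port configurations do not have. A generic sketch of the iterator idea (not the driver's exact macro):

    /* Walk however many slaves the platform declares and apply the
     * callback; hypothetical types standing in for the cpsw ones. */
    struct example_slave {
            int slave_num;
    };

    static void example_for_each_slave(struct example_slave *slaves,
                                       int count,
                                       void (*fn)(struct example_slave *))
    {
            int i;

            for (i = 0; i < count; i++)
                    fn(&slaves[i]);
    }
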
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index e78802e75ea6..bcc224a83734 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c | |||
| @@ -389,16 +389,8 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |||
| 389 | ch = PORT2CHANNEL(port); | 389 | ch = PORT2CHANNEL(port); |
| 390 | regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT; | 390 | regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT; |
| 391 | 391 | ||
| 392 | switch (cfg.tx_type) { | 392 | if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON) |
| 393 | case HWTSTAMP_TX_OFF: | ||
| 394 | port->hwts_tx_en = 0; | ||
| 395 | break; | ||
| 396 | case HWTSTAMP_TX_ON: | ||
| 397 | port->hwts_tx_en = 1; | ||
| 398 | break; | ||
| 399 | default: | ||
| 400 | return -ERANGE; | 393 | return -ERANGE; |
| 401 | } | ||
| 402 | 394 | ||
| 403 | switch (cfg.rx_filter) { | 395 | switch (cfg.rx_filter) { |
| 404 | case HWTSTAMP_FILTER_NONE: | 396 | case HWTSTAMP_FILTER_NONE: |
| @@ -416,6 +408,8 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |||
| 416 | return -ERANGE; | 408 | return -ERANGE; |
| 417 | } | 409 | } |
| 418 | 410 | ||
| 411 | port->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON; | ||
| 412 | |||
| 419 | /* Clear out any old time stamps. */ | 413 | /* Clear out any old time stamps. */ |
| 420 | __raw_writel(TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED, | 414 | __raw_writel(TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED, |
| 421 | ®s->channel[ch].ch_event); | 415 | ®s->channel[ch].ch_event); |
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 9dccb1edfd2a..dc76670c2f2a 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c | |||
| @@ -628,6 +628,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, | |||
| 628 | const struct iovec *iv, unsigned long total_len, | 628 | const struct iovec *iv, unsigned long total_len, |
| 629 | size_t count, int noblock) | 629 | size_t count, int noblock) |
| 630 | { | 630 | { |
| 631 | int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN); | ||
| 631 | struct sk_buff *skb; | 632 | struct sk_buff *skb; |
| 632 | struct macvlan_dev *vlan; | 633 | struct macvlan_dev *vlan; |
| 633 | unsigned long len = total_len; | 634 | unsigned long len = total_len; |
| @@ -670,6 +671,8 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, | |||
| 670 | 671 | ||
| 671 | if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) { | 672 | if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) { |
| 672 | copylen = vnet_hdr.hdr_len ? vnet_hdr.hdr_len : GOODCOPY_LEN; | 673 | copylen = vnet_hdr.hdr_len ? vnet_hdr.hdr_len : GOODCOPY_LEN; |
| 674 | if (copylen > good_linear) | ||
| 675 | copylen = good_linear; | ||
| 673 | linear = copylen; | 676 | linear = copylen; |
| 674 | if (iov_pages(iv, vnet_hdr_len + copylen, count) | 677 | if (iov_pages(iv, vnet_hdr_len + copylen, count) |
| 675 | <= MAX_SKB_FRAGS) | 678 | <= MAX_SKB_FRAGS) |
| @@ -678,7 +681,10 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, | |||
| 678 | 681 | ||
| 679 | if (!zerocopy) { | 682 | if (!zerocopy) { |
| 680 | copylen = len; | 683 | copylen = len; |
| 681 | linear = vnet_hdr.hdr_len; | 684 | if (vnet_hdr.hdr_len > good_linear) |
| 685 | linear = good_linear; | ||
| 686 | else | ||
| 687 | linear = vnet_hdr.hdr_len; | ||
| 682 | } | 688 | } |
| 683 | 689 | ||
| 684 | skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen, | 690 | skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen, |
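
macvtap now caps both the zerocopy copylen and the non-zerocopy linear length at SKB_MAX_HEAD(NET_IP_ALIGN); the tun.c hunk further down applies the identical clamp. Without it, a guest-controlled vnet_hdr.hdr_len could demand an skb linear area larger than the allocator can provide. The rule, as a one-line helper sketch:

    #include <linux/skbuff.h>

    /* good_linear would be computed once as SKB_MAX_HEAD(NET_IP_ALIGN):
     * the largest linear skb head a single allocation supports. */
    static size_t example_clamp_linear(size_t hdr_len, size_t good_linear)
    {
            return hdr_len > good_linear ? good_linear : hdr_len;
    }
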
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 6574eb8766f9..34b0de09d881 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c | |||
| @@ -2650,7 +2650,7 @@ static int team_nl_cmd_port_list_get(struct sk_buff *skb, | |||
| 2650 | return err; | 2650 | return err; |
| 2651 | } | 2651 | } |
| 2652 | 2652 | ||
| 2653 | static struct genl_ops team_nl_ops[] = { | 2653 | static const struct genl_ops team_nl_ops[] = { |
| 2654 | { | 2654 | { |
| 2655 | .cmd = TEAM_CMD_NOOP, | 2655 | .cmd = TEAM_CMD_NOOP, |
| 2656 | .doit = team_nl_cmd_noop, | 2656 | .doit = team_nl_cmd_noop, |
| @@ -2676,15 +2676,15 @@ static struct genl_ops team_nl_ops[] = { | |||
| 2676 | }, | 2676 | }, |
| 2677 | }; | 2677 | }; |
| 2678 | 2678 | ||
| 2679 | static struct genl_multicast_group team_change_event_mcgrp = { | 2679 | static const struct genl_multicast_group team_nl_mcgrps[] = { |
| 2680 | .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, | 2680 | { .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, }, |
| 2681 | }; | 2681 | }; |
| 2682 | 2682 | ||
| 2683 | static int team_nl_send_multicast(struct sk_buff *skb, | 2683 | static int team_nl_send_multicast(struct sk_buff *skb, |
| 2684 | struct team *team, u32 portid) | 2684 | struct team *team, u32 portid) |
| 2685 | { | 2685 | { |
| 2686 | return genlmsg_multicast_netns(dev_net(team->dev), skb, 0, | 2686 | return genlmsg_multicast_netns(&team_nl_family, dev_net(team->dev), |
| 2687 | team_change_event_mcgrp.id, GFP_KERNEL); | 2687 | skb, 0, 0, GFP_KERNEL); |
| 2688 | } | 2688 | } |
| 2689 | 2689 | ||
| 2690 | static int team_nl_send_event_options_get(struct team *team, | 2690 | static int team_nl_send_event_options_get(struct team *team, |
| @@ -2703,23 +2703,8 @@ static int team_nl_send_event_port_get(struct team *team, | |||
| 2703 | 2703 | ||
| 2704 | static int team_nl_init(void) | 2704 | static int team_nl_init(void) |
| 2705 | { | 2705 | { |
| 2706 | int err; | 2706 | return genl_register_family_with_ops_groups(&team_nl_family, team_nl_ops, |
| 2707 | 2707 | team_nl_mcgrps); | |
| 2708 | err = genl_register_family_with_ops(&team_nl_family, team_nl_ops, | ||
| 2709 | ARRAY_SIZE(team_nl_ops)); | ||
| 2710 | if (err) | ||
| 2711 | return err; | ||
| 2712 | |||
| 2713 | err = genl_register_mc_group(&team_nl_family, &team_change_event_mcgrp); | ||
| 2714 | if (err) | ||
| 2715 | goto err_change_event_grp_reg; | ||
| 2716 | |||
| 2717 | return 0; | ||
| 2718 | |||
| 2719 | err_change_event_grp_reg: | ||
| 2720 | genl_unregister_family(&team_nl_family); | ||
| 2721 | |||
| 2722 | return err; | ||
| 2723 | } | 2708 | } |
| 2724 | 2709 | ||
| 2725 | static void team_nl_fini(void) | 2710 | static void team_nl_fini(void) |
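
The team driver converts to the consolidated genetlink API of this cycle: the ops and multicast-group tables become const arrays, genl_register_family_with_ops_groups() registers family, ops and groups in one call (sizing the arrays internally, hence no ARRAY_SIZE or unwind path), and genlmsg_multicast_netns() now takes the family plus a group index instead of a global group id. A sketch of the consolidated registration, with a hypothetical family:

    #include <net/genetlink.h>

    static struct genl_family example_family = {
            .id      = GENL_ID_GENERATE,
            .name    = "example",
            .version = 1,
    };

    static int example_noop(struct sk_buff *skb, struct genl_info *info)
    {
            return 0;
    }

    static const struct genl_ops example_ops[] = {
            { .cmd = 1, .doit = example_noop, },
    };

    static const struct genl_multicast_group example_mcgrps[] = {
            { .name = "example_mc", },
    };

    static int example_nl_init(void)
    {
            /* One call replaces register-ops + register-mc-group and
             * the error unwinding between them. */
            return genl_register_family_with_ops_groups(&example_family,
                                                        example_ops,
                                                        example_mcgrps);
    }
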
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 7cb105c103fe..782e38bfc1ee 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
| @@ -981,6 +981,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, | |||
| 981 | struct sk_buff *skb; | 981 | struct sk_buff *skb; |
| 982 | size_t len = total_len, align = NET_SKB_PAD, linear; | 982 | size_t len = total_len, align = NET_SKB_PAD, linear; |
| 983 | struct virtio_net_hdr gso = { 0 }; | 983 | struct virtio_net_hdr gso = { 0 }; |
| 984 | int good_linear; | ||
| 984 | int offset = 0; | 985 | int offset = 0; |
| 985 | int copylen; | 986 | int copylen; |
| 986 | bool zerocopy = false; | 987 | bool zerocopy = false; |
| @@ -1021,12 +1022,16 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, | |||
| 1021 | return -EINVAL; | 1022 | return -EINVAL; |
| 1022 | } | 1023 | } |
| 1023 | 1024 | ||
| 1025 | good_linear = SKB_MAX_HEAD(align); | ||
| 1026 | |||
| 1024 | if (msg_control) { | 1027 | if (msg_control) { |
| 1025 | /* There are 256 bytes to be copied in skb, so there is | 1028 | /* There are 256 bytes to be copied in skb, so there is |
| 1026 | * enough room for skb expand head in case it is used. | 1029 | * enough room for skb expand head in case it is used. |
| 1027 | * The rest of the buffer is mapped from userspace. | 1030 | * The rest of the buffer is mapped from userspace. |
| 1028 | */ | 1031 | */ |
| 1029 | copylen = gso.hdr_len ? gso.hdr_len : GOODCOPY_LEN; | 1032 | copylen = gso.hdr_len ? gso.hdr_len : GOODCOPY_LEN; |
| 1033 | if (copylen > good_linear) | ||
| 1034 | copylen = good_linear; | ||
| 1030 | linear = copylen; | 1035 | linear = copylen; |
| 1031 | if (iov_pages(iv, offset + copylen, count) <= MAX_SKB_FRAGS) | 1036 | if (iov_pages(iv, offset + copylen, count) <= MAX_SKB_FRAGS) |
| 1032 | zerocopy = true; | 1037 | zerocopy = true; |
| @@ -1034,7 +1039,10 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, | |||
| 1034 | 1039 | ||
| 1035 | if (!zerocopy) { | 1040 | if (!zerocopy) { |
| 1036 | copylen = len; | 1041 | copylen = len; |
| 1037 | linear = gso.hdr_len; | 1042 | if (gso.hdr_len > good_linear) |
| 1043 | linear = good_linear; | ||
| 1044 | else | ||
| 1045 | linear = gso.hdr_len; | ||
| 1038 | } | 1046 | } |
| 1039 | 1047 | ||
| 1040 | skb = tun_alloc_skb(tfile, align, copylen, linear, noblock); | 1048 | skb = tun_alloc_skb(tfile, align, copylen, linear, noblock); |
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index f74786aa37be..e15ec2b12035 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c | |||
| @@ -66,7 +66,7 @@ static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx); | |||
| 66 | static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer); | 66 | static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer); |
| 67 | static struct usb_driver cdc_ncm_driver; | 67 | static struct usb_driver cdc_ncm_driver; |
| 68 | 68 | ||
| 69 | static u8 cdc_ncm_setup(struct usbnet *dev) | 69 | static int cdc_ncm_setup(struct usbnet *dev) |
| 70 | { | 70 | { |
| 71 | struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; | 71 | struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; |
| 72 | struct usb_cdc_ncm_ntb_parameters ncm_parm; | 72 | struct usb_cdc_ncm_ntb_parameters ncm_parm; |
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 90a429b7ebad..8494bb53ebdc 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c | |||
| @@ -204,9 +204,6 @@ static void intr_complete (struct urb *urb) | |||
| 204 | break; | 204 | break; |
| 205 | } | 205 | } |
| 206 | 206 | ||
| 207 | if (!netif_running (dev->net)) | ||
| 208 | return; | ||
| 209 | |||
| 210 | status = usb_submit_urb (urb, GFP_ATOMIC); | 207 | status = usb_submit_urb (urb, GFP_ATOMIC); |
| 211 | if (status != 0) | 208 | if (status != 0) |
| 212 | netif_err(dev, timer, dev->net, | 209 | netif_err(dev, timer, dev->net, |
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index cdc7c90a6a9e..7bab4de658a9 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
| @@ -36,7 +36,10 @@ module_param(csum, bool, 0444); | |||
| 36 | module_param(gso, bool, 0444); | 36 | module_param(gso, bool, 0444); |
| 37 | 37 | ||
| 38 | /* FIXME: MTU in config. */ | 38 | /* FIXME: MTU in config. */ |
| 39 | #define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN) | 39 | #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN) |
| 40 | #define MERGE_BUFFER_LEN (ALIGN(GOOD_PACKET_LEN + \ | ||
| 41 | sizeof(struct virtio_net_hdr_mrg_rxbuf), \ | ||
| 42 | L1_CACHE_BYTES)) | ||
| 40 | #define GOOD_COPY_LEN 128 | 43 | #define GOOD_COPY_LEN 128 |
| 41 | 44 | ||
| 42 | #define VIRTNET_DRIVER_VERSION "1.0.0" | 45 | #define VIRTNET_DRIVER_VERSION "1.0.0" |
| @@ -314,10 +317,10 @@ static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb) | |||
| 314 | head_skb->dev->stats.rx_length_errors++; | 317 | head_skb->dev->stats.rx_length_errors++; |
| 315 | return -EINVAL; | 318 | return -EINVAL; |
| 316 | } | 319 | } |
| 317 | if (unlikely(len > MAX_PACKET_LEN)) { | 320 | if (unlikely(len > MERGE_BUFFER_LEN)) { |
| 318 | pr_debug("%s: rx error: merge buffer too long\n", | 321 | pr_debug("%s: rx error: merge buffer too long\n", |
| 319 | head_skb->dev->name); | 322 | head_skb->dev->name); |
| 320 | len = MAX_PACKET_LEN; | 323 | len = MERGE_BUFFER_LEN; |
| 321 | } | 324 | } |
| 322 | if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) { | 325 | if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) { |
| 323 | struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC); | 326 | struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC); |
| @@ -336,18 +339,17 @@ static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb) | |||
| 336 | if (curr_skb != head_skb) { | 339 | if (curr_skb != head_skb) { |
| 337 | head_skb->data_len += len; | 340 | head_skb->data_len += len; |
| 338 | head_skb->len += len; | 341 | head_skb->len += len; |
| 339 | head_skb->truesize += MAX_PACKET_LEN; | 342 | head_skb->truesize += MERGE_BUFFER_LEN; |
| 340 | } | 343 | } |
| 341 | page = virt_to_head_page(buf); | 344 | page = virt_to_head_page(buf); |
| 342 | offset = buf - (char *)page_address(page); | 345 | offset = buf - (char *)page_address(page); |
| 343 | if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) { | 346 | if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) { |
| 344 | put_page(page); | 347 | put_page(page); |
| 345 | skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1, | 348 | skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1, |
| 346 | len, MAX_PACKET_LEN); | 349 | len, MERGE_BUFFER_LEN); |
| 347 | } else { | 350 | } else { |
| 348 | skb_add_rx_frag(curr_skb, num_skb_frags, page, | 351 | skb_add_rx_frag(curr_skb, num_skb_frags, page, |
| 349 | offset, len, | 352 | offset, len, MERGE_BUFFER_LEN); |
| 350 | MAX_PACKET_LEN); | ||
| 351 | } | 353 | } |
| 352 | --rq->num; | 354 | --rq->num; |
| 353 | } | 355 | } |
| @@ -383,7 +385,7 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len) | |||
| 383 | struct page *page = virt_to_head_page(buf); | 385 | struct page *page = virt_to_head_page(buf); |
| 384 | skb = page_to_skb(rq, page, | 386 | skb = page_to_skb(rq, page, |
| 385 | (char *)buf - (char *)page_address(page), | 387 | (char *)buf - (char *)page_address(page), |
| 386 | len, MAX_PACKET_LEN); | 388 | len, MERGE_BUFFER_LEN); |
| 387 | if (unlikely(!skb)) { | 389 | if (unlikely(!skb)) { |
| 388 | dev->stats.rx_dropped++; | 390 | dev->stats.rx_dropped++; |
| 389 | put_page(page); | 391 | put_page(page); |
| @@ -471,11 +473,11 @@ static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp) | |||
| 471 | struct skb_vnet_hdr *hdr; | 473 | struct skb_vnet_hdr *hdr; |
| 472 | int err; | 474 | int err; |
| 473 | 475 | ||
| 474 | skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp); | 476 | skb = __netdev_alloc_skb_ip_align(vi->dev, GOOD_PACKET_LEN, gfp); |
| 475 | if (unlikely(!skb)) | 477 | if (unlikely(!skb)) |
| 476 | return -ENOMEM; | 478 | return -ENOMEM; |
| 477 | 479 | ||
| 478 | skb_put(skb, MAX_PACKET_LEN); | 480 | skb_put(skb, GOOD_PACKET_LEN); |
| 479 | 481 | ||
| 480 | hdr = skb_vnet_hdr(skb); | 482 | hdr = skb_vnet_hdr(skb); |
| 481 | sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr); | 483 | sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr); |
| @@ -542,20 +544,20 @@ static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp) | |||
| 542 | int err; | 544 | int err; |
| 543 | 545 | ||
| 544 | if (gfp & __GFP_WAIT) { | 546 | if (gfp & __GFP_WAIT) { |
| 545 | if (skb_page_frag_refill(MAX_PACKET_LEN, &vi->alloc_frag, | 547 | if (skb_page_frag_refill(MERGE_BUFFER_LEN, &vi->alloc_frag, |
| 546 | gfp)) { | 548 | gfp)) { |
| 547 | buf = (char *)page_address(vi->alloc_frag.page) + | 549 | buf = (char *)page_address(vi->alloc_frag.page) + |
| 548 | vi->alloc_frag.offset; | 550 | vi->alloc_frag.offset; |
| 549 | get_page(vi->alloc_frag.page); | 551 | get_page(vi->alloc_frag.page); |
| 550 | vi->alloc_frag.offset += MAX_PACKET_LEN; | 552 | vi->alloc_frag.offset += MERGE_BUFFER_LEN; |
| 551 | } | 553 | } |
| 552 | } else { | 554 | } else { |
| 553 | buf = netdev_alloc_frag(MAX_PACKET_LEN); | 555 | buf = netdev_alloc_frag(MERGE_BUFFER_LEN); |
| 554 | } | 556 | } |
| 555 | if (!buf) | 557 | if (!buf) |
| 556 | return -ENOMEM; | 558 | return -ENOMEM; |
| 557 | 559 | ||
| 558 | sg_init_one(rq->sg, buf, MAX_PACKET_LEN); | 560 | sg_init_one(rq->sg, buf, MERGE_BUFFER_LEN); |
| 559 | err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, buf, gfp); | 561 | err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, buf, gfp); |
| 560 | if (err < 0) | 562 | if (err < 0) |
| 561 | put_page(virt_to_head_page(buf)); | 563 | put_page(virt_to_head_page(buf)); |
| @@ -1619,8 +1621,8 @@ static int virtnet_probe(struct virtio_device *vdev) | |||
| 1619 | if (err) | 1621 | if (err) |
| 1620 | goto free_stats; | 1622 | goto free_stats; |
| 1621 | 1623 | ||
| 1622 | netif_set_real_num_tx_queues(dev, 1); | 1624 | netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs); |
| 1623 | netif_set_real_num_rx_queues(dev, 1); | 1625 | netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs); |
| 1624 | 1626 | ||
| 1625 | err = register_netdev(dev); | 1627 | err = register_netdev(dev); |
| 1626 | if (err) { | 1628 | if (err) { |
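
virtio_net splits the former MAX_PACKET_LEN into two constants with distinct jobs: GOOD_PACKET_LEN sizes small-mode receive skbs, while MERGE_BUFFER_LEN sizes mergeable-mode fragments and includes the mergeable-rxbuf header, rounded up to a cache line, so truesize accounting matches what was actually allocated. The probe change also reports vi->curr_queue_pairs real TX/RX queues instead of a hard-coded 1. Plugging in typical values to check the arithmetic (64-byte cache lines assumed; the mergeable header is 12 bytes):

    #define EXAMPLE_ETH_HLEN        14      /* Ethernet header */
    #define EXAMPLE_VLAN_HLEN       4       /* 802.1Q tag */
    #define EXAMPLE_ETH_DATA_LEN    1500    /* default MTU */
    #define EXAMPLE_L1_CACHE        64      /* assumed cache line */
    #define EXAMPLE_ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))

    /* GOOD_PACKET_LEN  = 14 + 4 + 1500        = 1518              */
    /* MERGE_BUFFER_LEN = ALIGN(1518 + 12, 64) = ALIGN(1530, 64)   */
    /*                  = 1536 bytes per mergeable fragment        */
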
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c index b07f164d65cf..20e49095db2a 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c | |||
| @@ -187,17 +187,17 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah) | |||
| 187 | INIT_INI_ARRAY(&ah->iniCckfirJapan2484, | 187 | INIT_INI_ARRAY(&ah->iniCckfirJapan2484, |
| 188 | ar9485_1_1_baseband_core_txfir_coeff_japan_2484); | 188 | ar9485_1_1_baseband_core_txfir_coeff_japan_2484); |
| 189 | 189 | ||
| 190 | /* Load PCIE SERDES settings from INI */ | 190 | if (ah->config.no_pll_pwrsave) { |
| 191 | 191 | INIT_INI_ARRAY(&ah->iniPcieSerdes, | |
| 192 | /* Awake Setting */ | 192 | ar9485_1_1_pcie_phy_clkreq_disable_L1); |
| 193 | 193 | INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, | |
| 194 | INIT_INI_ARRAY(&ah->iniPcieSerdes, | 194 | ar9485_1_1_pcie_phy_clkreq_disable_L1); |
| 195 | ar9485_1_1_pcie_phy_clkreq_disable_L1); | 195 | } else { |
| 196 | 196 | INIT_INI_ARRAY(&ah->iniPcieSerdes, | |
| 197 | /* Sleep Setting */ | 197 | ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1); |
| 198 | 198 | INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, | |
| 199 | INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, | 199 | ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1); |
| 200 | ar9485_1_1_pcie_phy_clkreq_disable_L1); | 200 | } |
| 201 | } else if (AR_SREV_9462_21(ah)) { | 201 | } else if (AR_SREV_9462_21(ah)) { |
| 202 | INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], | 202 | INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], |
| 203 | ar9462_2p1_mac_core); | 203 | ar9462_2p1_mac_core); |
diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h index 6f899c692647..7c1845221e1c 100644 --- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h | |||
| @@ -32,13 +32,6 @@ static const u32 ar9485_1_1_mac_postamble[][5] = { | |||
| 32 | {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440}, | 32 | {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440}, |
| 33 | }; | 33 | }; |
| 34 | 34 | ||
| 35 | static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1[][2] = { | ||
| 36 | /* Addr allmodes */ | ||
| 37 | {0x00018c00, 0x18012e5e}, | ||
| 38 | {0x00018c04, 0x000801d8}, | ||
| 39 | {0x00018c08, 0x0000080c}, | ||
| 40 | }; | ||
| 41 | |||
| 42 | static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = { | 35 | static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = { |
| 43 | /* Addr allmodes */ | 36 | /* Addr allmodes */ |
| 44 | {0x00009e00, 0x037216a0}, | 37 | {0x00009e00, 0x037216a0}, |
| @@ -1101,20 +1094,6 @@ static const u32 ar9485_common_rx_gain_1_1[][2] = { | |||
| 1101 | {0x0000a1fc, 0x00000296}, | 1094 | {0x0000a1fc, 0x00000296}, |
| 1102 | }; | 1095 | }; |
| 1103 | 1096 | ||
| 1104 | static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_enable_L1[][2] = { | ||
| 1105 | /* Addr allmodes */ | ||
| 1106 | {0x00018c00, 0x18052e5e}, | ||
| 1107 | {0x00018c04, 0x000801d8}, | ||
| 1108 | {0x00018c08, 0x0000080c}, | ||
| 1109 | }; | ||
| 1110 | |||
| 1111 | static const u32 ar9485_1_1_pcie_phy_clkreq_enable_L1[][2] = { | ||
| 1112 | /* Addr allmodes */ | ||
| 1113 | {0x00018c00, 0x18053e5e}, | ||
| 1114 | {0x00018c04, 0x000801d8}, | ||
| 1115 | {0x00018c08, 0x0000080c}, | ||
| 1116 | }; | ||
| 1117 | |||
| 1118 | static const u32 ar9485_1_1_soc_preamble[][2] = { | 1097 | static const u32 ar9485_1_1_soc_preamble[][2] = { |
| 1119 | /* Addr allmodes */ | 1098 | /* Addr allmodes */ |
| 1120 | {0x00004014, 0xba280400}, | 1099 | {0x00004014, 0xba280400}, |
| @@ -1173,13 +1152,6 @@ static const u32 ar9485_1_1_baseband_postamble[][5] = { | |||
| 1173 | {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, | 1152 | {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, |
| 1174 | }; | 1153 | }; |
| 1175 | 1154 | ||
| 1176 | static const u32 ar9485_1_1_pcie_phy_clkreq_disable_L1[][2] = { | ||
| 1177 | /* Addr allmodes */ | ||
| 1178 | {0x00018c00, 0x18013e5e}, | ||
| 1179 | {0x00018c04, 0x000801d8}, | ||
| 1180 | {0x00018c08, 0x0000080c}, | ||
| 1181 | }; | ||
| 1182 | |||
| 1183 | static const u32 ar9485_1_1_radio_postamble[][2] = { | 1155 | static const u32 ar9485_1_1_radio_postamble[][2] = { |
| 1184 | /* Addr allmodes */ | 1156 | /* Addr allmodes */ |
| 1185 | {0x0001609c, 0x0b283f31}, | 1157 | {0x0001609c, 0x0b283f31}, |
| @@ -1358,4 +1330,18 @@ static const u32 ar9485_1_1_baseband_core_txfir_coeff_japan_2484[][2] = { | |||
| 1358 | {0x0000a3a0, 0xca9228ee}, | 1330 | {0x0000a3a0, 0xca9228ee}, |
| 1359 | }; | 1331 | }; |
| 1360 | 1332 | ||
| 1333 | static const u32 ar9485_1_1_pcie_phy_clkreq_disable_L1[][2] = { | ||
| 1334 | /* Addr allmodes */ | ||
| 1335 | {0x00018c00, 0x18013e5e}, | ||
| 1336 | {0x00018c04, 0x000801d8}, | ||
| 1337 | {0x00018c08, 0x0000080c}, | ||
| 1338 | }; | ||
| 1339 | |||
| 1340 | static const u32 ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1[][2] = { | ||
| 1341 | /* Addr allmodes */ | ||
| 1342 | {0x00018c00, 0x1801265e}, | ||
| 1343 | {0x00018c04, 0x000801d8}, | ||
| 1344 | {0x00018c08, 0x0000080c}, | ||
| 1345 | }; | ||
| 1346 | |||
| 1361 | #endif /* INITVALS_9485_H */ | 1347 | #endif /* INITVALS_9485_H */ |
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index e7a38d844a6a..60a5da53668f 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h | |||
| @@ -632,15 +632,16 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs); | |||
| 632 | /* Main driver core */ | 632 | /* Main driver core */ |
| 633 | /********************/ | 633 | /********************/ |
| 634 | 634 | ||
| 635 | #define ATH9K_PCI_CUS198 0x0001 | 635 | #define ATH9K_PCI_CUS198 0x0001 |
| 636 | #define ATH9K_PCI_CUS230 0x0002 | 636 | #define ATH9K_PCI_CUS230 0x0002 |
| 637 | #define ATH9K_PCI_CUS217 0x0004 | 637 | #define ATH9K_PCI_CUS217 0x0004 |
| 638 | #define ATH9K_PCI_CUS252 0x0008 | 638 | #define ATH9K_PCI_CUS252 0x0008 |
| 639 | #define ATH9K_PCI_WOW 0x0010 | 639 | #define ATH9K_PCI_WOW 0x0010 |
| 640 | #define ATH9K_PCI_BT_ANT_DIV 0x0020 | 640 | #define ATH9K_PCI_BT_ANT_DIV 0x0020 |
| 641 | #define ATH9K_PCI_D3_L1_WAR 0x0040 | 641 | #define ATH9K_PCI_D3_L1_WAR 0x0040 |
| 642 | #define ATH9K_PCI_AR9565_1ANT 0x0080 | 642 | #define ATH9K_PCI_AR9565_1ANT 0x0080 |
| 643 | #define ATH9K_PCI_AR9565_2ANT 0x0100 | 643 | #define ATH9K_PCI_AR9565_2ANT 0x0100 |
| 644 | #define ATH9K_PCI_NO_PLL_PWRSAVE 0x0200 | ||
| 644 | 645 | ||
| 645 | /* | 646 | /* |
| 646 | * Default cache line size, in bytes. | 647 | * Default cache line size, in bytes. |
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.c b/drivers/net/wireless/ath/ath9k/dfs_debug.c index 90b8342d1ed4..8824610c21fb 100644 --- a/drivers/net/wireless/ath/ath9k/dfs_debug.c +++ b/drivers/net/wireless/ath/ath9k/dfs_debug.c | |||
| @@ -44,14 +44,20 @@ static ssize_t read_file_dfs(struct file *file, char __user *user_buf, | |||
| 44 | if (buf == NULL) | 44 | if (buf == NULL) |
| 45 | return -ENOMEM; | 45 | return -ENOMEM; |
| 46 | 46 | ||
| 47 | if (sc->dfs_detector) | ||
| 48 | dfs_pool_stats = sc->dfs_detector->get_stats(sc->dfs_detector); | ||
| 49 | |||
| 50 | len += scnprintf(buf + len, size - len, "DFS support for " | 47 | len += scnprintf(buf + len, size - len, "DFS support for " |
| 51 | "macVersion = 0x%x, macRev = 0x%x: %s\n", | 48 | "macVersion = 0x%x, macRev = 0x%x: %s\n", |
| 52 | hw_ver->macVersion, hw_ver->macRev, | 49 | hw_ver->macVersion, hw_ver->macRev, |
| 53 | (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ? | 50 | (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ? |
| 54 | "enabled" : "disabled"); | 51 | "enabled" : "disabled"); |
| 52 | |||
| 53 | if (!sc->dfs_detector) { | ||
| 54 | len += scnprintf(buf + len, size - len, | ||
| 55 | "DFS detector not enabled\n"); | ||
| 56 | goto exit; | ||
| 57 | } | ||
| 58 | |||
| 59 | dfs_pool_stats = sc->dfs_detector->get_stats(sc->dfs_detector); | ||
| 60 | |||
| 55 | len += scnprintf(buf + len, size - len, "Pulse detector statistics:\n"); | 61 | len += scnprintf(buf + len, size - len, "Pulse detector statistics:\n"); |
| 56 | ATH9K_DFS_STAT("pulse events reported ", pulses_total); | 62 | ATH9K_DFS_STAT("pulse events reported ", pulses_total); |
| 57 | ATH9K_DFS_STAT("invalid pulse events ", pulses_no_dfs); | 63 | ATH9K_DFS_STAT("invalid pulse events ", pulses_no_dfs); |
| @@ -76,6 +82,7 @@ static ssize_t read_file_dfs(struct file *file, char __user *user_buf, | |||
| 76 | ATH9K_DFS_POOL_STAT("Seqs. alloc error ", pseq_alloc_error); | 82 | ATH9K_DFS_POOL_STAT("Seqs. alloc error ", pseq_alloc_error); |
| 77 | ATH9K_DFS_POOL_STAT("Seqs. in use ", pseq_used); | 83 | ATH9K_DFS_POOL_STAT("Seqs. in use ", pseq_used); |
| 78 | 84 | ||
| 85 | exit: | ||
| 79 | if (len > size) | 86 | if (len > size) |
| 80 | len = size; | 87 | len = size; |
| 81 | 88 | ||
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index 9ea24f1cba73..a2c9a5dbac6b 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h | |||
| @@ -316,6 +316,7 @@ struct ath9k_ops_config { | |||
| 316 | u32 ant_ctrl_comm2g_switch_enable; | 316 | u32 ant_ctrl_comm2g_switch_enable; |
| 317 | bool xatten_margin_cfg; | 317 | bool xatten_margin_cfg; |
| 318 | bool alt_mingainidx; | 318 | bool alt_mingainidx; |
| 319 | bool no_pll_pwrsave; | ||
| 319 | }; | 320 | }; |
| 320 | 321 | ||
| 321 | enum ath9k_int { | 322 | enum ath9k_int { |
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index d8643ebabd30..710192ed27ed 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c | |||
| @@ -609,6 +609,11 @@ static void ath9k_init_platform(struct ath_softc *sc) | |||
| 609 | ah->config.pcie_waen = 0x0040473b; | 609 | ah->config.pcie_waen = 0x0040473b; |
| 610 | ath_info(common, "Enable WAR for ASPM D3/L1\n"); | 610 | ath_info(common, "Enable WAR for ASPM D3/L1\n"); |
| 611 | } | 611 | } |
| 612 | |||
| 613 | if (sc->driver_data & ATH9K_PCI_NO_PLL_PWRSAVE) { | ||
| 614 | ah->config.no_pll_pwrsave = true; | ||
| 615 | ath_info(common, "Disable PLL PowerSave\n"); | ||
| 616 | } | ||
| 612 | } | 617 | } |
| 613 | 618 | ||
| 614 | static void ath9k_eeprom_request_cb(const struct firmware *eeprom_blob, | 619 | static void ath9k_eeprom_request_cb(const struct firmware *eeprom_blob, |
| @@ -863,8 +868,8 @@ static const struct ieee80211_iface_combination if_comb[] = { | |||
| 863 | .max_interfaces = 1, | 868 | .max_interfaces = 1, |
| 864 | .num_different_channels = 1, | 869 | .num_different_channels = 1, |
| 865 | .beacon_int_infra_match = true, | 870 | .beacon_int_infra_match = true, |
| 866 | .radar_detect_widths = BIT(NL80211_CHAN_NO_HT) | | 871 | .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | |
| 867 | BIT(NL80211_CHAN_HT20), | 872 | BIT(NL80211_CHAN_WIDTH_20), |
| 868 | } | 873 | } |
| 869 | }; | 874 | }; |
| 870 | 875 | ||
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index 7e4c2524b630..b5656fce4ff5 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c | |||
| @@ -195,6 +195,93 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = { | |||
| 195 | 0x3219), | 195 | 0x3219), |
| 196 | .driver_data = ATH9K_PCI_BT_ANT_DIV }, | 196 | .driver_data = ATH9K_PCI_BT_ANT_DIV }, |
| 197 | 197 | ||
| 198 | /* AR9485 cards with PLL power-save disabled by default. */ | ||
| 199 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, | ||
| 200 | 0x0032, | ||
| 201 | PCI_VENDOR_ID_AZWAVE, | ||
| 202 | 0x2C97), | ||
| 203 | .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, | ||
| 204 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, | ||
| 205 | 0x0032, | ||
| 206 | PCI_VENDOR_ID_AZWAVE, | ||
| 207 | 0x2100), | ||
| 208 | .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, | ||
| 209 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, | ||
| 210 | 0x0032, | ||
| 211 | 0x1C56, /* ASKEY */ | ||
| 212 | 0x4001), | ||
| 213 | .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, | ||
| 214 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, | ||
| 215 | 0x0032, | ||
| 216 | 0x11AD, /* LITEON */ | ||
| 217 | 0x6627), | ||
| 218 | .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, | ||
| 219 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, | ||
| 220 | 0x0032, | ||
| 221 | 0x11AD, /* LITEON */ | ||
| 222 | 0x6628), | ||
| 223 | .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, | ||
| 224 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, | ||
| 225 | 0x0032, | ||
| 226 | PCI_VENDOR_ID_FOXCONN, | ||
| 227 | 0xE04E), | ||
| 228 | .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, | ||
| 229 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, | ||
| 230 | 0x0032, | ||
| 231 | PCI_VENDOR_ID_FOXCONN, | ||
| 232 | 0xE04F), | ||
| 233 | .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, | ||
| 234 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, | ||
| 235 | 0x0032, | ||
| 236 | 0x144F, /* ASKEY */ | ||
| 237 | 0x7197), | ||
| 238 | .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, | ||
| 239 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, | ||
| 240 | 0x0032, | ||
| 241 | 0x1B9A, /* XAVI */ | ||
| 242 | 0x2000), | ||
| 243 | .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, | ||
| 244 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, | ||
| 245 | 0x0032, | ||
| 246 | 0x1B9A, /* XAVI */ | ||
| 247 | 0x2001), | ||
| 248 | .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, | ||
| 249 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, | ||
| 250 | 0x0032, | ||
| 251 | PCI_VENDOR_ID_AZWAVE, | ||
| 252 | 0x1186), | ||
| 253 | .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, | ||
| 254 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, | ||
| 255 | 0x0032, | ||
| 256 | PCI_VENDOR_ID_AZWAVE, | ||
| 257 | 0x1F86), | ||
| 258 | .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, | ||
| 259 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, | ||
| 260 | 0x0032, | ||
| 261 | PCI_VENDOR_ID_AZWAVE, | ||
| 262 | 0x1195), | ||
| 263 | .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, | ||
| 264 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, | ||
| 265 | 0x0032, | ||
| 266 | PCI_VENDOR_ID_AZWAVE, | ||
| 267 | 0x1F95), | ||
| 268 | .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, | ||
| 269 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, | ||
| 270 | 0x0032, | ||
| 271 | 0x1B9A, /* XAVI */ | ||
| 272 | 0x1C00), | ||
| 273 | .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, | ||
| 274 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, | ||
| 275 | 0x0032, | ||
| 276 | 0x1B9A, /* XAVI */ | ||
| 277 | 0x1C01), | ||
| 278 | .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, | ||
| 279 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, | ||
| 280 | 0x0032, | ||
| 281 | PCI_VENDOR_ID_ASUSTEK, | ||
| 282 | 0x850D), | ||
| 283 | .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, | ||
| 284 | |||
| 198 | { PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */ | 285 | { PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */ |
| 199 | { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */ | 286 | { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */ |
| 200 | 287 | ||
diff --git a/drivers/net/wireless/ath/wcn36xx/debug.c b/drivers/net/wireless/ath/wcn36xx/debug.c index 5b84f7ae0b1e..ef44a2da644d 100644 --- a/drivers/net/wireless/ath/wcn36xx/debug.c +++ b/drivers/net/wireless/ath/wcn36xx/debug.c | |||
| @@ -126,7 +126,7 @@ static ssize_t write_file_dump(struct file *file, | |||
| 126 | if (begin == NULL) | 126 | if (begin == NULL) |
| 127 | break; | 127 | break; |
| 128 | 128 | ||
| 129 | if (kstrtoul(begin, 0, (unsigned long *)(arg + i)) != 0) | 129 | if (kstrtou32(begin, 0, &arg[i]) != 0) |
| 130 | break; | 130 | break; |
| 131 | } | 131 | } |
| 132 | 132 | ||
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index f8c3a10510c2..de9eb2cfbf4b 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c | |||
| @@ -1286,7 +1286,8 @@ int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif, | |||
| 1286 | } else { | 1286 | } else { |
| 1287 | wcn36xx_err("Beacon is to big: beacon size=%d\n", | 1287 | wcn36xx_err("Beacon is to big: beacon size=%d\n", |
| 1288 | msg_body.beacon_length); | 1288 | msg_body.beacon_length); |
| 1289 | return -ENOMEM; | 1289 | ret = -ENOMEM; |
| 1290 | goto out; | ||
| 1290 | } | 1291 | } |
| 1291 | memcpy(msg_body.bssid, vif->addr, ETH_ALEN); | 1292 | memcpy(msg_body.bssid, vif->addr, ETH_ALEN); |
| 1292 | 1293 | ||
| @@ -1327,7 +1328,8 @@ int wcn36xx_smd_update_proberesp_tmpl(struct wcn36xx *wcn, | |||
| 1327 | if (skb->len > BEACON_TEMPLATE_SIZE) { | 1328 | if (skb->len > BEACON_TEMPLATE_SIZE) { |
| 1328 | wcn36xx_warn("probe response template is too big: %d\n", | 1329 | wcn36xx_warn("probe response template is too big: %d\n", |
| 1329 | skb->len); | 1330 | skb->len); |
| 1330 | return -E2BIG; | 1331 | ret = -E2BIG; |
| 1332 | goto out; | ||
| 1331 | } | 1333 | } |
| 1332 | 1334 | ||
| 1333 | msg.probe_resp_template_len = skb->len; | 1335 | msg.probe_resp_template_len = skb->len; |
| @@ -1606,7 +1608,8 @@ int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn, | |||
| 1606 | /* TODO: it also support ARP response type */ | 1608 | /* TODO: it also support ARP response type */ |
| 1607 | } else { | 1609 | } else { |
| 1608 | wcn36xx_warn("unknow keep alive packet type %d\n", packet_type); | 1610 | wcn36xx_warn("unknow keep alive packet type %d\n", packet_type); |
| 1609 | return -EINVAL; | 1611 | ret = -EINVAL; |
| 1612 | goto out; | ||
| 1610 | } | 1613 | } |
| 1611 | 1614 | ||
| 1612 | PREPARE_HAL_BUF(wcn->hal_buf, msg_body); | 1615 | PREPARE_HAL_BUF(wcn->hal_buf, msg_body); |
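
All three wcn36xx fixes replace early returns with ret = ...; goto out. The excerpts do not show the out: label, but these send helpers take a mutex at entry, so the early returns presumably leaked it; the single-exit form runs the cleanup on every path. A sketch of the shape, with a hypothetical lock:

    #include <linux/errno.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(example_lock);

    static int example_send(size_t len, size_t max)
    {
            int ret = 0;

            mutex_lock(&example_lock);
            if (len > max) {
                    ret = -E2BIG;
                    goto out;       /* was: return -E2BIG (lock leak) */
            }
            /* ... build and send the message ... */
    out:
            mutex_unlock(&example_lock);
            return ret;
    }
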
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c index 668dd27616a0..cc6a0a586f0b 100644 --- a/drivers/net/wireless/libertas/debugfs.c +++ b/drivers/net/wireless/libertas/debugfs.c | |||
| @@ -913,7 +913,10 @@ static ssize_t lbs_debugfs_write(struct file *f, const char __user *buf, | |||
| 913 | char *p2; | 913 | char *p2; |
| 914 | struct debug_data *d = f->private_data; | 914 | struct debug_data *d = f->private_data; |
| 915 | 915 | ||
| 916 | pdata = kmalloc(cnt, GFP_KERNEL); | 916 | if (cnt == 0) |
| 917 | return 0; | ||
| 918 | |||
| 919 | pdata = kmalloc(cnt + 1, GFP_KERNEL); | ||
| 917 | if (pdata == NULL) | 920 | if (pdata == NULL) |
| 918 | return 0; | 921 | return 0; |
| 919 | 922 | ||
| @@ -922,6 +925,7 @@ static ssize_t lbs_debugfs_write(struct file *f, const char __user *buf, | |||
| 922 | kfree(pdata); | 925 | kfree(pdata); |
| 923 | return 0; | 926 | return 0; |
| 924 | } | 927 | } |
| 928 | pdata[cnt] = '\0'; | ||
| 925 | 929 | ||
| 926 | p0 = pdata; | 930 | p0 = pdata; |
| 927 | for (i = 0; i < num_of_items; i++) { | 931 | for (i = 0; i < num_of_items; i++) { |
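
The libertas handler parses the written bytes with string routines, but user data carries no terminator; allocating cnt + 1, rejecting cnt == 0, and storing a final '\0' bounds every later scan. The reusable shape, as a standalone sketch:

    #include <linux/slab.h>
    #include <linux/uaccess.h>

    /* Copy a debugfs write into a proper NUL-terminated string, or
     * return NULL on any failure (hypothetical helper). */
    static char *example_copy_user_string(const char __user *buf,
                                          size_t cnt)
    {
            char *pdata;

            if (cnt == 0)
                    return NULL;

            pdata = kmalloc(cnt + 1, GFP_KERNEL);
            if (!pdata)
                    return NULL;

            if (copy_from_user(pdata, buf, cnt)) {
                    kfree(pdata);
                    return NULL;
            }
            pdata[cnt] = '\0';
            return pdata;
    }
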
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c index ef8c98e21098..f499efc6abcf 100644 --- a/drivers/net/wireless/libertas/if_cs.c +++ b/drivers/net/wireless/libertas/if_cs.c | |||
| @@ -902,6 +902,7 @@ static int if_cs_probe(struct pcmcia_device *p_dev) | |||
| 902 | if (card->model == MODEL_UNKNOWN) { | 902 | if (card->model == MODEL_UNKNOWN) { |
| 903 | pr_err("unsupported manf_id 0x%04x / card_id 0x%04x\n", | 903 | pr_err("unsupported manf_id 0x%04x / card_id 0x%04x\n", |
| 904 | p_dev->manf_id, p_dev->card_id); | 904 | p_dev->manf_id, p_dev->card_id); |
| 905 | ret = -ENODEV; | ||
| 905 | goto out2; | 906 | goto out2; |
| 906 | } | 907 | } |
| 907 | 908 | ||
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index de0df86704e7..9df7bc91a26f 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
| @@ -2097,7 +2097,7 @@ out: | |||
| 2097 | } | 2097 | } |
| 2098 | 2098 | ||
| 2099 | /* Generic Netlink operations array */ | 2099 | /* Generic Netlink operations array */ |
| 2100 | static struct genl_ops hwsim_ops[] = { | 2100 | static const struct genl_ops hwsim_ops[] = { |
| 2101 | { | 2101 | { |
| 2102 | .cmd = HWSIM_CMD_REGISTER, | 2102 | .cmd = HWSIM_CMD_REGISTER, |
| 2103 | .policy = hwsim_genl_policy, | 2103 | .policy = hwsim_genl_policy, |
| @@ -2148,8 +2148,7 @@ static int hwsim_init_netlink(void) | |||
| 2148 | 2148 | ||
| 2149 | printk(KERN_INFO "mac80211_hwsim: initializing netlink\n"); | 2149 | printk(KERN_INFO "mac80211_hwsim: initializing netlink\n"); |
| 2150 | 2150 | ||
| 2151 | rc = genl_register_family_with_ops(&hwsim_genl_family, | 2151 | rc = genl_register_family_with_ops(&hwsim_genl_family, hwsim_ops); |
| 2152 | hwsim_ops, ARRAY_SIZE(hwsim_ops)); | ||
| 2153 | if (rc) | 2152 | if (rc) |
| 2154 | goto failure; | 2153 | goto failure; |
| 2155 | 2154 | ||
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h index f80f30b6160e..c8385ec77a86 100644 --- a/drivers/net/wireless/mwifiex/fw.h +++ b/drivers/net/wireless/mwifiex/fw.h | |||
| @@ -1020,8 +1020,8 @@ struct mwifiex_power_group { | |||
| 1020 | } __packed; | 1020 | } __packed; |
| 1021 | 1021 | ||
| 1022 | struct mwifiex_types_power_group { | 1022 | struct mwifiex_types_power_group { |
| 1023 | u16 type; | 1023 | __le16 type; |
| 1024 | u16 length; | 1024 | __le16 length; |
| 1025 | } __packed; | 1025 | } __packed; |
| 1026 | 1026 | ||
| 1027 | struct host_cmd_ds_txpwr_cfg { | 1027 | struct host_cmd_ds_txpwr_cfg { |
diff --git a/drivers/net/wireless/mwifiex/ie.c b/drivers/net/wireless/mwifiex/ie.c index 220af4fe0fc6..81ac001ee741 100644 --- a/drivers/net/wireless/mwifiex/ie.c +++ b/drivers/net/wireless/mwifiex/ie.c | |||
| @@ -82,7 +82,7 @@ mwifiex_update_autoindex_ies(struct mwifiex_private *priv, | |||
| 82 | struct mwifiex_ie_list *ie_list) | 82 | struct mwifiex_ie_list *ie_list) |
| 83 | { | 83 | { |
| 84 | u16 travel_len, index, mask; | 84 | u16 travel_len, index, mask; |
| 85 | s16 input_len; | 85 | s16 input_len, tlv_len; |
| 86 | struct mwifiex_ie *ie; | 86 | struct mwifiex_ie *ie; |
| 87 | u8 *tmp; | 87 | u8 *tmp; |
| 88 | 88 | ||
| @@ -91,11 +91,13 @@ mwifiex_update_autoindex_ies(struct mwifiex_private *priv, | |||
| 91 | 91 | ||
| 92 | ie_list->len = 0; | 92 | ie_list->len = 0; |
| 93 | 93 | ||
| 94 | while (input_len > 0) { | 94 | while (input_len >= sizeof(struct mwifiex_ie_types_header)) { |
| 95 | ie = (struct mwifiex_ie *)(((u8 *)ie_list) + travel_len); | 95 | ie = (struct mwifiex_ie *)(((u8 *)ie_list) + travel_len); |
| 96 | input_len -= le16_to_cpu(ie->ie_length) + MWIFIEX_IE_HDR_SIZE; | 96 | tlv_len = le16_to_cpu(ie->ie_length); |
| 97 | travel_len += le16_to_cpu(ie->ie_length) + MWIFIEX_IE_HDR_SIZE; | 97 | travel_len += tlv_len + MWIFIEX_IE_HDR_SIZE; |
| 98 | 98 | ||
| 99 | if (input_len < tlv_len + MWIFIEX_IE_HDR_SIZE) | ||
| 100 | return -1; | ||
| 99 | index = le16_to_cpu(ie->ie_index); | 101 | index = le16_to_cpu(ie->ie_index); |
| 100 | mask = le16_to_cpu(ie->mgmt_subtype_mask); | 102 | mask = le16_to_cpu(ie->mgmt_subtype_mask); |
| 101 | 103 | ||
| @@ -132,6 +134,7 @@ mwifiex_update_autoindex_ies(struct mwifiex_private *priv, | |||
| 132 | le16_add_cpu(&ie_list->len, | 134 | le16_add_cpu(&ie_list->len, |
| 133 | le16_to_cpu(priv->mgmt_ie[index].ie_length) + | 135 | le16_to_cpu(priv->mgmt_ie[index].ie_length) + |
| 134 | MWIFIEX_IE_HDR_SIZE); | 136 | MWIFIEX_IE_HDR_SIZE); |
| 137 | input_len -= tlv_len + MWIFIEX_IE_HDR_SIZE; | ||
| 135 | } | 138 | } |
| 136 | 139 | ||
| 137 | if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) | 140 | if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) |
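
The mwifiex fixes in this group share one theme: treat firmware-supplied lengths as untrusted. fw.h marks the on-wire power-group fields __le16 so every read goes through le16_to_cpu(), and the ie.c loop here (like the sta_cmdresp.c rewrite below) only dereferences a TLV after confirming a full header fits in the remaining bytes and the advertised body does too. A generic bounds-checked walk, as a sketch:

    #include <linux/errno.h>
    #include <linux/kernel.h>

    struct example_tlv_hdr {
            __le16 type;
            __le16 len;
    } __packed;

    /* Generic helper, not mwifiex's exact code: never read past what
     * the buffer owner said was there. */
    static int example_walk_tlvs(const u8 *buf, size_t left)
    {
            while (left >= sizeof(struct example_tlv_hdr)) {
                    const struct example_tlv_hdr *hdr = (const void *)buf;
                    u16 body = le16_to_cpu(hdr->len);

                    if (left < sizeof(*hdr) + body)
                            return -EINVAL;

                    /* ... dispatch on le16_to_cpu(hdr->type) ... */

                    buf  += sizeof(*hdr) + body;
                    left -= sizeof(*hdr) + body;
            }
            return 0;
    }
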
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c index 1576104e3d95..9bf8898743ab 100644 --- a/drivers/net/wireless/mwifiex/sdio.c +++ b/drivers/net/wireless/mwifiex/sdio.c | |||
| @@ -1029,7 +1029,10 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter, | |||
| 1029 | struct sk_buff *skb, u32 upld_typ) | 1029 | struct sk_buff *skb, u32 upld_typ) |
| 1030 | { | 1030 | { |
| 1031 | u8 *cmd_buf; | 1031 | u8 *cmd_buf; |
| 1032 | __le16 *curr_ptr = (__le16 *)skb->data; | ||
| 1033 | u16 pkt_len = le16_to_cpu(*curr_ptr); | ||
| 1032 | 1034 | ||
| 1035 | skb_trim(skb, pkt_len); | ||
| 1033 | skb_pull(skb, INTF_HEADER_LEN); | 1036 | skb_pull(skb, INTF_HEADER_LEN); |
| 1034 | 1037 | ||
| 1035 | switch (upld_typ) { | 1038 | switch (upld_typ) { |
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c index 7d66018a2e33..2181ee283d82 100644 --- a/drivers/net/wireless/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/mwifiex/sta_cmd.c | |||
| @@ -239,14 +239,14 @@ static int mwifiex_cmd_tx_power_cfg(struct host_cmd_ds_command *cmd, | |||
| 239 | memmove(cmd_txp_cfg, txp, | 239 | memmove(cmd_txp_cfg, txp, |
| 240 | sizeof(struct host_cmd_ds_txpwr_cfg) + | 240 | sizeof(struct host_cmd_ds_txpwr_cfg) + |
| 241 | sizeof(struct mwifiex_types_power_group) + | 241 | sizeof(struct mwifiex_types_power_group) + |
| 242 | pg_tlv->length); | 242 | le16_to_cpu(pg_tlv->length)); |
| 243 | 243 | ||
| 244 | pg_tlv = (struct mwifiex_types_power_group *) ((u8 *) | 244 | pg_tlv = (struct mwifiex_types_power_group *) ((u8 *) |
| 245 | cmd_txp_cfg + | 245 | cmd_txp_cfg + |
| 246 | sizeof(struct host_cmd_ds_txpwr_cfg)); | 246 | sizeof(struct host_cmd_ds_txpwr_cfg)); |
| 247 | cmd->size = cpu_to_le16(le16_to_cpu(cmd->size) + | 247 | cmd->size = cpu_to_le16(le16_to_cpu(cmd->size) + |
| 248 | sizeof(struct mwifiex_types_power_group) + | 248 | sizeof(struct mwifiex_types_power_group) + |
| 249 | pg_tlv->length); | 249 | le16_to_cpu(pg_tlv->length)); |
| 250 | } else { | 250 | } else { |
| 251 | memmove(cmd_txp_cfg, txp, sizeof(*txp)); | 251 | memmove(cmd_txp_cfg, txp, sizeof(*txp)); |
| 252 | } | 252 | } |
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c index 58a6013712d2..2675ca7f8d14 100644 --- a/drivers/net/wireless/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c | |||
| @@ -274,17 +274,20 @@ static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv, | |||
| 274 | struct host_cmd_ds_tx_rate_cfg *rate_cfg = &resp->params.tx_rate_cfg; | 274 | struct host_cmd_ds_tx_rate_cfg *rate_cfg = &resp->params.tx_rate_cfg; |
| 275 | struct mwifiex_rate_scope *rate_scope; | 275 | struct mwifiex_rate_scope *rate_scope; |
| 276 | struct mwifiex_ie_types_header *head; | 276 | struct mwifiex_ie_types_header *head; |
| 277 | u16 tlv, tlv_buf_len; | 277 | u16 tlv, tlv_buf_len, tlv_buf_left; |
| 278 | u8 *tlv_buf; | 278 | u8 *tlv_buf; |
| 279 | u32 i; | 279 | u32 i; |
| 280 | 280 | ||
| 281 | tlv_buf = ((u8 *)rate_cfg) + | 281 | tlv_buf = ((u8 *)rate_cfg) + sizeof(struct host_cmd_ds_tx_rate_cfg); |
| 282 | sizeof(struct host_cmd_ds_tx_rate_cfg); | 282 | tlv_buf_left = le16_to_cpu(resp->size) - S_DS_GEN - sizeof(*rate_cfg); |
| 283 | tlv_buf_len = le16_to_cpu(*(__le16 *) (tlv_buf + sizeof(u16))); | ||
| 284 | 283 | ||
| 285 | while (tlv_buf && tlv_buf_len > 0) { | 284 | while (tlv_buf_left >= sizeof(*head)) { |
| 286 | tlv = (*tlv_buf); | 285 | head = (struct mwifiex_ie_types_header *)tlv_buf; |
| 287 | tlv = tlv | (*(tlv_buf + 1) << 8); | 286 | tlv = le16_to_cpu(head->type); |
| 287 | tlv_buf_len = le16_to_cpu(head->len); | ||
| 288 | |||
| 289 | if (tlv_buf_left < (sizeof(*head) + tlv_buf_len)) | ||
| 290 | break; | ||
| 288 | 291 | ||
| 289 | switch (tlv) { | 292 | switch (tlv) { |
| 290 | case TLV_TYPE_RATE_SCOPE: | 293 | case TLV_TYPE_RATE_SCOPE: |
| @@ -304,9 +307,8 @@ static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv, | |||
| 304 | /* Add RATE_DROP tlv here */ | 307 | /* Add RATE_DROP tlv here */ |
| 305 | } | 308 | } |
| 306 | 309 | ||
| 307 | head = (struct mwifiex_ie_types_header *) tlv_buf; | 310 | tlv_buf += (sizeof(*head) + tlv_buf_len); |
| 308 | tlv_buf += le16_to_cpu(head->len) + sizeof(*head); | 311 | tlv_buf_left -= (sizeof(*head) + tlv_buf_len); |
| 309 | tlv_buf_len -= le16_to_cpu(head->len); | ||
| 310 | } | 312 | } |
| 311 | 313 | ||
| 312 | priv->is_data_rate_auto = mwifiex_is_rate_auto(priv); | 314 | priv->is_data_rate_auto = mwifiex_is_rate_auto(priv); |
| @@ -340,13 +342,17 @@ static int mwifiex_get_power_level(struct mwifiex_private *priv, void *data_buf) | |||
| 340 | ((u8 *) data_buf + sizeof(struct host_cmd_ds_txpwr_cfg)); | 342 | ((u8 *) data_buf + sizeof(struct host_cmd_ds_txpwr_cfg)); |
| 341 | pg = (struct mwifiex_power_group *) | 343 | pg = (struct mwifiex_power_group *) |
| 342 | ((u8 *) pg_tlv_hdr + sizeof(struct mwifiex_types_power_group)); | 344 | ((u8 *) pg_tlv_hdr + sizeof(struct mwifiex_types_power_group)); |
| 343 | length = pg_tlv_hdr->length; | 345 | length = le16_to_cpu(pg_tlv_hdr->length); |
| 344 | if (length > 0) { | 346 | |
| 345 | max_power = pg->power_max; | 347 | /* At least one structure required to update power */ |
| 346 | min_power = pg->power_min; | 348 | if (length < sizeof(struct mwifiex_power_group)) |
| 347 | length -= sizeof(struct mwifiex_power_group); | 349 | return 0; |
| 348 | } | 350 | |
| 349 | while (length) { | 351 | max_power = pg->power_max; |
| 352 | min_power = pg->power_min; | ||
| 353 | length -= sizeof(struct mwifiex_power_group); | ||
| 354 | |||
| 355 | while (length >= sizeof(struct mwifiex_power_group)) { | ||
| 350 | pg++; | 356 | pg++; |
| 351 | if (max_power < pg->power_max) | 357 | if (max_power < pg->power_max) |
| 352 | max_power = pg->power_max; | 358 | max_power = pg->power_max; |
| @@ -356,10 +362,8 @@ static int mwifiex_get_power_level(struct mwifiex_private *priv, void *data_buf) | |||
| 356 | 362 | ||
| 357 | length -= sizeof(struct mwifiex_power_group); | 363 | length -= sizeof(struct mwifiex_power_group); |
| 358 | } | 364 | } |
| 359 | if (pg_tlv_hdr->length > 0) { | 365 | priv->min_tx_power_level = (u8) min_power; |
| 360 | priv->min_tx_power_level = (u8) min_power; | 366 | priv->max_tx_power_level = (u8) max_power; |
| 361 | priv->max_tx_power_level = (u8) max_power; | ||
| 362 | } | ||
| 363 | 367 | ||
| 364 | return 0; | 368 | return 0; |
| 365 | } | 369 | } |
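The mwifiex_get_power_level() rewrite above replaces an unguarded "while (length)" with whole-record iteration: require at least one full record before touching it, then only step while a complete record remains. A standalone sketch of that shape, with an illustrative record type in place of struct mwifiex_power_group:

#include <stdint.h>
#include <stddef.h>

struct power_group {		/* illustrative; not the driver's layout */
	int8_t power_min;
	int8_t power_max;
	uint8_t modulation_class;
};

static int scan_power_range(const struct power_group *pg, size_t length,
			    int8_t *min_out, int8_t *max_out)
{
	int8_t min_power, max_power;

	if (length < sizeof(*pg))	/* at least one record required */
		return -1;

	max_power = pg->power_max;
	min_power = pg->power_min;
	length -= sizeof(*pg);

	while (length >= sizeof(*pg)) {	/* whole records only */
		pg++;
		if (pg->power_max > max_power)
			max_power = pg->power_max;
		if (pg->power_min < min_power)
			min_power = pg->power_min;
		length -= sizeof(*pg);
	}

	*min_out = min_power;
	*max_out = max_power;
	return 0;
}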
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c index f084412eee0b..c8e029df770e 100644 --- a/drivers/net/wireless/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/mwifiex/sta_ioctl.c | |||
| @@ -638,8 +638,9 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv, | |||
| 638 | txp_cfg->mode = cpu_to_le32(1); | 638 | txp_cfg->mode = cpu_to_le32(1); |
| 639 | pg_tlv = (struct mwifiex_types_power_group *) | 639 | pg_tlv = (struct mwifiex_types_power_group *) |
| 640 | (buf + sizeof(struct host_cmd_ds_txpwr_cfg)); | 640 | (buf + sizeof(struct host_cmd_ds_txpwr_cfg)); |
| 641 | pg_tlv->type = TLV_TYPE_POWER_GROUP; | 641 | pg_tlv->type = cpu_to_le16(TLV_TYPE_POWER_GROUP); |
| 642 | pg_tlv->length = 4 * sizeof(struct mwifiex_power_group); | 642 | pg_tlv->length = |
| 643 | cpu_to_le16(4 * sizeof(struct mwifiex_power_group)); | ||
| 643 | pg = (struct mwifiex_power_group *) | 644 | pg = (struct mwifiex_power_group *) |
| 644 | (buf + sizeof(struct host_cmd_ds_txpwr_cfg) | 645 | (buf + sizeof(struct host_cmd_ds_txpwr_cfg) |
| 645 | + sizeof(struct mwifiex_types_power_group)); | 646 | + sizeof(struct mwifiex_types_power_group)); |
diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c index 1cfe5a738c47..92f76d655e6c 100644 --- a/drivers/net/wireless/mwifiex/uap_txrx.c +++ b/drivers/net/wireless/mwifiex/uap_txrx.c | |||
| @@ -97,6 +97,7 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv, | |||
| 97 | struct mwifiex_txinfo *tx_info; | 97 | struct mwifiex_txinfo *tx_info; |
| 98 | int hdr_chop; | 98 | int hdr_chop; |
| 99 | struct timeval tv; | 99 | struct timeval tv; |
| 100 | struct ethhdr *p_ethhdr; | ||
| 100 | u8 rfc1042_eth_hdr[ETH_ALEN] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 }; | 101 | u8 rfc1042_eth_hdr[ETH_ALEN] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 }; |
| 101 | 102 | ||
| 102 | uap_rx_pd = (struct uap_rxpd *)(skb->data); | 103 | uap_rx_pd = (struct uap_rxpd *)(skb->data); |
| @@ -112,14 +113,36 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv, | |||
| 112 | } | 113 | } |
| 113 | 114 | ||
| 114 | if (!memcmp(&rx_pkt_hdr->rfc1042_hdr, | 115 | if (!memcmp(&rx_pkt_hdr->rfc1042_hdr, |
| 115 | rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr))) | 116 | rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr))) { |
| 117 | /* Replace the 803 header and rfc1042 header (llc/snap) with | ||
| 118 | * an Ethernet II header, keep the src/dst and snap_type | ||
| 119 | * (ethertype). | ||
| 120 | * | ||
| 121 | * The firmware only passes up SNAP frames converting all RX | ||
| 122 | * data from 802.11 to 802.2/LLC/SNAP frames. | ||
| 123 | * | ||
| 124 | * To create the Ethernet II, just move the src, dst address | ||
| 125 | * right before the snap_type. | ||
| 126 | */ | ||
| 127 | p_ethhdr = (struct ethhdr *) | ||
| 128 | ((u8 *)(&rx_pkt_hdr->eth803_hdr) | ||
| 129 | + sizeof(rx_pkt_hdr->eth803_hdr) | ||
| 130 | + sizeof(rx_pkt_hdr->rfc1042_hdr) | ||
| 131 | - sizeof(rx_pkt_hdr->eth803_hdr.h_dest) | ||
| 132 | - sizeof(rx_pkt_hdr->eth803_hdr.h_source) | ||
| 133 | - sizeof(rx_pkt_hdr->rfc1042_hdr.snap_type)); | ||
| 134 | memcpy(p_ethhdr->h_source, rx_pkt_hdr->eth803_hdr.h_source, | ||
| 135 | sizeof(p_ethhdr->h_source)); | ||
| 136 | memcpy(p_ethhdr->h_dest, rx_pkt_hdr->eth803_hdr.h_dest, | ||
| 137 | sizeof(p_ethhdr->h_dest)); | ||
| 116 | /* Chop off the rxpd + the excess memory from | 138 | /* Chop off the rxpd + the excess memory from |
| 117 | * 802.2/llc/snap header that was removed. | 139 | * 802.2/llc/snap header that was removed. |
| 118 | */ | 140 | */ |
| 119 | hdr_chop = (u8 *)eth_hdr - (u8 *)uap_rx_pd; | 141 | hdr_chop = (u8 *)p_ethhdr - (u8 *)uap_rx_pd; |
| 120 | else | 142 | } else { |
| 121 | /* Chop off the rxpd */ | 143 | /* Chop off the rxpd */ |
| 122 | hdr_chop = (u8 *)&rx_pkt_hdr->eth803_hdr - (u8 *)uap_rx_pd; | 144 | hdr_chop = (u8 *)&rx_pkt_hdr->eth803_hdr - (u8 *)uap_rx_pd; |
| 145 | } | ||
| 123 | 146 | ||
| 124 | /* Chop off the leading header bytes so that it points | 147 | /* Chop off the leading header bytes so that it points |
| 125 | * to the start of either the reconstructed EthII frame | 148 | * to the start of either the reconstructed EthII frame |
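The uap_txrx.c hunk rebuilds an Ethernet II header in place over the tail of the 802.3 + LLC/SNAP headers, so the ethertype never has to be copied. The pointer arithmetic is easier to see standalone; the struct layouts below are illustrative stand-ins for the driver's packed wire-format headers:

#include <stdint.h>
#include <string.h>

struct eth803_hdr { uint8_t h_dest[6]; uint8_t h_source[6]; uint16_t len; };
struct snap_hdr   { uint8_t llc[3]; uint8_t oui[3]; uint16_t snap_type; };
struct eth2_hdr   { uint8_t h_dest[6]; uint8_t h_source[6]; uint16_t h_proto; };

/* Place the 14-byte Ethernet II header so that h_proto lands exactly on
 * snap_type: start = end of (803 + SNAP headers) minus sizeof(eth2_hdr).
 * Only dst/src then need to move forward. */
static struct eth2_hdr *rebuild_eth2(struct eth803_hdr *e803)
{
	struct eth2_hdr *eth2 = (struct eth2_hdr *)
		((uint8_t *)e803 + sizeof(struct eth803_hdr)
		 + sizeof(struct snap_hdr) - sizeof(struct eth2_hdr));

	/* Copy h_source before h_dest, as the hunk does: the h_dest copy
	 * overwrites bytes still holding the original source address. */
	memcpy(eth2->h_source, e803->h_source, sizeof(eth2->h_source));
	memcpy(eth2->h_dest, e803->h_dest, sizeof(eth2->h_dest));
	return eth2;
}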
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c index 5dd0ccc70b86..13eaeed03898 100644 --- a/drivers/net/wireless/mwifiex/wmm.c +++ b/drivers/net/wireless/mwifiex/wmm.c | |||
| @@ -722,6 +722,9 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv, | |||
| 722 | tlv_hdr = (struct mwifiex_ie_types_data *) curr; | 722 | tlv_hdr = (struct mwifiex_ie_types_data *) curr; |
| 723 | tlv_len = le16_to_cpu(tlv_hdr->header.len); | 723 | tlv_len = le16_to_cpu(tlv_hdr->header.len); |
| 724 | 724 | ||
| 725 | if (resp_len < tlv_len + sizeof(tlv_hdr->header)) | ||
| 726 | break; | ||
| 727 | |||
| 725 | switch (le16_to_cpu(tlv_hdr->header.type)) { | 728 | switch (le16_to_cpu(tlv_hdr->header.type)) { |
| 726 | case TLV_TYPE_WMMQSTATUS: | 729 | case TLV_TYPE_WMMQSTATUS: |
| 727 | tlv_wmm_qstatus = | 730 | tlv_wmm_qstatus = |
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c index 41a16d30c79c..e05d9b4c8317 100644 --- a/drivers/net/wireless/prism54/islpci_dev.c +++ b/drivers/net/wireless/prism54/islpci_dev.c | |||
| @@ -811,6 +811,10 @@ static const struct net_device_ops islpci_netdev_ops = { | |||
| 811 | .ndo_validate_addr = eth_validate_addr, | 811 | .ndo_validate_addr = eth_validate_addr, |
| 812 | }; | 812 | }; |
| 813 | 813 | ||
| 814 | static struct device_type wlan_type = { | ||
| 815 | .name = "wlan", | ||
| 816 | }; | ||
| 817 | |||
| 814 | struct net_device * | 818 | struct net_device * |
| 815 | islpci_setup(struct pci_dev *pdev) | 819 | islpci_setup(struct pci_dev *pdev) |
| 816 | { | 820 | { |
| @@ -821,9 +825,8 @@ islpci_setup(struct pci_dev *pdev) | |||
| 821 | return ndev; | 825 | return ndev; |
| 822 | 826 | ||
| 823 | pci_set_drvdata(pdev, ndev); | 827 | pci_set_drvdata(pdev, ndev); |
| 824 | #if defined(SET_NETDEV_DEV) | ||
| 825 | SET_NETDEV_DEV(ndev, &pdev->dev); | 828 | SET_NETDEV_DEV(ndev, &pdev->dev); |
| 826 | #endif | 829 | SET_NETDEV_DEVTYPE(ndev, &wlan_type); |
| 827 | 830 | ||
| 828 | /* setup the structure members */ | 831 | /* setup the structure members */ |
| 829 | ndev->base_addr = pci_resource_start(pdev, 0); | 832 | ndev->base_addr = pci_resource_start(pdev, 0); |
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index c5738f14c4ba..776aff3678ff 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c | |||
| @@ -2640,7 +2640,7 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev, | |||
| 2640 | 2640 | ||
| 2641 | if (rt2x00_rt(rt2x00dev, RT5392)) { | 2641 | if (rt2x00_rt(rt2x00dev, RT5392)) { |
| 2642 | rt2800_rfcsr_read(rt2x00dev, 50, &rfcsr); | 2642 | rt2800_rfcsr_read(rt2x00dev, 50, &rfcsr); |
| 2643 | if (info->default_power1 > POWER_BOUND) | 2643 | if (info->default_power2 > POWER_BOUND) |
| 2644 | rt2x00_set_field8(&rfcsr, RFCSR50_TX, POWER_BOUND); | 2644 | rt2x00_set_field8(&rfcsr, RFCSR50_TX, POWER_BOUND); |
| 2645 | else | 2645 | else |
| 2646 | rt2x00_set_field8(&rfcsr, RFCSR50_TX, | 2646 | rt2x00_set_field8(&rfcsr, RFCSR50_TX, |
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h index a0935987fa3a..7f40ab8e1bd8 100644 --- a/drivers/net/wireless/rt2x00/rt2x00lib.h +++ b/drivers/net/wireless/rt2x00/rt2x00lib.h | |||
| @@ -146,7 +146,7 @@ void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length); | |||
| 146 | * @local: frame is not from mac80211 | 146 | * @local: frame is not from mac80211 |
| 147 | */ | 147 | */ |
| 148 | int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, | 148 | int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, |
| 149 | bool local); | 149 | struct ieee80211_sta *sta, bool local); |
| 150 | 150 | ||
| 151 | /** | 151 | /** |
| 152 | * rt2x00queue_update_beacon - Send new beacon from mac80211 | 152 | * rt2x00queue_update_beacon - Send new beacon from mac80211 |
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c index 7c157857f5ce..2183e7978399 100644 --- a/drivers/net/wireless/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/rt2x00/rt2x00mac.c | |||
| @@ -90,7 +90,7 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev, | |||
| 90 | frag_skb->data, data_length, tx_info, | 90 | frag_skb->data, data_length, tx_info, |
| 91 | (struct ieee80211_rts *)(skb->data)); | 91 | (struct ieee80211_rts *)(skb->data)); |
| 92 | 92 | ||
| 93 | retval = rt2x00queue_write_tx_frame(queue, skb, true); | 93 | retval = rt2x00queue_write_tx_frame(queue, skb, NULL, true); |
| 94 | if (retval) { | 94 | if (retval) { |
| 95 | dev_kfree_skb_any(skb); | 95 | dev_kfree_skb_any(skb); |
| 96 | rt2x00_warn(rt2x00dev, "Failed to send RTS/CTS frame\n"); | 96 | rt2x00_warn(rt2x00dev, "Failed to send RTS/CTS frame\n"); |
| @@ -151,7 +151,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, | |||
| 151 | goto exit_fail; | 151 | goto exit_fail; |
| 152 | } | 152 | } |
| 153 | 153 | ||
| 154 | if (unlikely(rt2x00queue_write_tx_frame(queue, skb, false))) | 154 | if (unlikely(rt2x00queue_write_tx_frame(queue, skb, control->sta, false))) |
| 155 | goto exit_fail; | 155 | goto exit_fail; |
| 156 | 156 | ||
| 157 | /* | 157 | /* |
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c index 50590b1420a5..a5d38e8ad9e4 100644 --- a/drivers/net/wireless/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/rt2x00/rt2x00queue.c | |||
| @@ -635,7 +635,7 @@ static void rt2x00queue_bar_check(struct queue_entry *entry) | |||
| 635 | } | 635 | } |
| 636 | 636 | ||
| 637 | int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, | 637 | int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, |
| 638 | bool local) | 638 | struct ieee80211_sta *sta, bool local) |
| 639 | { | 639 | { |
| 640 | struct ieee80211_tx_info *tx_info; | 640 | struct ieee80211_tx_info *tx_info; |
| 641 | struct queue_entry *entry; | 641 | struct queue_entry *entry; |
| @@ -649,7 +649,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, | |||
| 649 | * after that we are free to use the skb->cb array | 649 | * after that we are free to use the skb->cb array |
| 650 | * for our information. | 650 | * for our information. |
| 651 | */ | 651 | */ |
| 652 | rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, NULL); | 652 | rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, sta); |
| 653 | 653 | ||
| 654 | /* | 654 | /* |
| 655 | * All information is retrieved from the skb->cb array, | 655 | * All information is retrieved from the skb->cb array, |
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c index 9a78e3daf742..ff784072fb42 100644 --- a/drivers/net/wireless/rtlwifi/base.c +++ b/drivers/net/wireless/rtlwifi/base.c | |||
| @@ -37,6 +37,7 @@ | |||
| 37 | 37 | ||
| 38 | #include <linux/ip.h> | 38 | #include <linux/ip.h> |
| 39 | #include <linux/module.h> | 39 | #include <linux/module.h> |
| 40 | #include <linux/udp.h> | ||
| 40 | 41 | ||
| 41 | /* | 42 | /* |
| 42 | *NOTICE!!!: This file will be very big, we should | 43 | *NOTICE!!!: This file will be very big, we should |
| @@ -1074,64 +1075,52 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx) | |||
| 1074 | if (!ieee80211_is_data(fc)) | 1075 | if (!ieee80211_is_data(fc)) |
| 1075 | return false; | 1076 | return false; |
| 1076 | 1077 | ||
| 1078 | ip = (const struct iphdr *)(skb->data + mac_hdr_len + | ||
| 1079 | SNAP_SIZE + PROTOC_TYPE_SIZE); | ||
| 1080 | ether_type = be16_to_cpup((__be16 *) | ||
| 1081 | (skb->data + mac_hdr_len + SNAP_SIZE)); | ||
| 1077 | 1082 | ||
| 1078 | ip = (struct iphdr *)((u8 *) skb->data + mac_hdr_len + | 1083 | switch (ether_type) { |
| 1079 | SNAP_SIZE + PROTOC_TYPE_SIZE); | 1084 | case ETH_P_IP: { |
| 1080 | ether_type = *(u16 *) ((u8 *) skb->data + mac_hdr_len + SNAP_SIZE); | 1085 | struct udphdr *udp; |
| 1081 | /* ether_type = ntohs(ether_type); */ | 1086 | u16 src; |
| 1082 | 1087 | u16 dst; | |
| 1083 | if (ETH_P_IP == ether_type) { | ||
| 1084 | if (IPPROTO_UDP == ip->protocol) { | ||
| 1085 | struct udphdr *udp = (struct udphdr *)((u8 *) ip + | ||
| 1086 | (ip->ihl << 2)); | ||
| 1087 | if (((((u8 *) udp)[1] == 68) && | ||
| 1088 | (((u8 *) udp)[3] == 67)) || | ||
| 1089 | ((((u8 *) udp)[1] == 67) && | ||
| 1090 | (((u8 *) udp)[3] == 68))) { | ||
| 1091 | /* | ||
| 1092 | * 68 : UDP BOOTP client | ||
| 1093 | * 67 : UDP BOOTP server | ||
| 1094 | */ | ||
| 1095 | RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), | ||
| 1096 | DBG_DMESG, "dhcp %s !!\n", | ||
| 1097 | is_tx ? "Tx" : "Rx"); | ||
| 1098 | |||
| 1099 | if (is_tx) { | ||
| 1100 | rtlpriv->enter_ps = false; | ||
| 1101 | schedule_work(&rtlpriv-> | ||
| 1102 | works.lps_change_work); | ||
| 1103 | ppsc->last_delaylps_stamp_jiffies = | ||
| 1104 | jiffies; | ||
| 1105 | } | ||
| 1106 | 1088 | ||
| 1107 | return true; | 1089 | if (ip->protocol != IPPROTO_UDP) |
| 1108 | } | 1090 | return false; |
| 1109 | } | 1091 | udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2)); |
| 1110 | } else if (ETH_P_ARP == ether_type) { | 1092 | src = be16_to_cpu(udp->source); |
| 1111 | if (is_tx) { | 1093 | dst = be16_to_cpu(udp->dest); |
| 1112 | rtlpriv->enter_ps = false; | ||
| 1113 | schedule_work(&rtlpriv->works.lps_change_work); | ||
| 1114 | ppsc->last_delaylps_stamp_jiffies = jiffies; | ||
| 1115 | } | ||
| 1116 | 1094 | ||
| 1117 | return true; | 1095 | /* If this case involves port 68 (UDP BOOTP client) connecting |
| 1118 | } else if (ETH_P_PAE == ether_type) { | 1096 | * with port 67 (UDP BOOTP server), then return true so that |
| 1097 | * the lowest speed is used. | ||
| 1098 | */ | ||
| 1099 | if (!((src == 68 && dst == 67) || (src == 67 && dst == 68))) | ||
| 1100 | return false; | ||
| 1101 | |||
| 1102 | RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG, | ||
| 1103 | "dhcp %s !!\n", is_tx ? "Tx" : "Rx"); | ||
| 1104 | break; | ||
| 1105 | } | ||
| 1106 | case ETH_P_ARP: | ||
| 1107 | break; | ||
| 1108 | case ETH_P_PAE: | ||
| 1119 | RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG, | 1109 | RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG, |
| 1120 | "802.1X %s EAPOL pkt!!\n", is_tx ? "Tx" : "Rx"); | 1110 | "802.1X %s EAPOL pkt!!\n", is_tx ? "Tx" : "Rx"); |
| 1121 | 1111 | break; | |
| 1122 | if (is_tx) { | 1112 | case ETH_P_IPV6: |
| 1123 | rtlpriv->enter_ps = false; | 1113 | /* TODO: Is this right? */ |
| 1124 | schedule_work(&rtlpriv->works.lps_change_work); | 1114 | return false; |
| 1125 | ppsc->last_delaylps_stamp_jiffies = jiffies; | 1115 | default: |
| 1126 | } | 1116 | return false; |
| 1127 | |||
| 1128 | return true; | ||
| 1129 | } else if (ETH_P_IPV6 == ether_type) { | ||
| 1130 | /* IPv6 */ | ||
| 1131 | return true; | ||
| 1132 | } | 1117 | } |
| 1133 | 1118 | if (is_tx) { | |
| 1134 | return false; | 1119 | rtlpriv->enter_ps = false; |
| 1120 | schedule_work(&rtlpriv->works.lps_change_work); | ||
| 1121 | ppsc->last_delaylps_stamp_jiffies = jiffies; | ||
| 1122 | } | ||
| 1123 | return true; | ||
| 1135 | } | 1124 | } |
| 1136 | EXPORT_SYMBOL_GPL(rtl_is_special_data); | 1125 | EXPORT_SYMBOL_GPL(rtl_is_special_data); |
| 1137 | 1126 | ||
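Rewritten as a switch, rtl_is_special_data() is essentially a frame classifier: DHCP (BOOTP ports 67/68), ARP and 802.1X EAPOL frames are "special" so the driver can defer power save and send them robustly. A standalone sketch of the resulting decision table, inputs assumed already in host byte order; note the hunk also changes IPv6 from true to false and flags that with a TODO:

#include <stdbool.h>
#include <stdint.h>

static bool is_special_frame(uint16_t ether_type, uint8_t ip_proto,
			     uint16_t udp_src, uint16_t udp_dst)
{
	switch (ether_type) {
	case 0x0800:			/* ETH_P_IP */
		if (ip_proto != 17)	/* IPPROTO_UDP */
			return false;
		/* 68 = BOOTP/DHCP client, 67 = BOOTP/DHCP server */
		return (udp_src == 68 && udp_dst == 67) ||
		       (udp_src == 67 && udp_dst == 68);
	case 0x0806:			/* ETH_P_ARP */
	case 0x888E:			/* ETH_P_PAE, 802.1X EAPOL */
		return true;
	default:			/* includes ETH_P_IPV6 after this patch */
		return false;
	}
}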
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c index ae13fb94b2e8..2ffc7298f686 100644 --- a/drivers/net/wireless/rtlwifi/efuse.c +++ b/drivers/net/wireless/rtlwifi/efuse.c | |||
| @@ -262,9 +262,9 @@ void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf) | |||
| 262 | sizeof(u8), GFP_ATOMIC); | 262 | sizeof(u8), GFP_ATOMIC); |
| 263 | if (!efuse_tbl) | 263 | if (!efuse_tbl) |
| 264 | return; | 264 | return; |
| 265 | efuse_word = kmalloc(EFUSE_MAX_WORD_UNIT * sizeof(u16 *), GFP_ATOMIC); | 265 | efuse_word = kzalloc(EFUSE_MAX_WORD_UNIT * sizeof(u16 *), GFP_ATOMIC); |
| 266 | if (!efuse_word) | 266 | if (!efuse_word) |
| 267 | goto done; | 267 | goto out; |
| 268 | for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) { | 268 | for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) { |
| 269 | efuse_word[i] = kmalloc(efuse_max_section * sizeof(u16), | 269 | efuse_word[i] = kmalloc(efuse_max_section * sizeof(u16), |
| 270 | GFP_ATOMIC); | 270 | GFP_ATOMIC); |
| @@ -378,6 +378,7 @@ done: | |||
| 378 | for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) | 378 | for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) |
| 379 | kfree(efuse_word[i]); | 379 | kfree(efuse_word[i]); |
| 380 | kfree(efuse_word); | 380 | kfree(efuse_word); |
| 381 | out: | ||
| 381 | kfree(efuse_tbl); | 382 | kfree(efuse_tbl); |
| 382 | } | 383 | } |
| 383 | 384 | ||
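The efuse.c fix pairs each failure with the right unwind point: efuse_word is now zero-initialized, and a failed allocation of efuse_word itself jumps past the loop that frees its rows. A standalone sketch of the staged goto-unwind pattern, with illustrative sizes:

#include <stdlib.h>

#define NROWS 4
#define NCOLS 16

static int build_tables(void)
{
	unsigned char *tbl;
	unsigned short **words;
	int i, rc = -1;

	tbl = calloc(NCOLS, sizeof(*tbl));
	if (!tbl)
		return -1;

	words = calloc(NROWS, sizeof(*words));	/* zeroed: partial free is safe */
	if (!words)
		goto out;			/* no rows to free yet */

	for (i = 0; i < NROWS; i++) {
		words[i] = malloc(NCOLS * sizeof(**words));
		if (!words[i])
			goto done;		/* rows so far freed below */
	}

	/* ... use tbl and words ... */
	rc = 0;

done:
	for (i = 0; i < NROWS; i++)
		free(words[i]);			/* free(NULL) is a no-op */
	free(words);
out:
	free(tbl);
	return rc;
}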
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c index 25e50ffc44ec..b0c346a9e4b8 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c | |||
| @@ -349,7 +349,7 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw, | |||
| 349 | p_drvinfo); | 349 | p_drvinfo); |
| 350 | } | 350 | } |
| 351 | /*rx_status->qual = stats->signal; */ | 351 | /*rx_status->qual = stats->signal; */ |
| 352 | rx_status->signal = stats->rssi + 10; | 352 | rx_status->signal = stats->recvsignalpower + 10; |
| 353 | return true; | 353 | return true; |
| 354 | } | 354 | } |
| 355 | 355 | ||
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c index 945ddecf90c9..0eb0f4ae5920 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c | |||
| @@ -525,7 +525,7 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats, | |||
| 525 | p_drvinfo); | 525 | p_drvinfo); |
| 526 | } | 526 | } |
| 527 | /*rx_status->qual = stats->signal; */ | 527 | /*rx_status->qual = stats->signal; */ |
| 528 | rx_status->signal = stats->rssi + 10; | 528 | rx_status->signal = stats->recvsignalpower + 10; |
| 529 | return true; | 529 | return true; |
| 530 | } | 530 | } |
| 531 | 531 | ||
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c b/drivers/net/wireless/rtlwifi/rtl8192se/rf.c index 5061f1db3f02..92d38ab3c60e 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c +++ b/drivers/net/wireless/rtlwifi/rtl8192se/rf.c | |||
| @@ -265,7 +265,7 @@ static void _rtl92s_get_txpower_writeval_byregulatory(struct ieee80211_hw *hw, | |||
| 265 | rtlefuse->pwrgroup_ht40 | 265 | rtlefuse->pwrgroup_ht40 |
| 266 | [RF90_PATH_A][chnl - 1]) { | 266 | [RF90_PATH_A][chnl - 1]) { |
| 267 | pwrdiff_limit[i] = | 267 | pwrdiff_limit[i] = |
| 268 | rtlefuse->pwrgroup_ht20 | 268 | rtlefuse->pwrgroup_ht40 |
| 269 | [RF90_PATH_A][chnl - 1]; | 269 | [RF90_PATH_A][chnl - 1]; |
| 270 | } | 270 | } |
| 271 | } else { | 271 | } else { |
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c index 222d2e792ca6..27efbcdac6a9 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c | |||
| @@ -329,7 +329,7 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats, | |||
| 329 | } | 329 | } |
| 330 | 330 | ||
| 331 | /*rx_status->qual = stats->signal; */ | 331 | /*rx_status->qual = stats->signal; */ |
| 332 | rx_status->signal = stats->rssi + 10; | 332 | rx_status->signal = stats->recvsignalpower + 10; |
| 333 | 333 | ||
| 334 | return true; | 334 | return true; |
| 335 | } | 335 | } |
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h index d224dc3bb092..0c65386fa30d 100644 --- a/drivers/net/wireless/rtlwifi/wifi.h +++ b/drivers/net/wireless/rtlwifi/wifi.h | |||
| @@ -77,11 +77,7 @@ | |||
| 77 | #define RTL_SLOT_TIME_9 9 | 77 | #define RTL_SLOT_TIME_9 9 |
| 78 | #define RTL_SLOT_TIME_20 20 | 78 | #define RTL_SLOT_TIME_20 20 |
| 79 | 79 | ||
| 80 | /*related with tcp/ip. */ | 80 | /*related to tcp/ip. */ |
| 81 | /*if_ehther.h*/ | ||
| 82 | #define ETH_P_PAE 0x888E /*Port Access Entity (IEEE 802.1X) */ | ||
| 83 | #define ETH_P_IP 0x0800 /*Internet Protocol packet */ | ||
| 84 | #define ETH_P_ARP 0x0806 /*Address Resolution packet */ | ||
| 85 | #define SNAP_SIZE 6 | 81 | #define SNAP_SIZE 6 |
| 86 | #define PROTOC_TYPE_SIZE 2 | 82 | #define PROTOC_TYPE_SIZE 2 |
| 87 | 83 | ||
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index d85e66979711..e59acb1daa23 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c | |||
| @@ -277,12 +277,13 @@ static void xennet_alloc_rx_buffers(struct net_device *dev) | |||
| 277 | if (!page) { | 277 | if (!page) { |
| 278 | kfree_skb(skb); | 278 | kfree_skb(skb); |
| 279 | no_skb: | 279 | no_skb: |
| 280 | /* Any skbuffs queued for refill? Force them out. */ | ||
| 281 | if (i != 0) | ||
| 282 | goto refill; | ||
| 283 | /* Could not allocate any skbuffs. Try again later. */ | 280 | /* Could not allocate any skbuffs. Try again later. */ |
| 284 | mod_timer(&np->rx_refill_timer, | 281 | mod_timer(&np->rx_refill_timer, |
| 285 | jiffies + (HZ/10)); | 282 | jiffies + (HZ/10)); |
| 283 | |||
| 284 | /* Any skbuffs queued for refill? Force them out. */ | ||
| 285 | if (i != 0) | ||
| 286 | goto refill; | ||
| 286 | break; | 287 | break; |
| 287 | } | 288 | } |
| 288 | 289 | ||
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index 12a9e83c008b..d0222f13d154 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c | |||
| @@ -1034,10 +1034,9 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset, | |||
| 1034 | struct dma_chan *chan = qp->dma_chan; | 1034 | struct dma_chan *chan = qp->dma_chan; |
| 1035 | struct dma_device *device; | 1035 | struct dma_device *device; |
| 1036 | size_t pay_off, buff_off; | 1036 | size_t pay_off, buff_off; |
| 1037 | dma_addr_t src, dest; | 1037 | struct dmaengine_unmap_data *unmap; |
| 1038 | dma_cookie_t cookie; | 1038 | dma_cookie_t cookie; |
| 1039 | void *buf = entry->buf; | 1039 | void *buf = entry->buf; |
| 1040 | unsigned long flags; | ||
| 1041 | 1040 | ||
| 1042 | entry->len = len; | 1041 | entry->len = len; |
| 1043 | 1042 | ||
| @@ -1045,35 +1044,49 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset, | |||
| 1045 | goto err; | 1044 | goto err; |
| 1046 | 1045 | ||
| 1047 | if (len < copy_bytes) | 1046 | if (len < copy_bytes) |
| 1048 | goto err1; | 1047 | goto err_wait; |
| 1049 | 1048 | ||
| 1050 | device = chan->device; | 1049 | device = chan->device; |
| 1051 | pay_off = (size_t) offset & ~PAGE_MASK; | 1050 | pay_off = (size_t) offset & ~PAGE_MASK; |
| 1052 | buff_off = (size_t) buf & ~PAGE_MASK; | 1051 | buff_off = (size_t) buf & ~PAGE_MASK; |
| 1053 | 1052 | ||
| 1054 | if (!is_dma_copy_aligned(device, pay_off, buff_off, len)) | 1053 | if (!is_dma_copy_aligned(device, pay_off, buff_off, len)) |
| 1055 | goto err1; | 1054 | goto err_wait; |
| 1056 | 1055 | ||
| 1057 | dest = dma_map_single(device->dev, buf, len, DMA_FROM_DEVICE); | 1056 | unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT); |
| 1058 | if (dma_mapping_error(device->dev, dest)) | 1057 | if (!unmap) |
| 1059 | goto err1; | 1058 | goto err_wait; |
| 1060 | 1059 | ||
| 1061 | src = dma_map_single(device->dev, offset, len, DMA_TO_DEVICE); | 1060 | unmap->len = len; |
| 1062 | if (dma_mapping_error(device->dev, src)) | 1061 | unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset), |
| 1063 | goto err2; | 1062 | pay_off, len, DMA_TO_DEVICE); |
| 1063 | if (dma_mapping_error(device->dev, unmap->addr[0])) | ||
| 1064 | goto err_get_unmap; | ||
| 1065 | |||
| 1066 | unmap->to_cnt = 1; | ||
| 1064 | 1067 | ||
| 1065 | flags = DMA_COMPL_DEST_UNMAP_SINGLE | DMA_COMPL_SRC_UNMAP_SINGLE | | 1068 | unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf), |
| 1066 | DMA_PREP_INTERRUPT; | 1069 | buff_off, len, DMA_FROM_DEVICE); |
| 1067 | txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags); | 1070 | if (dma_mapping_error(device->dev, unmap->addr[1])) |
| 1071 | goto err_get_unmap; | ||
| 1072 | |||
| 1073 | unmap->from_cnt = 1; | ||
| 1074 | |||
| 1075 | txd = device->device_prep_dma_memcpy(chan, unmap->addr[1], | ||
| 1076 | unmap->addr[0], len, | ||
| 1077 | DMA_PREP_INTERRUPT); | ||
| 1068 | if (!txd) | 1078 | if (!txd) |
| 1069 | goto err3; | 1079 | goto err_get_unmap; |
| 1070 | 1080 | ||
| 1071 | txd->callback = ntb_rx_copy_callback; | 1081 | txd->callback = ntb_rx_copy_callback; |
| 1072 | txd->callback_param = entry; | 1082 | txd->callback_param = entry; |
| 1083 | dma_set_unmap(txd, unmap); | ||
| 1073 | 1084 | ||
| 1074 | cookie = dmaengine_submit(txd); | 1085 | cookie = dmaengine_submit(txd); |
| 1075 | if (dma_submit_error(cookie)) | 1086 | if (dma_submit_error(cookie)) |
| 1076 | goto err3; | 1087 | goto err_set_unmap; |
| 1088 | |||
| 1089 | dmaengine_unmap_put(unmap); | ||
| 1077 | 1090 | ||
| 1078 | qp->last_cookie = cookie; | 1091 | qp->last_cookie = cookie; |
| 1079 | 1092 | ||
| @@ -1081,11 +1094,11 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset, | |||
| 1081 | 1094 | ||
| 1082 | return; | 1095 | return; |
| 1083 | 1096 | ||
| 1084 | err3: | 1097 | err_set_unmap: |
| 1085 | dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE); | 1098 | dmaengine_unmap_put(unmap); |
| 1086 | err2: | 1099 | err_get_unmap: |
| 1087 | dma_unmap_single(device->dev, dest, len, DMA_FROM_DEVICE); | 1100 | dmaengine_unmap_put(unmap); |
| 1088 | err1: | 1101 | err_wait: |
| 1089 | /* If the callbacks come out of order, the writing of the index to the | 1102 | /* If the callbacks come out of order, the writing of the index to the |
| 1090 | * last completed will be out of order. This may result in the | 1103 | * last completed will be out of order. This may result in the |
| 1091 | * receive stalling forever. | 1104 | * receive stalling forever. |
| @@ -1245,12 +1258,12 @@ static void ntb_async_tx(struct ntb_transport_qp *qp, | |||
| 1245 | struct dma_chan *chan = qp->dma_chan; | 1258 | struct dma_chan *chan = qp->dma_chan; |
| 1246 | struct dma_device *device; | 1259 | struct dma_device *device; |
| 1247 | size_t dest_off, buff_off; | 1260 | size_t dest_off, buff_off; |
| 1248 | dma_addr_t src, dest; | 1261 | struct dmaengine_unmap_data *unmap; |
| 1262 | dma_addr_t dest; | ||
| 1249 | dma_cookie_t cookie; | 1263 | dma_cookie_t cookie; |
| 1250 | void __iomem *offset; | 1264 | void __iomem *offset; |
| 1251 | size_t len = entry->len; | 1265 | size_t len = entry->len; |
| 1252 | void *buf = entry->buf; | 1266 | void *buf = entry->buf; |
| 1253 | unsigned long flags; | ||
| 1254 | 1267 | ||
| 1255 | offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index; | 1268 | offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index; |
| 1256 | hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header); | 1269 | hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header); |
| @@ -1273,28 +1286,41 @@ static void ntb_async_tx(struct ntb_transport_qp *qp, | |||
| 1273 | if (!is_dma_copy_aligned(device, buff_off, dest_off, len)) | 1286 | if (!is_dma_copy_aligned(device, buff_off, dest_off, len)) |
| 1274 | goto err; | 1287 | goto err; |
| 1275 | 1288 | ||
| 1276 | src = dma_map_single(device->dev, buf, len, DMA_TO_DEVICE); | 1289 | unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT); |
| 1277 | if (dma_mapping_error(device->dev, src)) | 1290 | if (!unmap) |
| 1278 | goto err; | 1291 | goto err; |
| 1279 | 1292 | ||
| 1280 | flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_PREP_INTERRUPT; | 1293 | unmap->len = len; |
| 1281 | txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags); | 1294 | unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf), |
| 1295 | buff_off, len, DMA_TO_DEVICE); | ||
| 1296 | if (dma_mapping_error(device->dev, unmap->addr[0])) | ||
| 1297 | goto err_get_unmap; | ||
| 1298 | |||
| 1299 | unmap->to_cnt = 1; | ||
| 1300 | |||
| 1301 | txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len, | ||
| 1302 | DMA_PREP_INTERRUPT); | ||
| 1282 | if (!txd) | 1303 | if (!txd) |
| 1283 | goto err1; | 1304 | goto err_get_unmap; |
| 1284 | 1305 | ||
| 1285 | txd->callback = ntb_tx_copy_callback; | 1306 | txd->callback = ntb_tx_copy_callback; |
| 1286 | txd->callback_param = entry; | 1307 | txd->callback_param = entry; |
| 1308 | dma_set_unmap(txd, unmap); | ||
| 1287 | 1309 | ||
| 1288 | cookie = dmaengine_submit(txd); | 1310 | cookie = dmaengine_submit(txd); |
| 1289 | if (dma_submit_error(cookie)) | 1311 | if (dma_submit_error(cookie)) |
| 1290 | goto err1; | 1312 | goto err_set_unmap; |
| 1313 | |||
| 1314 | dmaengine_unmap_put(unmap); | ||
| 1291 | 1315 | ||
| 1292 | dma_async_issue_pending(chan); | 1316 | dma_async_issue_pending(chan); |
| 1293 | qp->tx_async++; | 1317 | qp->tx_async++; |
| 1294 | 1318 | ||
| 1295 | return; | 1319 | return; |
| 1296 | err1: | 1320 | err_set_unmap: |
| 1297 | dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE); | 1321 | dmaengine_unmap_put(unmap); |
| 1322 | err_get_unmap: | ||
| 1323 | dmaengine_unmap_put(unmap); | ||
| 1298 | err: | 1324 | err: |
| 1299 | ntb_memcpy_tx(entry, offset); | 1325 | ntb_memcpy_tx(entry, offset); |
| 1300 | qp->tx_memcpy++; | 1326 | qp->tx_memcpy++; |
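Both ntb_transport.c hunks move from per-call dma_unmap_single() plus DMA_COMPL_* flags to the refcounted dmaengine unmap-data object: the mappings travel with the descriptor and are released when the last reference drops at completion. A hedged kernel-style sketch of the pattern with illustrative page/offset arguments; the error labels mirror the hunk:

#include <linux/dmaengine.h>

static int memcpy_via_dma(struct dma_chan *chan, struct page *dst_page,
			  unsigned long dst_off, struct page *src_page,
			  unsigned long src_off, size_t len)
{
	struct dma_device *device = chan->device;
	struct dma_async_tx_descriptor *txd;
	struct dmaengine_unmap_data *unmap;
	dma_cookie_t cookie;

	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
	if (!unmap)
		return -ENOMEM;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(device->dev, src_page, src_off,
				      len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_get_unmap;
	unmap->to_cnt = 1;

	unmap->addr[1] = dma_map_page(device->dev, dst_page, dst_off,
				      len, DMA_FROM_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[1]))
		goto err_get_unmap;
	unmap->from_cnt = 1;

	txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
					     unmap->addr[0], len,
					     DMA_PREP_INTERRUPT);
	if (!txd)
		goto err_get_unmap;

	dma_set_unmap(txd, unmap);	/* descriptor takes its own reference */

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	dmaengine_unmap_put(unmap);	/* drop ours; completion drops the rest */
	dma_async_issue_pending(chan);
	return 0;

err_set_unmap:
	dmaengine_unmap_put(unmap);	/* the reference dma_set_unmap took */
err_get_unmap:
	dmaengine_unmap_put(unmap);	/* our reference; unmaps anything mapped */
	return -EIO;
}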
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c index 1ce8ee054f1a..a94d850ae228 100644 --- a/drivers/pci/hotplug/acpi_pcihp.c +++ b/drivers/pci/hotplug/acpi_pcihp.c | |||
| @@ -367,7 +367,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags) | |||
| 367 | string = (struct acpi_buffer){ ACPI_ALLOCATE_BUFFER, NULL }; | 367 | string = (struct acpi_buffer){ ACPI_ALLOCATE_BUFFER, NULL }; |
| 368 | } | 368 | } |
| 369 | 369 | ||
| 370 | handle = DEVICE_ACPI_HANDLE(&pdev->dev); | 370 | handle = ACPI_HANDLE(&pdev->dev); |
| 371 | if (!handle) { | 371 | if (!handle) { |
| 372 | /* | 372 | /* |
| 373 | * This hotplug controller was not listed in the ACPI name | 373 | * This hotplug controller was not listed in the ACPI name |
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h index 26100f510b10..1592dbe4f904 100644 --- a/drivers/pci/hotplug/acpiphp.h +++ b/drivers/pci/hotplug/acpiphp.h | |||
| @@ -176,7 +176,6 @@ u8 acpiphp_get_latch_status(struct acpiphp_slot *slot); | |||
| 176 | u8 acpiphp_get_adapter_status(struct acpiphp_slot *slot); | 176 | u8 acpiphp_get_adapter_status(struct acpiphp_slot *slot); |
| 177 | 177 | ||
| 178 | /* variables */ | 178 | /* variables */ |
| 179 | extern bool acpiphp_debug; | ||
| 180 | extern bool acpiphp_disabled; | 179 | extern bool acpiphp_disabled; |
| 181 | 180 | ||
| 182 | #endif /* _ACPIPHP_H */ | 181 | #endif /* _ACPIPHP_H */ |
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c index ead7c534095e..cff7cadfc2e4 100644 --- a/drivers/pci/hotplug/pciehp_acpi.c +++ b/drivers/pci/hotplug/pciehp_acpi.c | |||
| @@ -54,7 +54,7 @@ int pciehp_acpi_slot_detection_check(struct pci_dev *dev) | |||
| 54 | { | 54 | { |
| 55 | if (slot_detection_mode != PCIEHP_DETECT_ACPI) | 55 | if (slot_detection_mode != PCIEHP_DETECT_ACPI) |
| 56 | return 0; | 56 | return 0; |
| 57 | if (acpi_pci_detect_ejectable(DEVICE_ACPI_HANDLE(&dev->dev))) | 57 | if (acpi_pci_detect_ejectable(ACPI_HANDLE(&dev->dev))) |
| 58 | return 0; | 58 | return 0; |
| 59 | return -ENODEV; | 59 | return -ENODEV; |
| 60 | } | 60 | } |
| @@ -96,7 +96,7 @@ static int __init dummy_probe(struct pcie_device *dev) | |||
| 96 | dup_slot_id++; | 96 | dup_slot_id++; |
| 97 | } | 97 | } |
| 98 | list_add_tail(&slot->list, &dummy_slots); | 98 | list_add_tail(&slot->list, &dummy_slots); |
| 99 | handle = DEVICE_ACPI_HANDLE(&pdev->dev); | 99 | handle = ACPI_HANDLE(&pdev->dev); |
| 100 | if (!acpi_slot_detected && acpi_pci_detect_ejectable(handle)) | 100 | if (!acpi_slot_detected && acpi_pci_detect_ejectable(handle)) |
| 101 | acpi_slot_detected = 1; | 101 | acpi_slot_detected = 1; |
| 102 | return -ENODEV; /* dummy driver always returns error */ | 102 | return -ENODEV; /* dummy driver always returns error */ |
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c index b2781dfe60e9..5b05a68cca6c 100644 --- a/drivers/pci/hotplug/sgi_hotplug.c +++ b/drivers/pci/hotplug/sgi_hotplug.c | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | * Work to add BIOS PROM support was completed by Mike Habeck. | 9 | * Work to add BIOS PROM support was completed by Mike Habeck. |
| 10 | */ | 10 | */ |
| 11 | 11 | ||
| 12 | #include <linux/acpi.h> | ||
| 12 | #include <linux/init.h> | 13 | #include <linux/init.h> |
| 13 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
| 14 | #include <linux/module.h> | 15 | #include <linux/module.h> |
| @@ -29,7 +30,6 @@ | |||
| 29 | #include <asm/sn/sn_feature_sets.h> | 30 | #include <asm/sn/sn_feature_sets.h> |
| 30 | #include <asm/sn/sn_sal.h> | 31 | #include <asm/sn/sn_sal.h> |
| 31 | #include <asm/sn/types.h> | 32 | #include <asm/sn/types.h> |
| 32 | #include <linux/acpi.h> | ||
| 33 | #include <asm/sn/acpi.h> | 33 | #include <asm/sn/acpi.h> |
| 34 | 34 | ||
| 35 | #include "../pci.h" | 35 | #include "../pci.h" |
| @@ -414,7 +414,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot) | |||
| 414 | acpi_handle rethandle; | 414 | acpi_handle rethandle; |
| 415 | acpi_status ret; | 415 | acpi_status ret; |
| 416 | 416 | ||
| 417 | phandle = PCI_CONTROLLER(slot->pci_bus)->acpi_handle; | 417 | phandle = acpi_device_handle(PCI_CONTROLLER(slot->pci_bus)->companion); |
| 418 | 418 | ||
| 419 | if (acpi_bus_get_device(phandle, &pdevice)) { | 419 | if (acpi_bus_get_device(phandle, &pdevice)) { |
| 420 | dev_dbg(&slot->pci_bus->self->dev, | 420 | dev_dbg(&slot->pci_bus->self->dev, |
| @@ -495,7 +495,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot) | |||
| 495 | 495 | ||
| 496 | /* free the ACPI resources for the slot */ | 496 | /* free the ACPI resources for the slot */ |
| 497 | if (SN_ACPI_BASE_SUPPORT() && | 497 | if (SN_ACPI_BASE_SUPPORT() && |
| 498 | PCI_CONTROLLER(slot->pci_bus)->acpi_handle) { | 498 | PCI_CONTROLLER(slot->pci_bus)->companion) { |
| 499 | unsigned long long adr; | 499 | unsigned long long adr; |
| 500 | struct acpi_device *device; | 500 | struct acpi_device *device; |
| 501 | acpi_handle phandle; | 501 | acpi_handle phandle; |
| @@ -504,7 +504,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot) | |||
| 504 | acpi_status ret; | 504 | acpi_status ret; |
| 505 | 505 | ||
| 506 | /* Get the rootbus node pointer */ | 506 | /* Get the rootbus node pointer */ |
| 507 | phandle = PCI_CONTROLLER(slot->pci_bus)->acpi_handle; | 507 | phandle = acpi_device_handle(PCI_CONTROLLER(slot->pci_bus)->companion); |
| 508 | 508 | ||
| 509 | acpi_scan_lock_acquire(); | 509 | acpi_scan_lock_acquire(); |
| 510 | /* | 510 | /* |
diff --git a/drivers/pci/ioapic.c b/drivers/pci/ioapic.c index 1b90579b233a..50ce68098298 100644 --- a/drivers/pci/ioapic.c +++ b/drivers/pci/ioapic.c | |||
| @@ -37,7 +37,7 @@ static int ioapic_probe(struct pci_dev *dev, const struct pci_device_id *ent) | |||
| 37 | char *type; | 37 | char *type; |
| 38 | struct resource *res; | 38 | struct resource *res; |
| 39 | 39 | ||
| 40 | handle = DEVICE_ACPI_HANDLE(&dev->dev); | 40 | handle = ACPI_HANDLE(&dev->dev); |
| 41 | if (!handle) | 41 | if (!handle) |
| 42 | return -EINVAL; | 42 | return -EINVAL; |
| 43 | 43 | ||
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index dfd1f59de729..f166126e28d1 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c | |||
| @@ -173,14 +173,14 @@ static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev) | |||
| 173 | 173 | ||
| 174 | static bool acpi_pci_power_manageable(struct pci_dev *dev) | 174 | static bool acpi_pci_power_manageable(struct pci_dev *dev) |
| 175 | { | 175 | { |
| 176 | acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); | 176 | acpi_handle handle = ACPI_HANDLE(&dev->dev); |
| 177 | 177 | ||
| 178 | return handle ? acpi_bus_power_manageable(handle) : false; | 178 | return handle ? acpi_bus_power_manageable(handle) : false; |
| 179 | } | 179 | } |
| 180 | 180 | ||
| 181 | static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) | 181 | static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) |
| 182 | { | 182 | { |
| 183 | acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); | 183 | acpi_handle handle = ACPI_HANDLE(&dev->dev); |
| 184 | static const u8 state_conv[] = { | 184 | static const u8 state_conv[] = { |
| 185 | [PCI_D0] = ACPI_STATE_D0, | 185 | [PCI_D0] = ACPI_STATE_D0, |
| 186 | [PCI_D1] = ACPI_STATE_D1, | 186 | [PCI_D1] = ACPI_STATE_D1, |
| @@ -217,7 +217,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) | |||
| 217 | 217 | ||
| 218 | static bool acpi_pci_can_wakeup(struct pci_dev *dev) | 218 | static bool acpi_pci_can_wakeup(struct pci_dev *dev) |
| 219 | { | 219 | { |
| 220 | acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); | 220 | acpi_handle handle = ACPI_HANDLE(&dev->dev); |
| 221 | 221 | ||
| 222 | return handle ? acpi_bus_can_wakeup(handle) : false; | 222 | return handle ? acpi_bus_can_wakeup(handle) : false; |
| 223 | } | 223 | } |
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c index edaed6f4da6c..d51f45aa669e 100644 --- a/drivers/pci/pci-label.c +++ b/drivers/pci/pci-label.c | |||
| @@ -263,7 +263,7 @@ device_has_dsm(struct device *dev) | |||
| 263 | acpi_handle handle; | 263 | acpi_handle handle; |
| 264 | struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL}; | 264 | struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL}; |
| 265 | 265 | ||
| 266 | handle = DEVICE_ACPI_HANDLE(dev); | 266 | handle = ACPI_HANDLE(dev); |
| 267 | 267 | ||
| 268 | if (!handle) | 268 | if (!handle) |
| 269 | return FALSE; | 269 | return FALSE; |
| @@ -295,7 +295,7 @@ acpilabel_show(struct device *dev, struct device_attribute *attr, char *buf) | |||
| 295 | acpi_handle handle; | 295 | acpi_handle handle; |
| 296 | int length; | 296 | int length; |
| 297 | 297 | ||
| 298 | handle = DEVICE_ACPI_HANDLE(dev); | 298 | handle = ACPI_HANDLE(dev); |
| 299 | 299 | ||
| 300 | if (!handle) | 300 | if (!handle) |
| 301 | return -1; | 301 | return -1; |
| @@ -316,7 +316,7 @@ acpiindex_show(struct device *dev, struct device_attribute *attr, char *buf) | |||
| 316 | acpi_handle handle; | 316 | acpi_handle handle; |
| 317 | int length; | 317 | int length; |
| 318 | 318 | ||
| 319 | handle = DEVICE_ACPI_HANDLE(dev); | 319 | handle = ACPI_HANDLE(dev); |
| 320 | 320 | ||
| 321 | if (!handle) | 321 | if (!handle) |
| 322 | return -1; | 322 | return -1; |
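The DEVICE_ACPI_HANDLE() hunks above and below all make the same mechanical conversion: the old macro becomes ACPI_HANDLE(), and code that wants the ACPI device itself asks for the companion object rather than caching a handle or pointer of its own. A short sketch of the resulting idiom (example_probe is illustrative):

#include <linux/acpi.h>

static int example_probe(struct device *dev)
{
	acpi_handle handle = ACPI_HANDLE(dev);		/* may be NULL */
	struct acpi_device *adev = ACPI_COMPANION(dev);	/* likewise */

	if (!handle || !adev)
		return -ENODEV;

	/* For a valid companion, acpi_device_handle(adev) == handle,
	 * which is the equivalence the sgi_hotplug.c hunk relies on. */
	return 0;
}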
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c index 605a9be55129..b9429fbf1cd8 100644 --- a/drivers/platform/x86/apple-gmux.c +++ b/drivers/platform/x86/apple-gmux.c | |||
| @@ -519,7 +519,7 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) | |||
| 519 | 519 | ||
| 520 | gmux_data->power_state = VGA_SWITCHEROO_ON; | 520 | gmux_data->power_state = VGA_SWITCHEROO_ON; |
| 521 | 521 | ||
| 522 | gmux_data->dhandle = DEVICE_ACPI_HANDLE(&pnp->dev); | 522 | gmux_data->dhandle = ACPI_HANDLE(&pnp->dev); |
| 523 | if (!gmux_data->dhandle) { | 523 | if (!gmux_data->dhandle) { |
| 524 | pr_err("Cannot find acpi handle for pnp device %s\n", | 524 | pr_err("Cannot find acpi handle for pnp device %s\n", |
| 525 | dev_name(&pnp->dev)); | 525 | dev_name(&pnp->dev)); |
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c index 747826d99059..14655a0f0431 100644 --- a/drivers/pnp/pnpacpi/core.c +++ b/drivers/pnp/pnpacpi/core.c | |||
| @@ -89,7 +89,7 @@ static int pnpacpi_set_resources(struct pnp_dev *dev) | |||
| 89 | 89 | ||
| 90 | pnp_dbg(&dev->dev, "set resources\n"); | 90 | pnp_dbg(&dev->dev, "set resources\n"); |
| 91 | 91 | ||
| 92 | handle = DEVICE_ACPI_HANDLE(&dev->dev); | 92 | handle = ACPI_HANDLE(&dev->dev); |
| 93 | if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { | 93 | if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { |
| 94 | dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); | 94 | dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); |
| 95 | return -ENODEV; | 95 | return -ENODEV; |
| @@ -122,7 +122,7 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev) | |||
| 122 | 122 | ||
| 123 | dev_dbg(&dev->dev, "disable resources\n"); | 123 | dev_dbg(&dev->dev, "disable resources\n"); |
| 124 | 124 | ||
| 125 | handle = DEVICE_ACPI_HANDLE(&dev->dev); | 125 | handle = ACPI_HANDLE(&dev->dev); |
| 126 | if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { | 126 | if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { |
| 127 | dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); | 127 | dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); |
| 128 | return 0; | 128 | return 0; |
| @@ -144,7 +144,7 @@ static bool pnpacpi_can_wakeup(struct pnp_dev *dev) | |||
| 144 | struct acpi_device *acpi_dev; | 144 | struct acpi_device *acpi_dev; |
| 145 | acpi_handle handle; | 145 | acpi_handle handle; |
| 146 | 146 | ||
| 147 | handle = DEVICE_ACPI_HANDLE(&dev->dev); | 147 | handle = ACPI_HANDLE(&dev->dev); |
| 148 | if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { | 148 | if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { |
| 149 | dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); | 149 | dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); |
| 150 | return false; | 150 | return false; |
| @@ -159,7 +159,7 @@ static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state) | |||
| 159 | acpi_handle handle; | 159 | acpi_handle handle; |
| 160 | int error = 0; | 160 | int error = 0; |
| 161 | 161 | ||
| 162 | handle = DEVICE_ACPI_HANDLE(&dev->dev); | 162 | handle = ACPI_HANDLE(&dev->dev); |
| 163 | if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { | 163 | if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { |
| 164 | dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); | 164 | dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); |
| 165 | return 0; | 165 | return 0; |
| @@ -194,7 +194,7 @@ static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state) | |||
| 194 | static int pnpacpi_resume(struct pnp_dev *dev) | 194 | static int pnpacpi_resume(struct pnp_dev *dev) |
| 195 | { | 195 | { |
| 196 | struct acpi_device *acpi_dev; | 196 | struct acpi_device *acpi_dev; |
| 197 | acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); | 197 | acpi_handle handle = ACPI_HANDLE(&dev->dev); |
| 198 | int error = 0; | 198 | int error = 0; |
| 199 | 199 | ||
| 200 | if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { | 200 | if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { |
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index d85ac1a9d2c0..fbcd48d0bfc3 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c | |||
| @@ -511,7 +511,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) | |||
| 511 | goto cleanup; | 511 | goto cleanup; |
| 512 | } | 512 | } |
| 513 | 513 | ||
| 514 | if (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr))) { | 514 | if ((fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) || |
| 515 | (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))) { | ||
| 515 | rcode = -EINVAL; | 516 | rcode = -EINVAL; |
| 516 | goto cleanup; | 517 | goto cleanup; |
| 517 | } | 518 | } |
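The commctrl.c fix adds the missing lower bound on a user-controlled size: fibsize must be large enough to hold the header it claims to carry as well as small enough to fit the FIB. A trivial standalone sketch of the two-sided check (bounds illustrative):

#include <stddef.h>

static int validate_user_size(size_t fibsize, size_t min_hdr, size_t max_fib)
{
	/* Reject both truncated and oversized requests; the driver
	 * returns -EINVAL here. */
	if (fibsize < min_hdr || fibsize > max_fib)
		return -1;
	return 0;
}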
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index e43db7742047..bd6f743d87a7 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c | |||
| @@ -1512,7 +1512,8 @@ static int pmcraid_notify_aen( | |||
| 1512 | } | 1512 | } |
| 1513 | 1513 | ||
| 1514 | result = | 1514 | result = |
| 1515 | genlmsg_multicast(skb, 0, pmcraid_event_family.id, GFP_ATOMIC); | 1515 | genlmsg_multicast(&pmcraid_event_family, skb, 0, |
| 1516 | pmcraid_event_family.id, GFP_ATOMIC); | ||
| 1516 | 1517 | ||
| 1517 | /* If there are no listeners, genlmsg_multicast may return non-zero | 1518 | /* If there are no listeners, genlmsg_multicast may return non-zero |
| 1518 | * value. | 1519 | * value. |
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c index b9f0192758d6..6d207afec8cb 100644 --- a/drivers/spi/spi-dw-mid.c +++ b/drivers/spi/spi-dw-mid.c | |||
| @@ -150,7 +150,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change) | |||
| 150 | &dws->tx_sgl, | 150 | &dws->tx_sgl, |
| 151 | 1, | 151 | 1, |
| 152 | DMA_MEM_TO_DEV, | 152 | DMA_MEM_TO_DEV, |
| 153 | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP); | 153 | DMA_PREP_INTERRUPT); |
| 154 | txdesc->callback = dw_spi_dma_done; | 154 | txdesc->callback = dw_spi_dma_done; |
| 155 | txdesc->callback_param = dws; | 155 | txdesc->callback_param = dws; |
| 156 | 156 | ||
| @@ -173,7 +173,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change) | |||
| 173 | &dws->rx_sgl, | 173 | &dws->rx_sgl, |
| 174 | 1, | 174 | 1, |
| 175 | DMA_DEV_TO_MEM, | 175 | DMA_DEV_TO_MEM, |
| 176 | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP); | 176 | DMA_PREP_INTERRUPT); |
| 177 | rxdesc->callback = dw_spi_dma_done; | 177 | rxdesc->callback = dw_spi_dma_done; |
| 178 | rxdesc->callback_param = dws; | 178 | rxdesc->callback_param = dws; |
| 179 | 179 | ||
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 8d85ddc46011..18cc625d887f 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
| @@ -357,6 +357,19 @@ struct spi_device *spi_alloc_device(struct spi_master *master) | |||
| 357 | } | 357 | } |
| 358 | EXPORT_SYMBOL_GPL(spi_alloc_device); | 358 | EXPORT_SYMBOL_GPL(spi_alloc_device); |
| 359 | 359 | ||
| 360 | static void spi_dev_set_name(struct spi_device *spi) | ||
| 361 | { | ||
| 362 | struct acpi_device *adev = ACPI_COMPANION(&spi->dev); | ||
| 363 | |||
| 364 | if (adev) { | ||
| 365 | dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev)); | ||
| 366 | return; | ||
| 367 | } | ||
| 368 | |||
| 369 | dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev), | ||
| 370 | spi->chip_select); | ||
| 371 | } | ||
| 372 | |||
| 360 | /** | 373 | /** |
| 361 | * spi_add_device - Add spi_device allocated with spi_alloc_device | 374 | * spi_add_device - Add spi_device allocated with spi_alloc_device |
| 362 | * @spi: spi_device to register | 375 | * @spi: spi_device to register |
| @@ -383,9 +396,7 @@ int spi_add_device(struct spi_device *spi) | |||
| 383 | } | 396 | } |
| 384 | 397 | ||
| 385 | /* Set the bus ID string */ | 398 | /* Set the bus ID string */ |
| 386 | dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev), | 399 | spi_dev_set_name(spi); |
| 387 | spi->chip_select); | ||
| 388 | |||
| 389 | 400 | ||
| 390 | /* We need to make sure there's no other device with this | 401 | /* We need to make sure there's no other device with this |
| 391 | * chipselect **BEFORE** we call setup(), else we'll trash | 402 | * chipselect **BEFORE** we call setup(), else we'll trash |
| @@ -1144,7 +1155,7 @@ static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level, | |||
| 1144 | return AE_NO_MEMORY; | 1155 | return AE_NO_MEMORY; |
| 1145 | } | 1156 | } |
| 1146 | 1157 | ||
| 1147 | ACPI_HANDLE_SET(&spi->dev, handle); | 1158 | ACPI_COMPANION_SET(&spi->dev, adev); |
| 1148 | spi->irq = -1; | 1159 | spi->irq = -1; |
| 1149 | 1160 | ||
| 1150 | INIT_LIST_HEAD(&resource_list); | 1161 | INIT_LIST_HEAD(&resource_list); |
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 03a567199bbe..f1d511a9475b 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c | |||
| @@ -1608,15 +1608,17 @@ exit: | |||
| 1608 | EXPORT_SYMBOL_GPL(thermal_zone_get_zone_by_name); | 1608 | EXPORT_SYMBOL_GPL(thermal_zone_get_zone_by_name); |
| 1609 | 1609 | ||
| 1610 | #ifdef CONFIG_NET | 1610 | #ifdef CONFIG_NET |
| 1611 | static const struct genl_multicast_group thermal_event_mcgrps[] = { | ||
| 1612 | { .name = THERMAL_GENL_MCAST_GROUP_NAME, }, | ||
| 1613 | }; | ||
| 1614 | |||
| 1611 | static struct genl_family thermal_event_genl_family = { | 1615 | static struct genl_family thermal_event_genl_family = { |
| 1612 | .id = GENL_ID_GENERATE, | 1616 | .id = GENL_ID_GENERATE, |
| 1613 | .name = THERMAL_GENL_FAMILY_NAME, | 1617 | .name = THERMAL_GENL_FAMILY_NAME, |
| 1614 | .version = THERMAL_GENL_VERSION, | 1618 | .version = THERMAL_GENL_VERSION, |
| 1615 | .maxattr = THERMAL_GENL_ATTR_MAX, | 1619 | .maxattr = THERMAL_GENL_ATTR_MAX, |
| 1616 | }; | 1620 | .mcgrps = thermal_event_mcgrps, |
| 1617 | 1621 | .n_mcgrps = ARRAY_SIZE(thermal_event_mcgrps), | |
| 1618 | static struct genl_multicast_group thermal_event_mcgrp = { | ||
| 1619 | .name = THERMAL_GENL_MCAST_GROUP_NAME, | ||
| 1620 | }; | 1622 | }; |
| 1621 | 1623 | ||
| 1622 | int thermal_generate_netlink_event(struct thermal_zone_device *tz, | 1624 | int thermal_generate_netlink_event(struct thermal_zone_device *tz, |
| @@ -1677,7 +1679,8 @@ int thermal_generate_netlink_event(struct thermal_zone_device *tz, | |||
| 1677 | return result; | 1679 | return result; |
| 1678 | } | 1680 | } |
| 1679 | 1681 | ||
| 1680 | result = genlmsg_multicast(skb, 0, thermal_event_mcgrp.id, GFP_ATOMIC); | 1682 | result = genlmsg_multicast(&thermal_event_genl_family, skb, 0, |
| 1683 | 0, GFP_ATOMIC); | ||
| 1681 | if (result) | 1684 | if (result) |
| 1682 | dev_err(&tz->device, "Failed to send netlink event:%d", result); | 1685 | dev_err(&tz->device, "Failed to send netlink event:%d", result); |
| 1683 | 1686 | ||
| @@ -1687,17 +1690,7 @@ EXPORT_SYMBOL_GPL(thermal_generate_netlink_event); | |||
| 1687 | 1690 | ||
| 1688 | static int genetlink_init(void) | 1691 | static int genetlink_init(void) |
| 1689 | { | 1692 | { |
| 1690 | int result; | 1693 | return genl_register_family(&thermal_event_genl_family); |
| 1691 | |||
| 1692 | result = genl_register_family(&thermal_event_genl_family); | ||
| 1693 | if (result) | ||
| 1694 | return result; | ||
| 1695 | |||
| 1696 | result = genl_register_mc_group(&thermal_event_genl_family, | ||
| 1697 | &thermal_event_mcgrp); | ||
| 1698 | if (result) | ||
| 1699 | genl_unregister_family(&thermal_event_genl_family); | ||
| 1700 | return result; | ||
| 1701 | } | 1694 | } |
| 1702 | 1695 | ||
| 1703 | static void genetlink_exit(void) | 1696 | static void genetlink_exit(void) |
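The thermal hunk above (like the pmcraid one earlier) converts to the genetlink API in which multicast groups are declared inside the family and addressed by index, so the separate genl_register_mc_group() step disappears. A hedged sketch with illustrative names:

#include <net/genetlink.h>

static const struct genl_multicast_group example_mcgrps[] = {
	{ .name = "example_mc_group", },
};

static struct genl_family example_genl_family = {
	.id = GENL_ID_GENERATE,
	.name = "example_family",
	.version = 1,
	.maxattr = 0,
	.mcgrps = example_mcgrps,
	.n_mcgrps = ARRAY_SIZE(example_mcgrps),
};

static int __init example_genl_init(void)
{
	/* one call now registers the family and all of its groups */
	return genl_register_family(&example_genl_family);
}

/* Events then target a group by its index within this family:
 *   genlmsg_multicast(&example_genl_family, skb, 0, 0, GFP_ATOMIC);
 */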
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 537750261aaa..7d8103cd3e2e 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c | |||
| @@ -1433,7 +1433,7 @@ static void work_fn_rx(struct work_struct *work) | |||
| 1433 | desc = s->desc_rx[new]; | 1433 | desc = s->desc_rx[new]; |
| 1434 | 1434 | ||
| 1435 | if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) != | 1435 | if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) != |
| 1436 | DMA_SUCCESS) { | 1436 | DMA_COMPLETE) { |
| 1437 | /* Handle incomplete DMA receive */ | 1437 | /* Handle incomplete DMA receive */ |
| 1438 | struct dma_chan *chan = s->chan_rx; | 1438 | struct dma_chan *chan = s->chan_rx; |
| 1439 | struct shdma_desc *sh_desc = container_of(desc, | 1439 | struct shdma_desc *sh_desc = container_of(desc, |
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 06cec635e703..a7c04e24ca48 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c | |||
| @@ -5501,6 +5501,6 @@ acpi_handle usb_get_hub_port_acpi_handle(struct usb_device *hdev, | |||
| 5501 | if (!hub) | 5501 | if (!hub) |
| 5502 | return NULL; | 5502 | return NULL; |
| 5503 | 5503 | ||
| 5504 | return DEVICE_ACPI_HANDLE(&hub->ports[port1 - 1]->dev); | 5504 | return ACPI_HANDLE(&hub->ports[port1 - 1]->dev); |
| 5505 | } | 5505 | } |
| 5506 | #endif | 5506 | #endif |
diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c index 255c14464bf2..4e243c37f17f 100644 --- a/drivers/usb/core/usb-acpi.c +++ b/drivers/usb/core/usb-acpi.c | |||
| @@ -173,7 +173,7 @@ static int usb_acpi_find_device(struct device *dev, acpi_handle *handle) | |||
| 173 | } | 173 | } |
| 174 | 174 | ||
| 175 | /* root hub's parent is the usb hcd. */ | 175 | /* root hub's parent is the usb hcd. */ |
| 176 | parent_handle = DEVICE_ACPI_HANDLE(dev->parent); | 176 | parent_handle = ACPI_HANDLE(dev->parent); |
| 177 | *handle = acpi_get_child(parent_handle, udev->portnum); | 177 | *handle = acpi_get_child(parent_handle, udev->portnum); |
| 178 | if (!*handle) | 178 | if (!*handle) |
| 179 | return -ENODEV; | 179 | return -ENODEV; |
| @@ -194,7 +194,7 @@ static int usb_acpi_find_device(struct device *dev, acpi_handle *handle) | |||
| 194 | 194 | ||
| 195 | raw_port_num = usb_hcd_find_raw_port_number(hcd, | 195 | raw_port_num = usb_hcd_find_raw_port_number(hcd, |
| 196 | port_num); | 196 | port_num); |
| 197 | *handle = acpi_get_child(DEVICE_ACPI_HANDLE(&udev->dev), | 197 | *handle = acpi_get_child(ACPI_HANDLE(&udev->dev), |
| 198 | raw_port_num); | 198 | raw_port_num); |
| 199 | if (!*handle) | 199 | if (!*handle) |
| 200 | return -ENODEV; | 200 | return -ENODEV; |
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c index d15f6e80479f..188825122aae 100644 --- a/drivers/xen/pci.c +++ b/drivers/xen/pci.c | |||
| @@ -59,12 +59,12 @@ static int xen_add_device(struct device *dev) | |||
| 59 | add.flags = XEN_PCI_DEV_EXTFN; | 59 | add.flags = XEN_PCI_DEV_EXTFN; |
| 60 | 60 | ||
| 61 | #ifdef CONFIG_ACPI | 61 | #ifdef CONFIG_ACPI |
| 62 | handle = DEVICE_ACPI_HANDLE(&pci_dev->dev); | 62 | handle = ACPI_HANDLE(&pci_dev->dev); |
| 63 | if (!handle && pci_dev->bus->bridge) | 63 | if (!handle && pci_dev->bus->bridge) |
| 64 | handle = DEVICE_ACPI_HANDLE(pci_dev->bus->bridge); | 64 | handle = ACPI_HANDLE(pci_dev->bus->bridge); |
| 65 | #ifdef CONFIG_PCI_IOV | 65 | #ifdef CONFIG_PCI_IOV |
| 66 | if (!handle && pci_dev->is_virtfn) | 66 | if (!handle && pci_dev->is_virtfn) |
| 67 | handle = DEVICE_ACPI_HANDLE(physfn->bus->bridge); | 67 | handle = ACPI_HANDLE(physfn->bus->bridge); |
| 68 | #endif | 68 | #endif |
| 69 | if (handle) { | 69 | if (handle) { |
| 70 | acpi_status status; | 70 | acpi_status status; |
