| author | Ingo Molnar <mingo@kernel.org> | 2017-08-25 05:04:51 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2017-08-25 05:04:51 -0400 |
| commit | 10c9850cb2ced2ce528e5b692c639974213a64ec (patch) | |
| tree | f8063beac0ba1dab069d25661845c5b7ef9a67c7 /drivers | |
| parent | 0c2364791343e4b04cd1f097ff2abc2799062448 (diff) | |
| parent | 90a6cd503982bfd33ce8c70eb49bd2dd33bc6325 (diff) | |
Merge branch 'linus' into locking/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'drivers')
173 files changed, 1502 insertions, 570 deletions
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index 538c61677c10..783f4c838aee 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
| @@ -100,9 +100,13 @@ acpi_evaluate_object_typed(acpi_handle handle, | |||
| 100 | free_buffer_on_error = TRUE; | 100 | free_buffer_on_error = TRUE; |
| 101 | } | 101 | } |
| 102 | 102 | ||
| 103 | status = acpi_get_handle(handle, pathname, &target_handle); | 103 | if (pathname) { |
| 104 | if (ACPI_FAILURE(status)) { | 104 | status = acpi_get_handle(handle, pathname, &target_handle); |
| 105 | return_ACPI_STATUS(status); | 105 | if (ACPI_FAILURE(status)) { |
| 106 | return_ACPI_STATUS(status); | ||
| 107 | } | ||
| 108 | } else { | ||
| 109 | target_handle = handle; | ||
| 106 | } | 110 | } |
| 107 | 111 | ||
| 108 | full_pathname = acpi_ns_get_external_pathname(target_handle); | 112 | full_pathname = acpi_ns_get_external_pathname(target_handle); |
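
The nsxfeval.c hunk above adds a NULL-pathname fallback: when no path is supplied, the passed-in handle itself becomes the evaluation target. A minimal standalone sketch of that fallback shape, with an invented lookup standing in for acpi_get_handle():

```c
#include <stdio.h>

typedef const char *node_handle;

/* Invented stand-in for acpi_get_handle(): resolve a path under a scope. */
static int lookup(node_handle scope, const char *path, node_handle *out)
{
	static char buf[64];

	snprintf(buf, sizeof(buf), "%s.%s", scope, path);
	*out = buf;
	return 0;
}

static int resolve_target(node_handle handle, const char *pathname,
			  node_handle *target)
{
	if (pathname)
		return lookup(handle, pathname, target);
	*target = handle;	/* no path given: evaluate the handle directly */
	return 0;
}

int main(void)
{
	node_handle target;

	resolve_target("\\_SB.PCI0", "LPCB", &target);
	printf("with path:    %s\n", target);
	resolve_target("\\_SB.PCI0", NULL, &target);
	printf("without path: %s\n", target);
	return 0;
}
```
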
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 62068a5e814f..ae3d6d152633 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
| @@ -1741,7 +1741,7 @@ error: | |||
| 1741 | * functioning ECDT EC first in order to handle the events. | 1741 | * functioning ECDT EC first in order to handle the events. |
| 1742 | * https://bugzilla.kernel.org/show_bug.cgi?id=115021 | 1742 | * https://bugzilla.kernel.org/show_bug.cgi?id=115021 |
| 1743 | */ | 1743 | */ |
| 1744 | int __init acpi_ec_ecdt_start(void) | 1744 | static int __init acpi_ec_ecdt_start(void) |
| 1745 | { | 1745 | { |
| 1746 | acpi_handle handle; | 1746 | acpi_handle handle; |
| 1747 | 1747 | ||
| @@ -2003,20 +2003,17 @@ static inline void acpi_ec_query_exit(void) | |||
| 2003 | int __init acpi_ec_init(void) | 2003 | int __init acpi_ec_init(void) |
| 2004 | { | 2004 | { |
| 2005 | int result; | 2005 | int result; |
| 2006 | int ecdt_fail, dsdt_fail; | ||
| 2006 | 2007 | ||
| 2007 | /* register workqueue for _Qxx evaluations */ | 2008 | /* register workqueue for _Qxx evaluations */ |
| 2008 | result = acpi_ec_query_init(); | 2009 | result = acpi_ec_query_init(); |
| 2009 | if (result) | 2010 | if (result) |
| 2010 | goto err_exit; | 2011 | return result; |
| 2011 | /* Now register the driver for the EC */ | ||
| 2012 | result = acpi_bus_register_driver(&acpi_ec_driver); | ||
| 2013 | if (result) | ||
| 2014 | goto err_exit; | ||
| 2015 | 2012 | ||
| 2016 | err_exit: | 2013 | /* Drivers must be started after acpi_ec_query_init() */ |
| 2017 | if (result) | 2014 | ecdt_fail = acpi_ec_ecdt_start(); |
| 2018 | acpi_ec_query_exit(); | 2015 | dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver); |
| 2019 | return result; | 2016 | return ecdt_fail && dsdt_fail ? -ENODEV : 0; |
| 2020 | } | 2017 | } |
| 2021 | 2018 | ||
| 2022 | /* EC driver currently not unloadable */ | 2019 | /* EC driver currently not unloadable */ |
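
In the rewritten acpi_ec_init() above, EC setup now counts as successful if either the ECDT path or the DSDT driver registration comes up, and -ENODEV is returned only when both fail. A small standalone sketch of that fail-only-if-both-fail aggregation, with invented probe functions standing in for the two paths:

```c
/*
 * Standalone illustration (not kernel code): initialization succeeds if
 * either probe path succeeds; -ENODEV only when both fail, mirroring the
 * "ecdt_fail && dsdt_fail" test in the hunk. Names are made up.
 */
#include <errno.h>
#include <stdio.h>

static int probe_path_a(void) { return -ENODEV; }  /* pretend ECDT probe fails */
static int probe_path_b(void) { return 0; }        /* pretend DSDT probe works */

static int combined_init(void)
{
	int a_fail = probe_path_a();
	int b_fail = probe_path_b();

	/* Only report failure when no path produced a working device. */
	return a_fail && b_fail ? -ENODEV : 0;
}

int main(void)
{
	printf("combined_init() = %d\n", combined_init());
	return 0;
}
```
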
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 58dd7ab3c653..3f5af4d7a739 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
| @@ -185,7 +185,6 @@ typedef int (*acpi_ec_query_func) (void *data); | |||
| 185 | int acpi_ec_init(void); | 185 | int acpi_ec_init(void); |
| 186 | int acpi_ec_ecdt_probe(void); | 186 | int acpi_ec_ecdt_probe(void); |
| 187 | int acpi_ec_dsdt_probe(void); | 187 | int acpi_ec_dsdt_probe(void); |
| 188 | int acpi_ec_ecdt_start(void); | ||
| 189 | void acpi_ec_block_transactions(void); | 188 | void acpi_ec_block_transactions(void); |
| 190 | void acpi_ec_unblock_transactions(void); | 189 | void acpi_ec_unblock_transactions(void); |
| 191 | int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit, | 190 | int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit, |
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 917c789f953d..476a52c60cf3 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
| @@ -1047,7 +1047,7 @@ static struct fwnode_handle *acpi_graph_get_child_prop_value( | |||
| 1047 | fwnode_for_each_child_node(fwnode, child) { | 1047 | fwnode_for_each_child_node(fwnode, child) { |
| 1048 | u32 nr; | 1048 | u32 nr; |
| 1049 | 1049 | ||
| 1050 | if (!fwnode_property_read_u32(fwnode, prop_name, &nr)) | 1050 | if (fwnode_property_read_u32(child, prop_name, &nr)) |
| 1051 | continue; | 1051 | continue; |
| 1052 | 1052 | ||
| 1053 | if (val == nr) | 1053 | if (val == nr) |
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 33897298f03e..70fd5502c284 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
| @@ -2084,7 +2084,6 @@ int __init acpi_scan_init(void) | |||
| 2084 | 2084 | ||
| 2085 | acpi_gpe_apply_masked_gpes(); | 2085 | acpi_gpe_apply_masked_gpes(); |
| 2086 | acpi_update_all_gpes(); | 2086 | acpi_update_all_gpes(); |
| 2087 | acpi_ec_ecdt_start(); | ||
| 2088 | 2087 | ||
| 2089 | acpi_scan_initialized = true; | 2088 | acpi_scan_initialized = true; |
| 2090 | 2089 | ||
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
index 4ac3e06b41d8..98aa8c808a33 100644
--- a/drivers/acpi/spcr.c
+++ b/drivers/acpi/spcr.c
| @@ -17,6 +17,16 @@ | |||
| 17 | #include <linux/serial_core.h> | 17 | #include <linux/serial_core.h> |
| 18 | 18 | ||
| 19 | /* | 19 | /* |
| 20 | * Erratum 44 for QDF2432v1 and QDF2400v1 SoCs describes the BUSY bit as | ||
| 21 | * occasionally getting stuck as 1. To avoid the potential for a hang, check | ||
| 22 | * TXFE == 0 instead of BUSY == 1. This may not be suitable for all UART | ||
| 23 | * implementations, so only do so if an affected platform is detected in | ||
| 24 | * parse_spcr(). | ||
| 25 | */ | ||
| 26 | bool qdf2400_e44_present; | ||
| 27 | EXPORT_SYMBOL(qdf2400_e44_present); | ||
| 28 | |||
| 29 | /* | ||
| 20 | * Some Qualcomm Datacenter Technologies SoCs have a defective UART BUSY bit. | 30 | * Some Qualcomm Datacenter Technologies SoCs have a defective UART BUSY bit. |
| 21 | * Detect them by examining the OEM fields in the SPCR header, similiar to PCI | 31 | * Detect them by examining the OEM fields in the SPCR header, similiar to PCI |
| 22 | * quirk detection in pci_mcfg.c. | 32 | * quirk detection in pci_mcfg.c. |
| @@ -147,8 +157,30 @@ int __init parse_spcr(bool earlycon) | |||
| 147 | goto done; | 157 | goto done; |
| 148 | } | 158 | } |
| 149 | 159 | ||
| 150 | if (qdf2400_erratum_44_present(&table->header)) | 160 | /* |
| 151 | uart = "qdf2400_e44"; | 161 | * If the E44 erratum is required, then we need to tell the pl011 |
| 162 | * driver to implement the work-around. | ||
| 163 | * | ||
| 164 | * The global variable is used by the probe function when it | ||
| 165 | * creates the UARTs, whether or not they're used as a console. | ||
| 166 | * | ||
| 167 | * If the user specifies "traditional" earlycon, the qdf2400_e44 | ||
| 168 | * console name matches the EARLYCON_DECLARE() statement, and | ||
| 169 | * SPCR is not used. Parameter "earlycon" is false. | ||
| 170 | * | ||
| 171 | * If the user specifies "SPCR" earlycon, then we need to update | ||
| 172 | * the console name so that it also says "qdf2400_e44". Parameter | ||
| 173 | * "earlycon" is true. | ||
| 174 | * | ||
| 175 | * For consistency, if we change the console name, then we do it | ||
| 176 | * for everyone, not just earlycon. | ||
| 177 | */ | ||
| 178 | if (qdf2400_erratum_44_present(&table->header)) { | ||
| 179 | qdf2400_e44_present = true; | ||
| 180 | if (earlycon) | ||
| 181 | uart = "qdf2400_e44"; | ||
| 182 | } | ||
| 183 | |||
| 152 | if (xgene_8250_erratum_present(table)) | 184 | if (xgene_8250_erratum_present(table)) |
| 153 | iotype = "mmio32"; | 185 | iotype = "mmio32"; |
| 154 | 186 | ||
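
The spcr.c comment above explains the split: qdf2400_e44_present is set once while the table is parsed so the UART probe can apply the TXFE-based workaround on every port, while the console name is only rewritten for SPCR-driven earlycon. A hedged userspace sketch of that set-early, consult-at-probe pattern; everything below except the flag's role is invented for illustration:

```c
/* Minimal sketch of the "set quirk flag early, consult it at probe time"
 * pattern; the kernel symbols involved (qdf2400_e44_present, the pl011
 * probe) are only referenced by analogy, nothing here is the real driver. */
#include <stdbool.h>
#include <stdio.h>

static bool quirk_e44_present;	/* set once during table parsing */

static void parse_firmware_table(bool oem_matches_errata)
{
	if (oem_matches_errata)
		quirk_e44_present = true;
}

static void uart_probe(void)
{
	/* Probe-time decision: poll TXFE instead of BUSY when the quirk is set. */
	if (quirk_e44_present)
		printf("using TXFE-based busy check (erratum workaround)\n");
	else
		printf("using standard BUSY bit check\n");
}

int main(void)
{
	parse_firmware_table(true);
	uart_probe();
	return 0;
}
```
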
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index b9f907eedbf7..bfbe1e154128 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
| @@ -30,7 +30,6 @@ | |||
| 30 | #include <linux/syscore_ops.h> | 30 | #include <linux/syscore_ops.h> |
| 31 | #include <linux/reboot.h> | 31 | #include <linux/reboot.h> |
| 32 | #include <linux/security.h> | 32 | #include <linux/security.h> |
| 33 | #include <linux/swait.h> | ||
| 34 | 33 | ||
| 35 | #include <generated/utsrelease.h> | 34 | #include <generated/utsrelease.h> |
| 36 | 35 | ||
| @@ -112,13 +111,13 @@ static inline long firmware_loading_timeout(void) | |||
| 112 | * state of the firmware loading. | 111 | * state of the firmware loading. |
| 113 | */ | 112 | */ |
| 114 | struct fw_state { | 113 | struct fw_state { |
| 115 | struct swait_queue_head wq; | 114 | struct completion completion; |
| 116 | enum fw_status status; | 115 | enum fw_status status; |
| 117 | }; | 116 | }; |
| 118 | 117 | ||
| 119 | static void fw_state_init(struct fw_state *fw_st) | 118 | static void fw_state_init(struct fw_state *fw_st) |
| 120 | { | 119 | { |
| 121 | init_swait_queue_head(&fw_st->wq); | 120 | init_completion(&fw_st->completion); |
| 122 | fw_st->status = FW_STATUS_UNKNOWN; | 121 | fw_st->status = FW_STATUS_UNKNOWN; |
| 123 | } | 122 | } |
| 124 | 123 | ||
| @@ -131,9 +130,7 @@ static int __fw_state_wait_common(struct fw_state *fw_st, long timeout) | |||
| 131 | { | 130 | { |
| 132 | long ret; | 131 | long ret; |
| 133 | 132 | ||
| 134 | ret = swait_event_interruptible_timeout(fw_st->wq, | 133 | ret = wait_for_completion_killable_timeout(&fw_st->completion, timeout); |
| 135 | __fw_state_is_done(READ_ONCE(fw_st->status)), | ||
| 136 | timeout); | ||
| 137 | if (ret != 0 && fw_st->status == FW_STATUS_ABORTED) | 134 | if (ret != 0 && fw_st->status == FW_STATUS_ABORTED) |
| 138 | return -ENOENT; | 135 | return -ENOENT; |
| 139 | if (!ret) | 136 | if (!ret) |
| @@ -148,35 +145,34 @@ static void __fw_state_set(struct fw_state *fw_st, | |||
| 148 | WRITE_ONCE(fw_st->status, status); | 145 | WRITE_ONCE(fw_st->status, status); |
| 149 | 146 | ||
| 150 | if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED) | 147 | if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED) |
| 151 | swake_up(&fw_st->wq); | 148 | complete_all(&fw_st->completion); |
| 152 | } | 149 | } |
| 153 | 150 | ||
| 154 | #define fw_state_start(fw_st) \ | 151 | #define fw_state_start(fw_st) \ |
| 155 | __fw_state_set(fw_st, FW_STATUS_LOADING) | 152 | __fw_state_set(fw_st, FW_STATUS_LOADING) |
| 156 | #define fw_state_done(fw_st) \ | 153 | #define fw_state_done(fw_st) \ |
| 157 | __fw_state_set(fw_st, FW_STATUS_DONE) | 154 | __fw_state_set(fw_st, FW_STATUS_DONE) |
| 155 | #define fw_state_aborted(fw_st) \ | ||
| 156 | __fw_state_set(fw_st, FW_STATUS_ABORTED) | ||
| 158 | #define fw_state_wait(fw_st) \ | 157 | #define fw_state_wait(fw_st) \ |
| 159 | __fw_state_wait_common(fw_st, MAX_SCHEDULE_TIMEOUT) | 158 | __fw_state_wait_common(fw_st, MAX_SCHEDULE_TIMEOUT) |
| 160 | 159 | ||
| 161 | #ifndef CONFIG_FW_LOADER_USER_HELPER | ||
| 162 | |||
| 163 | #define fw_state_is_aborted(fw_st) false | ||
| 164 | |||
| 165 | #else /* CONFIG_FW_LOADER_USER_HELPER */ | ||
| 166 | |||
| 167 | static int __fw_state_check(struct fw_state *fw_st, enum fw_status status) | 160 | static int __fw_state_check(struct fw_state *fw_st, enum fw_status status) |
| 168 | { | 161 | { |
| 169 | return fw_st->status == status; | 162 | return fw_st->status == status; |
| 170 | } | 163 | } |
| 171 | 164 | ||
| 165 | #define fw_state_is_aborted(fw_st) \ | ||
| 166 | __fw_state_check(fw_st, FW_STATUS_ABORTED) | ||
| 167 | |||
| 168 | #ifdef CONFIG_FW_LOADER_USER_HELPER | ||
| 169 | |||
| 172 | #define fw_state_aborted(fw_st) \ | 170 | #define fw_state_aborted(fw_st) \ |
| 173 | __fw_state_set(fw_st, FW_STATUS_ABORTED) | 171 | __fw_state_set(fw_st, FW_STATUS_ABORTED) |
| 174 | #define fw_state_is_done(fw_st) \ | 172 | #define fw_state_is_done(fw_st) \ |
| 175 | __fw_state_check(fw_st, FW_STATUS_DONE) | 173 | __fw_state_check(fw_st, FW_STATUS_DONE) |
| 176 | #define fw_state_is_loading(fw_st) \ | 174 | #define fw_state_is_loading(fw_st) \ |
| 177 | __fw_state_check(fw_st, FW_STATUS_LOADING) | 175 | __fw_state_check(fw_st, FW_STATUS_LOADING) |
| 178 | #define fw_state_is_aborted(fw_st) \ | ||
| 179 | __fw_state_check(fw_st, FW_STATUS_ABORTED) | ||
| 180 | #define fw_state_wait_timeout(fw_st, timeout) \ | 176 | #define fw_state_wait_timeout(fw_st, timeout) \ |
| 181 | __fw_state_wait_common(fw_st, timeout) | 177 | __fw_state_wait_common(fw_st, timeout) |
| 182 | 178 | ||
| @@ -1200,6 +1196,28 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name, | |||
| 1200 | return 1; /* need to load */ | 1196 | return 1; /* need to load */ |
| 1201 | } | 1197 | } |
| 1202 | 1198 | ||
| 1199 | /* | ||
| 1200 | * Batched requests need only one wake, we need to do this step last due to the | ||
| 1201 | * fallback mechanism. The buf is protected with kref_get(), and it won't be | ||
| 1202 | * released until the last user calls release_firmware(). | ||
| 1203 | * | ||
| 1204 | * Failed batched requests are possible as well, in such cases we just share | ||
| 1205 | * the struct firmware_buf and won't release it until all requests are woken | ||
| 1206 | * and have gone through this same path. | ||
| 1207 | */ | ||
| 1208 | static void fw_abort_batch_reqs(struct firmware *fw) | ||
| 1209 | { | ||
| 1210 | struct firmware_buf *buf; | ||
| 1211 | |||
| 1212 | /* Loaded directly? */ | ||
| 1213 | if (!fw || !fw->priv) | ||
| 1214 | return; | ||
| 1215 | |||
| 1216 | buf = fw->priv; | ||
| 1217 | if (!fw_state_is_aborted(&buf->fw_st)) | ||
| 1218 | fw_state_aborted(&buf->fw_st); | ||
| 1219 | } | ||
| 1220 | |||
| 1203 | /* called from request_firmware() and request_firmware_work_func() */ | 1221 | /* called from request_firmware() and request_firmware_work_func() */ |
| 1204 | static int | 1222 | static int |
| 1205 | _request_firmware(const struct firmware **firmware_p, const char *name, | 1223 | _request_firmware(const struct firmware **firmware_p, const char *name, |
| @@ -1243,6 +1261,7 @@ _request_firmware(const struct firmware **firmware_p, const char *name, | |||
| 1243 | 1261 | ||
| 1244 | out: | 1262 | out: |
| 1245 | if (ret < 0) { | 1263 | if (ret < 0) { |
| 1264 | fw_abort_batch_reqs(fw); | ||
| 1246 | release_firmware(fw); | 1265 | release_firmware(fw); |
| 1247 | fw = NULL; | 1266 | fw = NULL; |
| 1248 | } | 1267 | } |
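
The firmware_class.c changes above replace the swait queue with a struct completion, so a single complete_all() releases every batched waiter, and fw_abort_batch_reqs() makes sure a failed request still wakes them. A rough pthread-based analogue of that wait/complete/abort shape (the kernel's killable wait and timeout are omitted for brevity; build with -lpthread):

```c
/* Hedged sketch using plain pthreads, not the kernel's completion API: a
 * "loader" thread finishes or aborts a request, a waiter blocks until
 * either state is reached, then maps ABORTED to -ENOENT like the driver. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

enum fw_status { FW_UNKNOWN, FW_LOADING, FW_DONE, FW_ABORTED };

struct fw_state {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	enum fw_status status;
};

static void fw_state_set(struct fw_state *st, enum fw_status status)
{
	pthread_mutex_lock(&st->lock);
	st->status = status;
	if (status == FW_DONE || status == FW_ABORTED)
		pthread_cond_broadcast(&st->cond);	/* analogue of complete_all() */
	pthread_mutex_unlock(&st->lock);
}

static int fw_state_wait(struct fw_state *st)
{
	int aborted;

	pthread_mutex_lock(&st->lock);
	while (st->status != FW_DONE && st->status != FW_ABORTED)
		pthread_cond_wait(&st->cond, &st->lock);
	aborted = (st->status == FW_ABORTED);
	pthread_mutex_unlock(&st->lock);
	return aborted ? -ENOENT : 0;
}

static void *loader(void *arg)
{
	struct fw_state *st = arg;

	usleep(10000);			/* pretend to fetch the firmware */
	fw_state_set(st, FW_ABORTED);	/* a failed batched request aborts everyone */
	return NULL;
}

int main(void)
{
	struct fw_state st = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cond = PTHREAD_COND_INITIALIZER,
		.status = FW_LOADING,
	};
	pthread_t t;

	pthread_create(&t, NULL, loader, &st);
	printf("wait returned %d\n", fw_state_wait(&st));
	pthread_join(&t, NULL);
	return 0;
}
```
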
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 98e34e4c62b8..2468c28d4771 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
| @@ -2075,9 +2075,9 @@ static int blkfront_resume(struct xenbus_device *dev) | |||
| 2075 | /* | 2075 | /* |
| 2076 | * Get the bios in the request so we can re-queue them. | 2076 | * Get the bios in the request so we can re-queue them. |
| 2077 | */ | 2077 | */ |
| 2078 | if (req_op(shadow[i].request) == REQ_OP_FLUSH || | 2078 | if (req_op(shadow[j].request) == REQ_OP_FLUSH || |
| 2079 | req_op(shadow[i].request) == REQ_OP_DISCARD || | 2079 | req_op(shadow[j].request) == REQ_OP_DISCARD || |
| 2080 | req_op(shadow[i].request) == REQ_OP_SECURE_ERASE || | 2080 | req_op(shadow[j].request) == REQ_OP_SECURE_ERASE || |
| 2081 | shadow[j].request->cmd_flags & REQ_FUA) { | 2081 | shadow[j].request->cmd_flags & REQ_FUA) { |
| 2082 | /* | 2082 | /* |
| 2083 | * Flush operations don't contain bios, so | 2083 | * Flush operations don't contain bios, so |
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index fcae5ca6ac92..54a67f8a28eb 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
| @@ -262,7 +262,7 @@ config CLKSRC_LPC32XX | |||
| 262 | 262 | ||
| 263 | config CLKSRC_PISTACHIO | 263 | config CLKSRC_PISTACHIO |
| 264 | bool "Clocksource for Pistachio SoC" if COMPILE_TEST | 264 | bool "Clocksource for Pistachio SoC" if COMPILE_TEST |
| 265 | depends on HAS_IOMEM | 265 | depends on GENERIC_CLOCKEVENTS && HAS_IOMEM |
| 266 | select TIMER_OF | 266 | select TIMER_OF |
| 267 | help | 267 | help |
| 268 | Enables the clocksource for the Pistachio SoC. | 268 | Enables the clocksource for the Pistachio SoC. |
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index c62e71614c75..fd4b7f684bd0 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
| @@ -1444,7 +1444,7 @@ static int __init arch_timer_mem_acpi_init(int platform_timer_count) | |||
| 1444 | * While unlikely, it's theoretically possible that none of the frames | 1444 | * While unlikely, it's theoretically possible that none of the frames |
| 1445 | * in a timer expose the combination of feature we want. | 1445 | * in a timer expose the combination of feature we want. |
| 1446 | */ | 1446 | */ |
| 1447 | for (i = i; i < timer_count; i++) { | 1447 | for (i = 0; i < timer_count; i++) { |
| 1448 | timer = &timers[i]; | 1448 | timer = &timers[i]; |
| 1449 | 1449 | ||
| 1450 | frame = arch_timer_mem_find_best_frame(timer); | 1450 | frame = arch_timer_mem_find_best_frame(timer); |
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
index bc48cbf6a795..269db74a0658 100644
--- a/drivers/clocksource/em_sti.c
+++ b/drivers/clocksource/em_sti.c
| @@ -305,7 +305,7 @@ static int em_sti_probe(struct platform_device *pdev) | |||
| 305 | irq = platform_get_irq(pdev, 0); | 305 | irq = platform_get_irq(pdev, 0); |
| 306 | if (irq < 0) { | 306 | if (irq < 0) { |
| 307 | dev_err(&pdev->dev, "failed to get irq\n"); | 307 | dev_err(&pdev->dev, "failed to get irq\n"); |
| 308 | return -EINVAL; | 308 | return irq; |
| 309 | } | 309 | } |
| 310 | 310 | ||
| 311 | /* map memory, let base point to the STI instance */ | 311 | /* map memory, let base point to the STI instance */ |
| @@ -314,11 +314,12 @@ static int em_sti_probe(struct platform_device *pdev) | |||
| 314 | if (IS_ERR(p->base)) | 314 | if (IS_ERR(p->base)) |
| 315 | return PTR_ERR(p->base); | 315 | return PTR_ERR(p->base); |
| 316 | 316 | ||
| 317 | if (devm_request_irq(&pdev->dev, irq, em_sti_interrupt, | 317 | ret = devm_request_irq(&pdev->dev, irq, em_sti_interrupt, |
| 318 | IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING, | 318 | IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING, |
| 319 | dev_name(&pdev->dev), p)) { | 319 | dev_name(&pdev->dev), p); |
| 320 | if (ret) { | ||
| 320 | dev_err(&pdev->dev, "failed to request low IRQ\n"); | 321 | dev_err(&pdev->dev, "failed to request low IRQ\n"); |
| 321 | return -ENOENT; | 322 | return ret; |
| 322 | } | 323 | } |
| 323 | 324 | ||
| 324 | /* get hold of clock */ | 325 | /* get hold of clock */ |
diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
index d509b500a7b5..4d7aef9d9c15 100644
--- a/drivers/clocksource/timer-of.c
+++ b/drivers/clocksource/timer-of.c
| @@ -128,9 +128,9 @@ static __init int timer_base_init(struct device_node *np, | |||
| 128 | const char *name = of_base->name ? of_base->name : np->full_name; | 128 | const char *name = of_base->name ? of_base->name : np->full_name; |
| 129 | 129 | ||
| 130 | of_base->base = of_io_request_and_map(np, of_base->index, name); | 130 | of_base->base = of_io_request_and_map(np, of_base->index, name); |
| 131 | if (!of_base->base) { | 131 | if (IS_ERR(of_base->base)) { |
| 132 | pr_err("Failed to iomap (%s)\n", name); | 132 | pr_err("Failed to iomap (%s)\n", name); |
| 133 | return -ENXIO; | 133 | return PTR_ERR(of_base->base); |
| 134 | } | 134 | } |
| 135 | 135 | ||
| 136 | return 0; | 136 | return 0; |
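
The timer-of.c fix reflects that of_io_request_and_map() reports failure as an ERR_PTR-encoded pointer rather than NULL, so the old NULL test could never fire. A standalone re-implementation of the ERR_PTR/IS_ERR idiom, for illustration only (the real macros live in <linux/err.h>):

```c
/* Errors are encoded in the last page of the address space, so a failed
 * mapping is never NULL; callers must test with IS_ERR(), not !ptr. */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *fake_io_map(int fail)
{
	return fail ? ERR_PTR(-ENOMEM) : (void *)(unsigned long)0x1000;
}

int main(void)
{
	void *base = fake_io_map(1);

	if (IS_ERR(base)) {		/* a NULL check would miss this */
		printf("map failed: %ld\n", PTR_ERR(base));
		return 1;
	}
	printf("mapped at %p\n", base);
	return 0;
}
```
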
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 0566455f233e..65ee4fcace1f 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
| @@ -1613,8 +1613,7 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time) | |||
| 1613 | 1613 | ||
| 1614 | static inline int32_t get_avg_frequency(struct cpudata *cpu) | 1614 | static inline int32_t get_avg_frequency(struct cpudata *cpu) |
| 1615 | { | 1615 | { |
| 1616 | return mul_ext_fp(cpu->sample.core_avg_perf, | 1616 | return mul_ext_fp(cpu->sample.core_avg_perf, cpu_khz); |
| 1617 | cpu->pstate.max_pstate_physical * cpu->pstate.scaling); | ||
| 1618 | } | 1617 | } |
| 1619 | 1618 | ||
| 1620 | static inline int32_t get_avg_pstate(struct cpudata *cpu) | 1619 | static inline int32_t get_avg_pstate(struct cpudata *cpu) |
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index 37b0698b7193..42896a67aeae 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
| @@ -235,6 +235,7 @@ static inline int validate_dt_prop_sizes(const char *prop1, int prop1_len, | |||
| 235 | return -1; | 235 | return -1; |
| 236 | } | 236 | } |
| 237 | 237 | ||
| 238 | extern u32 pnv_get_supported_cpuidle_states(void); | ||
| 238 | static int powernv_add_idle_states(void) | 239 | static int powernv_add_idle_states(void) |
| 239 | { | 240 | { |
| 240 | struct device_node *power_mgt; | 241 | struct device_node *power_mgt; |
| @@ -248,6 +249,8 @@ static int powernv_add_idle_states(void) | |||
| 248 | const char *names[CPUIDLE_STATE_MAX]; | 249 | const char *names[CPUIDLE_STATE_MAX]; |
| 249 | u32 has_stop_states = 0; | 250 | u32 has_stop_states = 0; |
| 250 | int i, rc; | 251 | int i, rc; |
| 252 | u32 supported_flags = pnv_get_supported_cpuidle_states(); | ||
| 253 | |||
| 251 | 254 | ||
| 252 | /* Currently we have snooze statically defined */ | 255 | /* Currently we have snooze statically defined */ |
| 253 | 256 | ||
| @@ -362,6 +365,13 @@ static int powernv_add_idle_states(void) | |||
| 362 | for (i = 0; i < dt_idle_states; i++) { | 365 | for (i = 0; i < dt_idle_states; i++) { |
| 363 | unsigned int exit_latency, target_residency; | 366 | unsigned int exit_latency, target_residency; |
| 364 | bool stops_timebase = false; | 367 | bool stops_timebase = false; |
| 368 | |||
| 369 | /* | ||
| 370 | * Skip the platform idle state whose flag isn't in | ||
| 371 | * the supported_cpuidle_states flag mask. | ||
| 372 | */ | ||
| 373 | if ((flags[i] & supported_flags) != flags[i]) | ||
| 374 | continue; | ||
| 365 | /* | 375 | /* |
| 366 | * If an idle state has exit latency beyond | 376 | * If an idle state has exit latency beyond |
| 367 | * POWERNV_THRESHOLD_LATENCY_NS then don't use it | 377 | * POWERNV_THRESHOLD_LATENCY_NS then don't use it |
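
The new check in powernv_add_idle_states() skips any device-tree idle state whose flag bits are not all present in the firmware-supplied mask, i.e. (flags[i] & supported_flags) != flags[i]. A short sketch of that subset-of-mask test, with made-up flag values:

```c
/* Sketch of the "all of this state's flag bits must be in the supported
 * mask" test used above; the flag values here are invented for the example. */
#include <stdio.h>

#define IDLE_NAP	0x01
#define IDLE_SLEEP	0x02
#define IDLE_WINKLE	0x04

int main(void)
{
	unsigned int supported = IDLE_NAP | IDLE_SLEEP;	/* from firmware */
	unsigned int states[] = { IDLE_NAP, IDLE_SLEEP, IDLE_WINKLE,
				  IDLE_SLEEP | IDLE_WINKLE };

	for (unsigned int i = 0; i < sizeof(states) / sizeof(states[0]); i++) {
		if ((states[i] & supported) != states[i]) {
			printf("state 0x%x skipped (unsupported bits)\n", states[i]);
			continue;
		}
		printf("state 0x%x registered\n", states[i]);
	}
	return 0;
}
```
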
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 427cbe012729..dadc4a808df5 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
| @@ -1073,7 +1073,7 @@ static int aead_perform(struct aead_request *req, int encrypt, | |||
| 1073 | req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags, | 1073 | req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags, |
| 1074 | &crypt->icv_rev_aes); | 1074 | &crypt->icv_rev_aes); |
| 1075 | if (unlikely(!req_ctx->hmac_virt)) | 1075 | if (unlikely(!req_ctx->hmac_virt)) |
| 1076 | goto free_buf_src; | 1076 | goto free_buf_dst; |
| 1077 | if (!encrypt) { | 1077 | if (!encrypt) { |
| 1078 | scatterwalk_map_and_copy(req_ctx->hmac_virt, | 1078 | scatterwalk_map_and_copy(req_ctx->hmac_virt, |
| 1079 | req->src, cryptlen, authsize, 0); | 1079 | req->src, cryptlen, authsize, 0); |
| @@ -1088,10 +1088,10 @@ static int aead_perform(struct aead_request *req, int encrypt, | |||
| 1088 | BUG_ON(qmgr_stat_overflow(SEND_QID)); | 1088 | BUG_ON(qmgr_stat_overflow(SEND_QID)); |
| 1089 | return -EINPROGRESS; | 1089 | return -EINPROGRESS; |
| 1090 | 1090 | ||
| 1091 | free_buf_src: | ||
| 1092 | free_buf_chain(dev, req_ctx->src, crypt->src_buf); | ||
| 1093 | free_buf_dst: | 1091 | free_buf_dst: |
| 1094 | free_buf_chain(dev, req_ctx->dst, crypt->dst_buf); | 1092 | free_buf_chain(dev, req_ctx->dst, crypt->dst_buf); |
| 1093 | free_buf_src: | ||
| 1094 | free_buf_chain(dev, req_ctx->src, crypt->src_buf); | ||
| 1095 | crypt->ctl_flags = CTL_FLAG_UNUSED; | 1095 | crypt->ctl_flags = CTL_FLAG_UNUSED; |
| 1096 | return -ENOMEM; | 1096 | return -ENOMEM; |
| 1097 | } | 1097 | } |
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index e338c3743562..45c65f805fd6 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
| @@ -557,7 +557,7 @@ static void mvebu_gpio_irq_handler(struct irq_desc *desc) | |||
| 557 | edge_cause = mvebu_gpio_read_edge_cause(mvchip); | 557 | edge_cause = mvebu_gpio_read_edge_cause(mvchip); |
| 558 | edge_mask = mvebu_gpio_read_edge_mask(mvchip); | 558 | edge_mask = mvebu_gpio_read_edge_mask(mvchip); |
| 559 | 559 | ||
| 560 | cause = (data_in ^ level_mask) | (edge_cause & edge_mask); | 560 | cause = (data_in & level_mask) | (edge_cause & edge_mask); |
| 561 | 561 | ||
| 562 | for (i = 0; i < mvchip->chip.ngpio; i++) { | 562 | for (i = 0; i < mvchip->chip.ngpio; i++) { |
| 563 | int irq; | 563 | int irq; |
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 16fe9742597b..fc80add5fedb 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
| @@ -2,6 +2,7 @@ | |||
| 2 | #include <linux/mutex.h> | 2 | #include <linux/mutex.h> |
| 3 | #include <linux/device.h> | 3 | #include <linux/device.h> |
| 4 | #include <linux/sysfs.h> | 4 | #include <linux/sysfs.h> |
| 5 | #include <linux/gpio.h> | ||
| 5 | #include <linux/gpio/consumer.h> | 6 | #include <linux/gpio/consumer.h> |
| 6 | #include <linux/gpio/driver.h> | 7 | #include <linux/gpio/driver.h> |
| 7 | #include <linux/interrupt.h> | 8 | #include <linux/interrupt.h> |
| @@ -432,6 +433,11 @@ static struct attribute *gpiochip_attrs[] = { | |||
| 432 | }; | 433 | }; |
| 433 | ATTRIBUTE_GROUPS(gpiochip); | 434 | ATTRIBUTE_GROUPS(gpiochip); |
| 434 | 435 | ||
| 436 | static struct gpio_desc *gpio_to_valid_desc(int gpio) | ||
| 437 | { | ||
| 438 | return gpio_is_valid(gpio) ? gpio_to_desc(gpio) : NULL; | ||
| 439 | } | ||
| 440 | |||
| 435 | /* | 441 | /* |
| 436 | * /sys/class/gpio/export ... write-only | 442 | * /sys/class/gpio/export ... write-only |
| 437 | * integer N ... number of GPIO to export (full access) | 443 | * integer N ... number of GPIO to export (full access) |
| @@ -450,7 +456,7 @@ static ssize_t export_store(struct class *class, | |||
| 450 | if (status < 0) | 456 | if (status < 0) |
| 451 | goto done; | 457 | goto done; |
| 452 | 458 | ||
| 453 | desc = gpio_to_desc(gpio); | 459 | desc = gpio_to_valid_desc(gpio); |
| 454 | /* reject invalid GPIOs */ | 460 | /* reject invalid GPIOs */ |
| 455 | if (!desc) { | 461 | if (!desc) { |
| 456 | pr_warn("%s: invalid GPIO %ld\n", __func__, gpio); | 462 | pr_warn("%s: invalid GPIO %ld\n", __func__, gpio); |
| @@ -493,7 +499,7 @@ static ssize_t unexport_store(struct class *class, | |||
| 493 | if (status < 0) | 499 | if (status < 0) |
| 494 | goto done; | 500 | goto done; |
| 495 | 501 | ||
| 496 | desc = gpio_to_desc(gpio); | 502 | desc = gpio_to_valid_desc(gpio); |
| 497 | /* reject bogus commands (gpio_unexport ignores them) */ | 503 | /* reject bogus commands (gpio_unexport ignores them) */ |
| 498 | if (!desc) { | 504 | if (!desc) { |
| 499 | pr_warn("%s: invalid GPIO %ld\n", __func__, gpio); | 505 | pr_warn("%s: invalid GPIO %ld\n", __func__, gpio); |
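
Both sysfs store paths above now go through gpio_to_valid_desc(), which rejects out-of-range numbers before gpio_to_desc() is ever called. A toy version of that validate-then-look-up wrapper, with an invented bound and descriptor table:

```c
/* Minimal sketch of the wrapper added above: reject numbers outside the
 * valid range before they reach the lookup. Bounds and table are simulated. */
#include <stdio.h>

#define NGPIO 256

static const char *descs[NGPIO];	/* stand-in for the descriptor table */

static const char *gpio_to_valid_desc(long gpio)
{
	if (gpio < 0 || gpio >= NGPIO)	/* invalid number: never index the table */
		return NULL;
	return descs[gpio] ? descs[gpio] : "unclaimed-gpio";
}

int main(void)
{
	printf("%s\n", gpio_to_valid_desc(12)  ? "gpio 12 ok"  : "gpio 12 rejected");
	printf("%s\n", gpio_to_valid_desc(-1)  ? "gpio -1 ok"  : "gpio -1 rejected");
	printf("%s\n", gpio_to_valid_desc(999) ? "gpio 999 ok" : "gpio 999 rejected");
	return 0;
}
```
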
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index a6899180b265..c586f44312f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
| @@ -244,6 +244,12 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, | |||
| 244 | struct dma_fence *f = e->fence; | 244 | struct dma_fence *f = e->fence; |
| 245 | struct amd_sched_fence *s_fence = to_amd_sched_fence(f); | 245 | struct amd_sched_fence *s_fence = to_amd_sched_fence(f); |
| 246 | 246 | ||
| 247 | if (dma_fence_is_signaled(f)) { | ||
| 248 | hash_del(&e->node); | ||
| 249 | dma_fence_put(f); | ||
| 250 | kmem_cache_free(amdgpu_sync_slab, e); | ||
| 251 | continue; | ||
| 252 | } | ||
| 247 | if (ring && s_fence) { | 253 | if (ring && s_fence) { |
| 248 | /* For fences from the same ring it is sufficient | 254 | /* For fences from the same ring it is sufficient |
| 249 | * when they are scheduled. | 255 | * when they are scheduled. |
| @@ -256,13 +262,6 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, | |||
| 256 | } | 262 | } |
| 257 | } | 263 | } |
| 258 | 264 | ||
| 259 | if (dma_fence_is_signaled(f)) { | ||
| 260 | hash_del(&e->node); | ||
| 261 | dma_fence_put(f); | ||
| 262 | kmem_cache_free(amdgpu_sync_slab, e); | ||
| 263 | continue; | ||
| 264 | } | ||
| 265 | |||
| 266 | return f; | 265 | return f; |
| 267 | } | 266 | } |
| 268 | 267 | ||
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0511fce5c947..e3a81ed66bc2 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
| @@ -4581,7 +4581,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv, | |||
| 4581 | 4581 | ||
| 4582 | sseu->slice_mask |= BIT(s); | 4582 | sseu->slice_mask |= BIT(s); |
| 4583 | 4583 | ||
| 4584 | if (IS_GEN9_BC(dev_priv)) | 4584 | if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) |
| 4585 | sseu->subslice_mask = | 4585 | sseu->subslice_mask = |
| 4586 | INTEL_INFO(dev_priv)->sseu.subslice_mask; | 4586 | INTEL_INFO(dev_priv)->sseu.subslice_mask; |
| 4587 | 4587 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 39ed58a21fc1..e1e971ee2ed5 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
| @@ -688,19 +688,19 @@ static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt, | |||
| 688 | } | 688 | } |
| 689 | 689 | ||
| 690 | static bool | 690 | static bool |
| 691 | needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt, | 691 | needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt, struct intel_engine_cs *engine) |
| 692 | struct intel_engine_cs *engine, | ||
| 693 | struct i915_gem_context *to) | ||
| 694 | { | 692 | { |
| 693 | struct i915_gem_context *from = engine->legacy_active_context; | ||
| 694 | |||
| 695 | if (!ppgtt) | 695 | if (!ppgtt) |
| 696 | return false; | 696 | return false; |
| 697 | 697 | ||
| 698 | /* Always load the ppgtt on first use */ | 698 | /* Always load the ppgtt on first use */ |
| 699 | if (!engine->legacy_active_context) | 699 | if (!from) |
| 700 | return true; | 700 | return true; |
| 701 | 701 | ||
| 702 | /* Same context without new entries, skip */ | 702 | /* Same context without new entries, skip */ |
| 703 | if (engine->legacy_active_context == to && | 703 | if ((!from->ppgtt || from->ppgtt == ppgtt) && |
| 704 | !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings)) | 704 | !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings)) |
| 705 | return false; | 705 | return false; |
| 706 | 706 | ||
| @@ -744,7 +744,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) | |||
| 744 | if (skip_rcs_switch(ppgtt, engine, to)) | 744 | if (skip_rcs_switch(ppgtt, engine, to)) |
| 745 | return 0; | 745 | return 0; |
| 746 | 746 | ||
| 747 | if (needs_pd_load_pre(ppgtt, engine, to)) { | 747 | if (needs_pd_load_pre(ppgtt, engine)) { |
| 748 | /* Older GENs and non render rings still want the load first, | 748 | /* Older GENs and non render rings still want the load first, |
| 749 | * "PP_DCLV followed by PP_DIR_BASE register through Load | 749 | * "PP_DCLV followed by PP_DIR_BASE register through Load |
| 750 | * Register Immediate commands in Ring Buffer before submitting | 750 | * Register Immediate commands in Ring Buffer before submitting |
| @@ -841,7 +841,7 @@ int i915_switch_context(struct drm_i915_gem_request *req) | |||
| 841 | struct i915_hw_ppgtt *ppgtt = | 841 | struct i915_hw_ppgtt *ppgtt = |
| 842 | to->ppgtt ?: req->i915->mm.aliasing_ppgtt; | 842 | to->ppgtt ?: req->i915->mm.aliasing_ppgtt; |
| 843 | 843 | ||
| 844 | if (needs_pd_load_pre(ppgtt, engine, to)) { | 844 | if (needs_pd_load_pre(ppgtt, engine)) { |
| 845 | int ret; | 845 | int ret; |
| 846 | 846 | ||
| 847 | trace_switch_mm(engine, to); | 847 | trace_switch_mm(engine, to); |
| @@ -852,6 +852,7 @@ int i915_switch_context(struct drm_i915_gem_request *req) | |||
| 852 | ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine); | 852 | ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine); |
| 853 | } | 853 | } |
| 854 | 854 | ||
| 855 | engine->legacy_active_context = to; | ||
| 855 | return 0; | 856 | return 0; |
| 856 | } | 857 | } |
| 857 | 858 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 7032c542a9b1..4dd4c2159a92 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
| @@ -242,6 +242,10 @@ int i915_gem_render_state_emit(struct drm_i915_gem_request *req) | |||
| 242 | goto err_unpin; | 242 | goto err_unpin; |
| 243 | } | 243 | } |
| 244 | 244 | ||
| 245 | ret = req->engine->emit_flush(req, EMIT_INVALIDATE); | ||
| 246 | if (ret) | ||
| 247 | goto err_unpin; | ||
| 248 | |||
| 245 | ret = req->engine->emit_bb_start(req, | 249 | ret = req->engine->emit_bb_start(req, |
| 246 | so->batch_offset, so->batch_size, | 250 | so->batch_offset, so->batch_size, |
| 247 | I915_DISPATCH_SECURE); | 251 | I915_DISPATCH_SECURE); |
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 9edeaaef77ad..d3b3252a8742 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
| @@ -1762,7 +1762,7 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, | |||
| 1762 | if (dev_priv->vbt.edp.low_vswing) { | 1762 | if (dev_priv->vbt.edp.low_vswing) { |
| 1763 | if (voltage == VOLTAGE_INFO_0_85V) { | 1763 | if (voltage == VOLTAGE_INFO_0_85V) { |
| 1764 | *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_85V); | 1764 | *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_85V); |
| 1765 | return cnl_ddi_translations_dp_0_85V; | 1765 | return cnl_ddi_translations_edp_0_85V; |
| 1766 | } else if (voltage == VOLTAGE_INFO_0_95V) { | 1766 | } else if (voltage == VOLTAGE_INFO_0_95V) { |
| 1767 | *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_95V); | 1767 | *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_95V); |
| 1768 | return cnl_ddi_translations_edp_0_95V; | 1768 | return cnl_ddi_translations_edp_0_95V; |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 9471c88d449e..cc484b56eeaa 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
| @@ -3485,6 +3485,13 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv) | |||
| 3485 | !gpu_reset_clobbers_display(dev_priv)) | 3485 | !gpu_reset_clobbers_display(dev_priv)) |
| 3486 | return; | 3486 | return; |
| 3487 | 3487 | ||
| 3488 | /* We have a modeset vs reset deadlock, defensively unbreak it. | ||
| 3489 | * | ||
| 3490 | * FIXME: We can do a _lot_ better, this is just a first iteration. | ||
| 3491 | */ | ||
| 3492 | i915_gem_set_wedged(dev_priv); | ||
| 3493 | DRM_DEBUG_DRIVER("Wedging GPU to avoid deadlocks with pending modeset updates\n"); | ||
| 3494 | |||
| 3488 | /* | 3495 | /* |
| 3489 | * Need mode_config.mutex so that we don't | 3496 | * Need mode_config.mutex so that we don't |
| 3490 | * trample ongoing ->detect() and whatnot. | 3497 | * trample ongoing ->detect() and whatnot. |
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 52b3a1fd4059..57ef5833c427 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
| @@ -63,7 +63,6 @@ enum { | |||
| 63 | }; | 63 | }; |
| 64 | 64 | ||
| 65 | /* Logical Rings */ | 65 | /* Logical Rings */ |
| 66 | void intel_logical_ring_stop(struct intel_engine_cs *engine); | ||
| 67 | void intel_logical_ring_cleanup(struct intel_engine_cs *engine); | 66 | void intel_logical_ring_cleanup(struct intel_engine_cs *engine); |
| 68 | int logical_render_ring_init(struct intel_engine_cs *engine); | 67 | int logical_render_ring_init(struct intel_engine_cs *engine); |
| 69 | int logical_xcs_ring_init(struct intel_engine_cs *engine); | 68 | int logical_xcs_ring_init(struct intel_engine_cs *engine); |
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index 6b5d3be283c4..807299dd45eb 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
| @@ -193,7 +193,6 @@ struct bmc150_accel_data { | |||
| 193 | struct regmap *regmap; | 193 | struct regmap *regmap; |
| 194 | int irq; | 194 | int irq; |
| 195 | struct bmc150_accel_interrupt interrupts[BMC150_ACCEL_INTERRUPTS]; | 195 | struct bmc150_accel_interrupt interrupts[BMC150_ACCEL_INTERRUPTS]; |
| 196 | atomic_t active_intr; | ||
| 197 | struct bmc150_accel_trigger triggers[BMC150_ACCEL_TRIGGERS]; | 196 | struct bmc150_accel_trigger triggers[BMC150_ACCEL_TRIGGERS]; |
| 198 | struct mutex mutex; | 197 | struct mutex mutex; |
| 199 | u8 fifo_mode, watermark; | 198 | u8 fifo_mode, watermark; |
| @@ -493,11 +492,6 @@ static int bmc150_accel_set_interrupt(struct bmc150_accel_data *data, int i, | |||
| 493 | goto out_fix_power_state; | 492 | goto out_fix_power_state; |
| 494 | } | 493 | } |
| 495 | 494 | ||
| 496 | if (state) | ||
| 497 | atomic_inc(&data->active_intr); | ||
| 498 | else | ||
| 499 | atomic_dec(&data->active_intr); | ||
| 500 | |||
| 501 | return 0; | 495 | return 0; |
| 502 | 496 | ||
| 503 | out_fix_power_state: | 497 | out_fix_power_state: |
| @@ -1710,8 +1704,7 @@ static int bmc150_accel_resume(struct device *dev) | |||
| 1710 | struct bmc150_accel_data *data = iio_priv(indio_dev); | 1704 | struct bmc150_accel_data *data = iio_priv(indio_dev); |
| 1711 | 1705 | ||
| 1712 | mutex_lock(&data->mutex); | 1706 | mutex_lock(&data->mutex); |
| 1713 | if (atomic_read(&data->active_intr)) | 1707 | bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0); |
| 1714 | bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0); | ||
| 1715 | bmc150_accel_fifo_set_mode(data); | 1708 | bmc150_accel_fifo_set_mode(data); |
| 1716 | mutex_unlock(&data->mutex); | 1709 | mutex_unlock(&data->mutex); |
| 1717 | 1710 | ||
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 07d1489cd457..e44f62bf9caa 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
| @@ -166,6 +166,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { | |||
| 166 | .mask_ihl = 0x02, | 166 | .mask_ihl = 0x02, |
| 167 | .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, | 167 | .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, |
| 168 | }, | 168 | }, |
| 169 | .sim = { | ||
| 170 | .addr = 0x23, | ||
| 171 | .value = BIT(0), | ||
| 172 | }, | ||
| 169 | .multi_read_bit = true, | 173 | .multi_read_bit = true, |
| 170 | .bootime = 2, | 174 | .bootime = 2, |
| 171 | }, | 175 | }, |
| @@ -234,6 +238,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { | |||
| 234 | .mask_od = 0x40, | 238 | .mask_od = 0x40, |
| 235 | .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, | 239 | .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, |
| 236 | }, | 240 | }, |
| 241 | .sim = { | ||
| 242 | .addr = 0x23, | ||
| 243 | .value = BIT(0), | ||
| 244 | }, | ||
| 237 | .multi_read_bit = true, | 245 | .multi_read_bit = true, |
| 238 | .bootime = 2, | 246 | .bootime = 2, |
| 239 | }, | 247 | }, |
| @@ -316,6 +324,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { | |||
| 316 | .en_mask = 0x08, | 324 | .en_mask = 0x08, |
| 317 | }, | 325 | }, |
| 318 | }, | 326 | }, |
| 327 | .sim = { | ||
| 328 | .addr = 0x24, | ||
| 329 | .value = BIT(0), | ||
| 330 | }, | ||
| 319 | .multi_read_bit = false, | 331 | .multi_read_bit = false, |
| 320 | .bootime = 2, | 332 | .bootime = 2, |
| 321 | }, | 333 | }, |
| @@ -379,6 +391,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { | |||
| 379 | .mask_int1 = 0x04, | 391 | .mask_int1 = 0x04, |
| 380 | .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, | 392 | .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, |
| 381 | }, | 393 | }, |
| 394 | .sim = { | ||
| 395 | .addr = 0x21, | ||
| 396 | .value = BIT(1), | ||
| 397 | }, | ||
| 382 | .multi_read_bit = true, | 398 | .multi_read_bit = true, |
| 383 | .bootime = 2, /* guess */ | 399 | .bootime = 2, /* guess */ |
| 384 | }, | 400 | }, |
| @@ -437,6 +453,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { | |||
| 437 | .mask_od = 0x40, | 453 | .mask_od = 0x40, |
| 438 | .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, | 454 | .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, |
| 439 | }, | 455 | }, |
| 456 | .sim = { | ||
| 457 | .addr = 0x21, | ||
| 458 | .value = BIT(7), | ||
| 459 | }, | ||
| 440 | .multi_read_bit = false, | 460 | .multi_read_bit = false, |
| 441 | .bootime = 2, /* guess */ | 461 | .bootime = 2, /* guess */ |
| 442 | }, | 462 | }, |
| @@ -499,6 +519,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { | |||
| 499 | .addr_ihl = 0x22, | 519 | .addr_ihl = 0x22, |
| 500 | .mask_ihl = 0x80, | 520 | .mask_ihl = 0x80, |
| 501 | }, | 521 | }, |
| 522 | .sim = { | ||
| 523 | .addr = 0x23, | ||
| 524 | .value = BIT(0), | ||
| 525 | }, | ||
| 502 | .multi_read_bit = true, | 526 | .multi_read_bit = true, |
| 503 | .bootime = 2, | 527 | .bootime = 2, |
| 504 | }, | 528 | }, |
| @@ -547,6 +571,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { | |||
| 547 | .mask_int1 = 0x04, | 571 | .mask_int1 = 0x04, |
| 548 | .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, | 572 | .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, |
| 549 | }, | 573 | }, |
| 574 | .sim = { | ||
| 575 | .addr = 0x21, | ||
| 576 | .value = BIT(1), | ||
| 577 | }, | ||
| 550 | .multi_read_bit = false, | 578 | .multi_read_bit = false, |
| 551 | .bootime = 2, | 579 | .bootime = 2, |
| 552 | }, | 580 | }, |
| @@ -614,6 +642,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { | |||
| 614 | .mask_ihl = 0x02, | 642 | .mask_ihl = 0x02, |
| 615 | .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, | 643 | .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, |
| 616 | }, | 644 | }, |
| 645 | .sim = { | ||
| 646 | .addr = 0x23, | ||
| 647 | .value = BIT(0), | ||
| 648 | }, | ||
| 617 | .multi_read_bit = true, | 649 | .multi_read_bit = true, |
| 618 | .bootime = 2, | 650 | .bootime = 2, |
| 619 | }, | 651 | }, |
diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c
index e0ea411a0b2d..c02b23d675cb 100644
--- a/drivers/iio/adc/aspeed_adc.c
+++ b/drivers/iio/adc/aspeed_adc.c
| @@ -22,6 +22,7 @@ | |||
| 22 | 22 | ||
| 23 | #include <linux/iio/iio.h> | 23 | #include <linux/iio/iio.h> |
| 24 | #include <linux/iio/driver.h> | 24 | #include <linux/iio/driver.h> |
| 25 | #include <linux/iopoll.h> | ||
| 25 | 26 | ||
| 26 | #define ASPEED_RESOLUTION_BITS 10 | 27 | #define ASPEED_RESOLUTION_BITS 10 |
| 27 | #define ASPEED_CLOCKS_PER_SAMPLE 12 | 28 | #define ASPEED_CLOCKS_PER_SAMPLE 12 |
| @@ -38,11 +39,17 @@ | |||
| 38 | 39 | ||
| 39 | #define ASPEED_ENGINE_ENABLE BIT(0) | 40 | #define ASPEED_ENGINE_ENABLE BIT(0) |
| 40 | 41 | ||
| 42 | #define ASPEED_ADC_CTRL_INIT_RDY BIT(8) | ||
| 43 | |||
| 44 | #define ASPEED_ADC_INIT_POLLING_TIME 500 | ||
| 45 | #define ASPEED_ADC_INIT_TIMEOUT 500000 | ||
| 46 | |||
| 41 | struct aspeed_adc_model_data { | 47 | struct aspeed_adc_model_data { |
| 42 | const char *model_name; | 48 | const char *model_name; |
| 43 | unsigned int min_sampling_rate; // Hz | 49 | unsigned int min_sampling_rate; // Hz |
| 44 | unsigned int max_sampling_rate; // Hz | 50 | unsigned int max_sampling_rate; // Hz |
| 45 | unsigned int vref_voltage; // mV | 51 | unsigned int vref_voltage; // mV |
| 52 | bool wait_init_sequence; | ||
| 46 | }; | 53 | }; |
| 47 | 54 | ||
| 48 | struct aspeed_adc_data { | 55 | struct aspeed_adc_data { |
| @@ -211,6 +218,24 @@ static int aspeed_adc_probe(struct platform_device *pdev) | |||
| 211 | goto scaler_error; | 218 | goto scaler_error; |
| 212 | } | 219 | } |
| 213 | 220 | ||
| 221 | model_data = of_device_get_match_data(&pdev->dev); | ||
| 222 | |||
| 223 | if (model_data->wait_init_sequence) { | ||
| 224 | /* Enable engine in normal mode. */ | ||
| 225 | writel(ASPEED_OPERATION_MODE_NORMAL | ASPEED_ENGINE_ENABLE, | ||
| 226 | data->base + ASPEED_REG_ENGINE_CONTROL); | ||
| 227 | |||
| 228 | /* Wait for initial sequence complete. */ | ||
| 229 | ret = readl_poll_timeout(data->base + ASPEED_REG_ENGINE_CONTROL, | ||
| 230 | adc_engine_control_reg_val, | ||
| 231 | adc_engine_control_reg_val & | ||
| 232 | ASPEED_ADC_CTRL_INIT_RDY, | ||
| 233 | ASPEED_ADC_INIT_POLLING_TIME, | ||
| 234 | ASPEED_ADC_INIT_TIMEOUT); | ||
| 235 | if (ret) | ||
| 236 | goto scaler_error; | ||
| 237 | } | ||
| 238 | |||
| 214 | /* Start all channels in normal mode. */ | 239 | /* Start all channels in normal mode. */ |
| 215 | ret = clk_prepare_enable(data->clk_scaler->clk); | 240 | ret = clk_prepare_enable(data->clk_scaler->clk); |
| 216 | if (ret) | 241 | if (ret) |
| @@ -274,6 +299,7 @@ static const struct aspeed_adc_model_data ast2500_model_data = { | |||
| 274 | .vref_voltage = 1800, // mV | 299 | .vref_voltage = 1800, // mV |
| 275 | .min_sampling_rate = 1, | 300 | .min_sampling_rate = 1, |
| 276 | .max_sampling_rate = 1000000, | 301 | .max_sampling_rate = 1000000, |
| 302 | .wait_init_sequence = true, | ||
| 277 | }; | 303 | }; |
| 278 | 304 | ||
| 279 | static const struct of_device_id aspeed_adc_matches[] = { | 305 | static const struct of_device_id aspeed_adc_matches[] = { |
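
The aspeed_adc probe above busy-polls the engine-control register with readl_poll_timeout() until the INIT_RDY bit appears or the 500 ms budget runs out. A userspace analogue of that poll-with-deadline loop, with a simulated register in place of MMIO:

```c
/* Userspace analogue of the readl_poll_timeout() pattern: re-read a
 * "register" until a ready bit appears or a deadline passes. The register
 * is simulated and the timings mirror the values in the hunk. */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

#define INIT_RDY	(1u << 8)

static unsigned int fake_reg;

static unsigned int read_reg(void)
{
	static int reads;

	if (++reads == 3)		/* hardware becomes ready on the 3rd read */
		fake_reg |= INIT_RDY;
	return fake_reg;
}

static int poll_until_ready(unsigned int poll_us, unsigned int timeout_us)
{
	unsigned int waited = 0;

	for (;;) {
		if (read_reg() & INIT_RDY)
			return 0;
		if (waited >= timeout_us)
			return -ETIMEDOUT;
		usleep(poll_us);
		waited += poll_us;
	}
}

int main(void)
{
	printf("poll_until_ready() = %d\n", poll_until_ready(500, 500000));
	return 0;
}
```
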
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
index 64799ad7ebad..462a99c13e7a 100644
--- a/drivers/iio/adc/axp288_adc.c
+++ b/drivers/iio/adc/axp288_adc.c
| @@ -28,6 +28,8 @@ | |||
| 28 | #include <linux/iio/driver.h> | 28 | #include <linux/iio/driver.h> |
| 29 | 29 | ||
| 30 | #define AXP288_ADC_EN_MASK 0xF1 | 30 | #define AXP288_ADC_EN_MASK 0xF1 |
| 31 | #define AXP288_ADC_TS_PIN_GPADC 0xF2 | ||
| 32 | #define AXP288_ADC_TS_PIN_ON 0xF3 | ||
| 31 | 33 | ||
| 32 | enum axp288_adc_id { | 34 | enum axp288_adc_id { |
| 33 | AXP288_ADC_TS, | 35 | AXP288_ADC_TS, |
| @@ -121,6 +123,26 @@ static int axp288_adc_read_channel(int *val, unsigned long address, | |||
| 121 | return IIO_VAL_INT; | 123 | return IIO_VAL_INT; |
| 122 | } | 124 | } |
| 123 | 125 | ||
| 126 | static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode, | ||
| 127 | unsigned long address) | ||
| 128 | { | ||
| 129 | int ret; | ||
| 130 | |||
| 131 | /* channels other than GPADC do not need to switch TS pin */ | ||
| 132 | if (address != AXP288_GP_ADC_H) | ||
| 133 | return 0; | ||
| 134 | |||
| 135 | ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode); | ||
| 136 | if (ret) | ||
| 137 | return ret; | ||
| 138 | |||
| 139 | /* When switching to the GPADC pin give things some time to settle */ | ||
| 140 | if (mode == AXP288_ADC_TS_PIN_GPADC) | ||
| 141 | usleep_range(6000, 10000); | ||
| 142 | |||
| 143 | return 0; | ||
| 144 | } | ||
| 145 | |||
| 124 | static int axp288_adc_read_raw(struct iio_dev *indio_dev, | 146 | static int axp288_adc_read_raw(struct iio_dev *indio_dev, |
| 125 | struct iio_chan_spec const *chan, | 147 | struct iio_chan_spec const *chan, |
| 126 | int *val, int *val2, long mask) | 148 | int *val, int *val2, long mask) |
| @@ -131,7 +153,16 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev, | |||
| 131 | mutex_lock(&indio_dev->mlock); | 153 | mutex_lock(&indio_dev->mlock); |
| 132 | switch (mask) { | 154 | switch (mask) { |
| 133 | case IIO_CHAN_INFO_RAW: | 155 | case IIO_CHAN_INFO_RAW: |
| 156 | if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC, | ||
| 157 | chan->address)) { | ||
| 158 | dev_err(&indio_dev->dev, "GPADC mode\n"); | ||
| 159 | ret = -EINVAL; | ||
| 160 | break; | ||
| 161 | } | ||
| 134 | ret = axp288_adc_read_channel(val, chan->address, info->regmap); | 162 | ret = axp288_adc_read_channel(val, chan->address, info->regmap); |
| 163 | if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON, | ||
| 164 | chan->address)) | ||
| 165 | dev_err(&indio_dev->dev, "TS pin restore\n"); | ||
| 135 | break; | 166 | break; |
| 136 | default: | 167 | default: |
| 137 | ret = -EINVAL; | 168 | ret = -EINVAL; |
| @@ -141,6 +172,15 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev, | |||
| 141 | return ret; | 172 | return ret; |
| 142 | } | 173 | } |
| 143 | 174 | ||
| 175 | static int axp288_adc_set_state(struct regmap *regmap) | ||
| 176 | { | ||
| 177 | /* ADC should be always enabled for internal FG to function */ | ||
| 178 | if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON)) | ||
| 179 | return -EIO; | ||
| 180 | |||
| 181 | return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK); | ||
| 182 | } | ||
| 183 | |||
| 144 | static const struct iio_info axp288_adc_iio_info = { | 184 | static const struct iio_info axp288_adc_iio_info = { |
| 145 | .read_raw = &axp288_adc_read_raw, | 185 | .read_raw = &axp288_adc_read_raw, |
| 146 | .driver_module = THIS_MODULE, | 186 | .driver_module = THIS_MODULE, |
| @@ -169,7 +209,7 @@ static int axp288_adc_probe(struct platform_device *pdev) | |||
| 169 | * Set ADC to enabled state at all time, including system suspend. | 209 | * Set ADC to enabled state at all time, including system suspend. |
| 170 | * otherwise internal fuel gauge functionality may be affected. | 210 | * otherwise internal fuel gauge functionality may be affected. |
| 171 | */ | 211 | */ |
| 172 | ret = regmap_write(info->regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK); | 212 | ret = axp288_adc_set_state(axp20x->regmap); |
| 173 | if (ret) { | 213 | if (ret) { |
| 174 | dev_err(&pdev->dev, "unable to enable ADC device\n"); | 214 | dev_err(&pdev->dev, "unable to enable ADC device\n"); |
| 175 | return ret; | 215 | return ret; |
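
The axp288_adc read path above temporarily routes the shared TS pin to the GPADC, waits 6-10 ms for the input to settle, samples, and then always restores the default routing even if the read failed. A hedged sketch of that switch/settle/sample/restore sequence; the register values come from the hunk, the helpers are stand-ins for the real regmap calls:

```c
#include <stdio.h>
#include <unistd.h>

#define TS_PIN_GPADC	0xF2
#define TS_PIN_ON	0xF3

static int write_ctrl(unsigned int mode)
{
	printf("TS pin control <- 0x%02X\n", mode);
	return 0;
}

static int read_gpadc_channel(int *val)
{
	*val = 512;			/* pretend conversion result */
	return 0;
}

int main(void)
{
	int val, ret;

	if (write_ctrl(TS_PIN_GPADC))	/* route the pin to the GPADC */
		return 1;
	usleep(6000);			/* let the input settle (6-10 ms) */
	ret = read_gpadc_channel(&val);
	if (write_ctrl(TS_PIN_ON))	/* always restore the default routing */
		fprintf(stderr, "TS pin restore failed\n");
	if (!ret)
		printf("raw sample: %d\n", val);
	return ret;
}
```
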
diff --git a/drivers/iio/adc/sun4i-gpadc-iio.c b/drivers/iio/adc/sun4i-gpadc-iio.c
index 81d4c39e414a..137f577d9432 100644
--- a/drivers/iio/adc/sun4i-gpadc-iio.c
+++ b/drivers/iio/adc/sun4i-gpadc-iio.c
| @@ -256,6 +256,7 @@ static int sun4i_gpadc_read(struct iio_dev *indio_dev, int channel, int *val, | |||
| 256 | 256 | ||
| 257 | err: | 257 | err: |
| 258 | pm_runtime_put_autosuspend(indio_dev->dev.parent); | 258 | pm_runtime_put_autosuspend(indio_dev->dev.parent); |
| 259 | disable_irq(irq); | ||
| 259 | mutex_unlock(&info->mutex); | 260 | mutex_unlock(&info->mutex); |
| 260 | 261 | ||
| 261 | return ret; | 262 | return ret; |
| @@ -365,7 +366,6 @@ static irqreturn_t sun4i_gpadc_temp_data_irq_handler(int irq, void *dev_id) | |||
| 365 | complete(&info->completion); | 366 | complete(&info->completion); |
| 366 | 367 | ||
| 367 | out: | 368 | out: |
| 368 | disable_irq_nosync(info->temp_data_irq); | ||
| 369 | return IRQ_HANDLED; | 369 | return IRQ_HANDLED; |
| 370 | } | 370 | } |
| 371 | 371 | ||
| @@ -380,7 +380,6 @@ static irqreturn_t sun4i_gpadc_fifo_data_irq_handler(int irq, void *dev_id) | |||
| 380 | complete(&info->completion); | 380 | complete(&info->completion); |
| 381 | 381 | ||
| 382 | out: | 382 | out: |
| 383 | disable_irq_nosync(info->fifo_data_irq); | ||
| 384 | return IRQ_HANDLED; | 383 | return IRQ_HANDLED; |
| 385 | } | 384 | } |
| 386 | 385 | ||
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index 01fc76f7d660..c168e0db329a 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
| @@ -77,7 +77,7 @@ | |||
| 77 | #define VF610_ADC_ADSTS_MASK 0x300 | 77 | #define VF610_ADC_ADSTS_MASK 0x300 |
| 78 | #define VF610_ADC_ADLPC_EN 0x80 | 78 | #define VF610_ADC_ADLPC_EN 0x80 |
| 79 | #define VF610_ADC_ADHSC_EN 0x400 | 79 | #define VF610_ADC_ADHSC_EN 0x400 |
| 80 | #define VF610_ADC_REFSEL_VALT 0x100 | 80 | #define VF610_ADC_REFSEL_VALT 0x800 |
| 81 | #define VF610_ADC_REFSEL_VBG 0x1000 | 81 | #define VF610_ADC_REFSEL_VBG 0x1000 |
| 82 | #define VF610_ADC_ADTRG_HARD 0x2000 | 82 | #define VF610_ADC_ADTRG_HARD 0x2000 |
| 83 | #define VF610_ADC_AVGS_8 0x4000 | 83 | #define VF610_ADC_AVGS_8 0x4000 |
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 79c8c7cd70d5..6e6a1ecc99dd 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
| @@ -550,6 +550,31 @@ out: | |||
| 550 | } | 550 | } |
| 551 | EXPORT_SYMBOL(st_sensors_read_info_raw); | 551 | EXPORT_SYMBOL(st_sensors_read_info_raw); |
| 552 | 552 | ||
| 553 | static int st_sensors_init_interface_mode(struct iio_dev *indio_dev, | ||
| 554 | const struct st_sensor_settings *sensor_settings) | ||
| 555 | { | ||
| 556 | struct st_sensor_data *sdata = iio_priv(indio_dev); | ||
| 557 | struct device_node *np = sdata->dev->of_node; | ||
| 558 | struct st_sensors_platform_data *pdata; | ||
| 559 | |||
| 560 | pdata = (struct st_sensors_platform_data *)sdata->dev->platform_data; | ||
| 561 | if (((np && of_property_read_bool(np, "spi-3wire")) || | ||
| 562 | (pdata && pdata->spi_3wire)) && sensor_settings->sim.addr) { | ||
| 563 | int err; | ||
| 564 | |||
| 565 | err = sdata->tf->write_byte(&sdata->tb, sdata->dev, | ||
| 566 | sensor_settings->sim.addr, | ||
| 567 | sensor_settings->sim.value); | ||
| 568 | if (err < 0) { | ||
| 569 | dev_err(&indio_dev->dev, | ||
| 570 | "failed to init interface mode\n"); | ||
| 571 | return err; | ||
| 572 | } | ||
| 573 | } | ||
| 574 | |||
| 575 | return 0; | ||
| 576 | } | ||
| 577 | |||
| 553 | int st_sensors_check_device_support(struct iio_dev *indio_dev, | 578 | int st_sensors_check_device_support(struct iio_dev *indio_dev, |
| 554 | int num_sensors_list, | 579 | int num_sensors_list, |
| 555 | const struct st_sensor_settings *sensor_settings) | 580 | const struct st_sensor_settings *sensor_settings) |
| @@ -574,6 +599,10 @@ int st_sensors_check_device_support(struct iio_dev *indio_dev, | |||
| 574 | return -ENODEV; | 599 | return -ENODEV; |
| 575 | } | 600 | } |
| 576 | 601 | ||
| 602 | err = st_sensors_init_interface_mode(indio_dev, &sensor_settings[i]); | ||
| 603 | if (err < 0) | ||
| 604 | return err; | ||
| 605 | |||
| 577 | if (sensor_settings[i].wai_addr) { | 606 | if (sensor_settings[i].wai_addr) { |
| 578 | err = sdata->tf->read_byte(&sdata->tb, sdata->dev, | 607 | err = sdata->tf->read_byte(&sdata->tb, sdata->dev, |
| 579 | sensor_settings[i].wai_addr, &wai); | 608 | sensor_settings[i].wai_addr, &wai); |
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index e7d4ea75e007..7599693f7fe9 100644
--- a/drivers/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
| @@ -626,7 +626,7 @@ static irqreturn_t tsl2563_event_handler(int irq, void *private) | |||
| 626 | struct tsl2563_chip *chip = iio_priv(dev_info); | 626 | struct tsl2563_chip *chip = iio_priv(dev_info); |
| 627 | 627 | ||
| 628 | iio_push_event(dev_info, | 628 | iio_push_event(dev_info, |
| 629 | IIO_UNMOD_EVENT_CODE(IIO_LIGHT, | 629 | IIO_UNMOD_EVENT_CODE(IIO_INTENSITY, |
| 630 | 0, | 630 | 0, |
| 631 | IIO_EV_TYPE_THRESH, | 631 | IIO_EV_TYPE_THRESH, |
| 632 | IIO_EV_DIR_EITHER), | 632 | IIO_EV_DIR_EITHER), |
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index aa61ec15c139..f1bce05ffa13 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
| @@ -456,7 +456,7 @@ static const struct st_sensor_settings st_press_sensors_settings[] = { | |||
| 456 | .mask_od = 0x40, | 456 | .mask_od = 0x40, |
| 457 | .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, | 457 | .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, |
| 458 | }, | 458 | }, |
| 459 | .multi_read_bit = true, | 459 | .multi_read_bit = false, |
| 460 | .bootime = 2, | 460 | .bootime = 2, |
| 461 | }, | 461 | }, |
| 462 | }; | 462 | }; |
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index a5dfab6adf49..221468f77128 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
| @@ -537,10 +537,11 @@ void ib_unregister_device(struct ib_device *device) | |||
| 537 | } | 537 | } |
| 538 | up_read(&lists_rwsem); | 538 | up_read(&lists_rwsem); |
| 539 | 539 | ||
| 540 | mutex_unlock(&device_mutex); | ||
| 541 | |||
| 542 | ib_device_unregister_rdmacg(device); | 540 | ib_device_unregister_rdmacg(device); |
| 543 | ib_device_unregister_sysfs(device); | 541 | ib_device_unregister_sysfs(device); |
| 542 | |||
| 543 | mutex_unlock(&device_mutex); | ||
| 544 | |||
| 544 | ib_cache_cleanup_one(device); | 545 | ib_cache_cleanup_one(device); |
| 545 | 546 | ||
| 546 | ib_security_destroy_port_pkey_list(device); | 547 | ib_security_destroy_port_pkey_list(device); |
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index c551d2b275fd..739bd69ef1d4 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c | |||
| @@ -1015,7 +1015,7 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file, | |||
| 1015 | cq->uobject = &obj->uobject; | 1015 | cq->uobject = &obj->uobject; |
| 1016 | cq->comp_handler = ib_uverbs_comp_handler; | 1016 | cq->comp_handler = ib_uverbs_comp_handler; |
| 1017 | cq->event_handler = ib_uverbs_cq_event_handler; | 1017 | cq->event_handler = ib_uverbs_cq_event_handler; |
| 1018 | cq->cq_context = &ev_file->ev_queue; | 1018 | cq->cq_context = ev_file ? &ev_file->ev_queue : NULL; |
| 1019 | atomic_set(&cq->usecnt, 0); | 1019 | atomic_set(&cq->usecnt, 0); |
| 1020 | 1020 | ||
| 1021 | obj->uobject.object = cq; | 1021 | obj->uobject.object = cq; |
| @@ -1522,6 +1522,7 @@ static int create_qp(struct ib_uverbs_file *file, | |||
| 1522 | qp->qp_type = attr.qp_type; | 1522 | qp->qp_type = attr.qp_type; |
| 1523 | atomic_set(&qp->usecnt, 0); | 1523 | atomic_set(&qp->usecnt, 0); |
| 1524 | atomic_inc(&pd->usecnt); | 1524 | atomic_inc(&pd->usecnt); |
| 1525 | qp->port = 0; | ||
| 1525 | if (attr.send_cq) | 1526 | if (attr.send_cq) |
| 1526 | atomic_inc(&attr.send_cq->usecnt); | 1527 | atomic_inc(&attr.send_cq->usecnt); |
| 1527 | if (attr.recv_cq) | 1528 | if (attr.recv_cq) |
| @@ -1962,8 +1963,9 @@ static int modify_qp(struct ib_uverbs_file *file, | |||
| 1962 | attr->alt_timeout = cmd->base.alt_timeout; | 1963 | attr->alt_timeout = cmd->base.alt_timeout; |
| 1963 | attr->rate_limit = cmd->rate_limit; | 1964 | attr->rate_limit = cmd->rate_limit; |
| 1964 | 1965 | ||
| 1965 | attr->ah_attr.type = rdma_ah_find_type(qp->device, | 1966 | if (cmd->base.attr_mask & IB_QP_AV) |
| 1966 | cmd->base.dest.port_num); | 1967 | attr->ah_attr.type = rdma_ah_find_type(qp->device, |
| 1968 | cmd->base.dest.port_num); | ||
| 1967 | if (cmd->base.dest.is_global) { | 1969 | if (cmd->base.dest.is_global) { |
| 1968 | rdma_ah_set_grh(&attr->ah_attr, NULL, | 1970 | rdma_ah_set_grh(&attr->ah_attr, NULL, |
| 1969 | cmd->base.dest.flow_label, | 1971 | cmd->base.dest.flow_label, |
| @@ -1981,8 +1983,9 @@ static int modify_qp(struct ib_uverbs_file *file, | |||
| 1981 | rdma_ah_set_port_num(&attr->ah_attr, | 1983 | rdma_ah_set_port_num(&attr->ah_attr, |
| 1982 | cmd->base.dest.port_num); | 1984 | cmd->base.dest.port_num); |
| 1983 | 1985 | ||
| 1984 | attr->alt_ah_attr.type = rdma_ah_find_type(qp->device, | 1986 | if (cmd->base.attr_mask & IB_QP_ALT_PATH) |
| 1985 | cmd->base.dest.port_num); | 1987 | attr->alt_ah_attr.type = |
| 1988 | rdma_ah_find_type(qp->device, cmd->base.dest.port_num); | ||
| 1986 | if (cmd->base.alt_dest.is_global) { | 1989 | if (cmd->base.alt_dest.is_global) { |
| 1987 | rdma_ah_set_grh(&attr->alt_ah_attr, NULL, | 1990 | rdma_ah_set_grh(&attr->alt_ah_attr, NULL, |
| 1988 | cmd->base.alt_dest.flow_label, | 1991 | cmd->base.alt_dest.flow_label, |
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index c023e2c81b8f..5e530d2bee44 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c | |||
| @@ -1153,7 +1153,6 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev, | |||
| 1153 | kref_get(&file->ref); | 1153 | kref_get(&file->ref); |
| 1154 | mutex_unlock(&uverbs_dev->lists_mutex); | 1154 | mutex_unlock(&uverbs_dev->lists_mutex); |
| 1155 | 1155 | ||
| 1156 | ib_uverbs_event_handler(&file->event_handler, &event); | ||
| 1157 | 1156 | ||
| 1158 | mutex_lock(&file->cleanup_mutex); | 1157 | mutex_lock(&file->cleanup_mutex); |
| 1159 | ucontext = file->ucontext; | 1158 | ucontext = file->ucontext; |
| @@ -1170,6 +1169,7 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev, | |||
| 1170 | * for example due to freeing the resources | 1169 | * for example due to freeing the resources |
| 1171 | * (e.g mmput). | 1170 | * (e.g mmput). |
| 1172 | */ | 1171 | */ |
| 1172 | ib_uverbs_event_handler(&file->event_handler, &event); | ||
| 1173 | ib_dev->disassociate_ucontext(ucontext); | 1173 | ib_dev->disassociate_ucontext(ucontext); |
| 1174 | mutex_lock(&file->cleanup_mutex); | 1174 | mutex_lock(&file->cleanup_mutex); |
| 1175 | ib_uverbs_cleanup_ucontext(file, ucontext, true); | 1175 | ib_uverbs_cleanup_ucontext(file, ucontext, true); |
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 7f8fe443df46..b456e3ca1876 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c | |||
| @@ -838,6 +838,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd, | |||
| 838 | spin_lock_init(&qp->mr_lock); | 838 | spin_lock_init(&qp->mr_lock); |
| 839 | INIT_LIST_HEAD(&qp->rdma_mrs); | 839 | INIT_LIST_HEAD(&qp->rdma_mrs); |
| 840 | INIT_LIST_HEAD(&qp->sig_mrs); | 840 | INIT_LIST_HEAD(&qp->sig_mrs); |
| 841 | qp->port = 0; | ||
| 841 | 842 | ||
| 842 | if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) | 843 | if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) |
| 843 | return ib_create_xrc_qp(qp, qp_init_attr); | 844 | return ib_create_xrc_qp(qp, qp_init_attr); |
| @@ -1297,7 +1298,11 @@ int ib_modify_qp_with_udata(struct ib_qp *qp, struct ib_qp_attr *attr, | |||
| 1297 | if (ret) | 1298 | if (ret) |
| 1298 | return ret; | 1299 | return ret; |
| 1299 | } | 1300 | } |
| 1300 | return ib_security_modify_qp(qp, attr, attr_mask, udata); | 1301 | ret = ib_security_modify_qp(qp, attr, attr_mask, udata); |
| 1302 | if (!ret && (attr_mask & IB_QP_PORT)) | ||
| 1303 | qp->port = attr->port_num; | ||
| 1304 | |||
| 1305 | return ret; | ||
| 1301 | } | 1306 | } |
| 1302 | EXPORT_SYMBOL(ib_modify_qp_with_udata); | 1307 | EXPORT_SYMBOL(ib_modify_qp_with_udata); |
| 1303 | 1308 | ||
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c index 5332f06b99ba..c2fba76becd4 100644 --- a/drivers/infiniband/hw/cxgb4/mem.c +++ b/drivers/infiniband/hw/cxgb4/mem.c | |||
| @@ -661,7 +661,7 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd, | |||
| 661 | rhp = php->rhp; | 661 | rhp = php->rhp; |
| 662 | 662 | ||
| 663 | if (mr_type != IB_MR_TYPE_MEM_REG || | 663 | if (mr_type != IB_MR_TYPE_MEM_REG || |
| 664 | max_num_sg > t4_max_fr_depth(&rhp->rdev.lldi.ulptx_memwrite_dsgl && | 664 | max_num_sg > t4_max_fr_depth(rhp->rdev.lldi.ulptx_memwrite_dsgl && |
| 665 | use_dsgl)) | 665 | use_dsgl)) |
| 666 | return ERR_PTR(-EINVAL); | 666 | return ERR_PTR(-EINVAL); |
| 667 | 667 | ||
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c index f78a733a63ec..d545302b8ef8 100644 --- a/drivers/infiniband/hw/hns/hns_roce_ah.c +++ b/drivers/infiniband/hw/hns/hns_roce_ah.c | |||
| @@ -64,8 +64,10 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd, | |||
| 64 | } else { | 64 | } else { |
| 65 | u8 *dmac = rdma_ah_retrieve_dmac(ah_attr); | 65 | u8 *dmac = rdma_ah_retrieve_dmac(ah_attr); |
| 66 | 66 | ||
| 67 | if (!dmac) | 67 | if (!dmac) { |
| 68 | kfree(ah); | ||
| 68 | return ERR_PTR(-EINVAL); | 69 | return ERR_PTR(-EINVAL); |
| 70 | } | ||
| 69 | memcpy(ah->av.mac, dmac, ETH_ALEN); | 71 | memcpy(ah->av.mac, dmac, ETH_ALEN); |
| 70 | } | 72 | } |
| 71 | 73 | ||
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c index 9ec1ae9a82c9..a49ff2eb6fb3 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c +++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c | |||
| @@ -130,20 +130,32 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf( | |||
| 130 | u64 base = 0; | 130 | u64 base = 0; |
| 131 | u32 i, j; | 131 | u32 i, j; |
| 132 | u32 k = 0; | 132 | u32 k = 0; |
| 133 | u32 low; | ||
| 134 | 133 | ||
| 135 | /* copy base values in obj_info */ | 134 | /* copy base values in obj_info */ |
| 136 | for (i = I40IW_HMC_IW_QP, j = 0; | 135 | for (i = I40IW_HMC_IW_QP, j = 0; i <= I40IW_HMC_IW_PBLE; i++, j += 8) { |
| 137 | i <= I40IW_HMC_IW_PBLE; i++, j += 8) { | 136 | if ((i == I40IW_HMC_IW_SRQ) || |
| 137 | (i == I40IW_HMC_IW_FSIMC) || | ||
| 138 | (i == I40IW_HMC_IW_FSIAV)) { | ||
| 139 | info[i].base = 0; | ||
| 140 | info[i].cnt = 0; | ||
| 141 | continue; | ||
| 142 | } | ||
| 138 | get_64bit_val(buf, j, &temp); | 143 | get_64bit_val(buf, j, &temp); |
| 139 | info[i].base = RS_64_1(temp, 32) * 512; | 144 | info[i].base = RS_64_1(temp, 32) * 512; |
| 140 | if (info[i].base > base) { | 145 | if (info[i].base > base) { |
| 141 | base = info[i].base; | 146 | base = info[i].base; |
| 142 | k = i; | 147 | k = i; |
| 143 | } | 148 | } |
| 144 | low = (u32)(temp); | 149 | if (i == I40IW_HMC_IW_APBVT_ENTRY) { |
| 145 | if (low) | 150 | info[i].cnt = 1; |
| 146 | info[i].cnt = low; | 151 | continue; |
| 152 | } | ||
| 153 | if (i == I40IW_HMC_IW_QP) | ||
| 154 | info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS); | ||
| 155 | else if (i == I40IW_HMC_IW_CQ) | ||
| 156 | info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS); | ||
| 157 | else | ||
| 158 | info[i].cnt = (u32)(temp); | ||
| 147 | } | 159 | } |
| 148 | size = info[k].cnt * info[k].size + info[k].base; | 160 | size = info[k].cnt * info[k].size + info[k].base; |
| 149 | if (size & 0x1FFFFF) | 161 | if (size & 0x1FFFFF) |
| @@ -155,6 +167,31 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf( | |||
| 155 | } | 167 | } |
| 156 | 168 | ||
| 157 | /** | 169 | /** |
| 170 | * i40iw_sc_decode_fpm_query() - Decode a 64 bit value into max count and size | ||
| 171 | * @buf: ptr to fpm query buffer | ||
| 172 | * @buf_idx: index into buf | ||
| 173 | * @obj_info: ptr to i40iw_hmc_obj_info struct | ||
| 174 | * @rsrc_idx: resource index into obj_info | ||
| 175 | * | ||
| 176 | * Decode a 64 bit value from fpm query buffer into max count and size | ||
| 177 | */ | ||
| 178 | static u64 i40iw_sc_decode_fpm_query(u64 *buf, | ||
| 179 | u32 buf_idx, | ||
| 180 | struct i40iw_hmc_obj_info *obj_info, | ||
| 181 | u32 rsrc_idx) | ||
| 182 | { | ||
| 183 | u64 temp; | ||
| 184 | u32 size; | ||
| 185 | |||
| 186 | get_64bit_val(buf, buf_idx, &temp); | ||
| 187 | obj_info[rsrc_idx].max_cnt = (u32)temp; | ||
| 188 | size = (u32)RS_64_1(temp, 32); | ||
| 189 | obj_info[rsrc_idx].size = LS_64_1(1, size); | ||
| 190 | |||
| 191 | return temp; | ||
| 192 | } | ||
| 193 | |||
| 194 | /** | ||
| 158 | * i40iw_sc_parse_fpm_query_buf() - parses fpm query buffer | 195 | * i40iw_sc_parse_fpm_query_buf() - parses fpm query buffer |
| 159 | * @buf: ptr to fpm query buffer | 196 | * @buf: ptr to fpm query buffer |
| 160 | * @info: ptr to i40iw_hmc_obj_info struct | 197 | * @info: ptr to i40iw_hmc_obj_info struct |
| @@ -168,9 +205,9 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf( | |||
| 168 | struct i40iw_hmc_info *hmc_info, | 205 | struct i40iw_hmc_info *hmc_info, |
| 169 | struct i40iw_hmc_fpm_misc *hmc_fpm_misc) | 206 | struct i40iw_hmc_fpm_misc *hmc_fpm_misc) |
| 170 | { | 207 | { |
| 171 | u64 temp; | ||
| 172 | struct i40iw_hmc_obj_info *obj_info; | 208 | struct i40iw_hmc_obj_info *obj_info; |
| 173 | u32 i, j, size; | 209 | u64 temp; |
| 210 | u32 size; | ||
| 174 | u16 max_pe_sds; | 211 | u16 max_pe_sds; |
| 175 | 212 | ||
| 176 | obj_info = hmc_info->hmc_obj; | 213 | obj_info = hmc_info->hmc_obj; |
| @@ -185,41 +222,52 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf( | |||
| 185 | hmc_fpm_misc->max_sds = max_pe_sds; | 222 | hmc_fpm_misc->max_sds = max_pe_sds; |
| 186 | hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index; | 223 | hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index; |
| 187 | 224 | ||
| 188 | for (i = I40IW_HMC_IW_QP, j = 8; | 225 | get_64bit_val(buf, 8, &temp); |
| 189 | i <= I40IW_HMC_IW_ARP; i++, j += 8) { | 226 | obj_info[I40IW_HMC_IW_QP].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS); |
| 190 | get_64bit_val(buf, j, &temp); | 227 | size = (u32)RS_64_1(temp, 32); |
| 191 | if (i == I40IW_HMC_IW_QP) | 228 | obj_info[I40IW_HMC_IW_QP].size = LS_64_1(1, size); |
| 192 | obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS); | ||
| 193 | else if (i == I40IW_HMC_IW_CQ) | ||
| 194 | obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS); | ||
| 195 | else | ||
| 196 | obj_info[i].max_cnt = (u32)temp; | ||
| 197 | 229 | ||
| 198 | size = (u32)RS_64_1(temp, 32); | 230 | get_64bit_val(buf, 16, &temp); |
| 199 | obj_info[i].size = ((u64)1 << size); | 231 | obj_info[I40IW_HMC_IW_CQ].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS); |
| 200 | } | 232 | size = (u32)RS_64_1(temp, 32); |
| 201 | for (i = I40IW_HMC_IW_MR, j = 48; | 233 | obj_info[I40IW_HMC_IW_CQ].size = LS_64_1(1, size); |
| 202 | i <= I40IW_HMC_IW_PBLE; i++, j += 8) { | 234 | |
| 203 | get_64bit_val(buf, j, &temp); | 235 | i40iw_sc_decode_fpm_query(buf, 32, obj_info, I40IW_HMC_IW_HTE); |
| 204 | obj_info[i].max_cnt = (u32)temp; | 236 | i40iw_sc_decode_fpm_query(buf, 40, obj_info, I40IW_HMC_IW_ARP); |
| 205 | size = (u32)RS_64_1(temp, 32); | 237 | |
| 206 | obj_info[i].size = LS_64_1(1, size); | 238 | obj_info[I40IW_HMC_IW_APBVT_ENTRY].size = 8192; |
| 207 | } | 239 | obj_info[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1; |
| 240 | |||
| 241 | i40iw_sc_decode_fpm_query(buf, 48, obj_info, I40IW_HMC_IW_MR); | ||
| 242 | i40iw_sc_decode_fpm_query(buf, 56, obj_info, I40IW_HMC_IW_XF); | ||
| 208 | 243 | ||
| 209 | get_64bit_val(buf, 120, &temp); | ||
| 210 | hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS); | ||
| 211 | get_64bit_val(buf, 120, &temp); | ||
| 212 | hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER); | ||
| 213 | get_64bit_val(buf, 120, &temp); | ||
| 214 | hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET); | ||
| 215 | get_64bit_val(buf, 64, &temp); | 244 | get_64bit_val(buf, 64, &temp); |
| 245 | obj_info[I40IW_HMC_IW_XFFL].max_cnt = (u32)temp; | ||
| 246 | obj_info[I40IW_HMC_IW_XFFL].size = 4; | ||
| 216 | hmc_fpm_misc->xf_block_size = RS_64(temp, I40IW_QUERY_FPM_XFBLOCKSIZE); | 247 | hmc_fpm_misc->xf_block_size = RS_64(temp, I40IW_QUERY_FPM_XFBLOCKSIZE); |
| 217 | if (!hmc_fpm_misc->xf_block_size) | 248 | if (!hmc_fpm_misc->xf_block_size) |
| 218 | return I40IW_ERR_INVALID_SIZE; | 249 | return I40IW_ERR_INVALID_SIZE; |
| 250 | |||
| 251 | i40iw_sc_decode_fpm_query(buf, 72, obj_info, I40IW_HMC_IW_Q1); | ||
| 252 | |||
| 219 | get_64bit_val(buf, 80, &temp); | 253 | get_64bit_val(buf, 80, &temp); |
| 254 | obj_info[I40IW_HMC_IW_Q1FL].max_cnt = (u32)temp; | ||
| 255 | obj_info[I40IW_HMC_IW_Q1FL].size = 4; | ||
| 220 | hmc_fpm_misc->q1_block_size = RS_64(temp, I40IW_QUERY_FPM_Q1BLOCKSIZE); | 256 | hmc_fpm_misc->q1_block_size = RS_64(temp, I40IW_QUERY_FPM_Q1BLOCKSIZE); |
| 221 | if (!hmc_fpm_misc->q1_block_size) | 257 | if (!hmc_fpm_misc->q1_block_size) |
| 222 | return I40IW_ERR_INVALID_SIZE; | 258 | return I40IW_ERR_INVALID_SIZE; |
| 259 | |||
| 260 | i40iw_sc_decode_fpm_query(buf, 88, obj_info, I40IW_HMC_IW_TIMER); | ||
| 261 | |||
| 262 | get_64bit_val(buf, 112, &temp); | ||
| 263 | obj_info[I40IW_HMC_IW_PBLE].max_cnt = (u32)temp; | ||
| 264 | obj_info[I40IW_HMC_IW_PBLE].size = 8; | ||
| 265 | |||
| 266 | get_64bit_val(buf, 120, &temp); | ||
| 267 | hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS); | ||
| 268 | hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER); | ||
| 269 | hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET); | ||
| 270 | |||
| 223 | return 0; | 271 | return 0; |
| 224 | } | 272 | } |
| 225 | 273 | ||
| @@ -3392,13 +3440,6 @@ enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, u8 hmc_fn_ | |||
| 3392 | hmc_info->sd_table.sd_entry = virt_mem.va; | 3440 | hmc_info->sd_table.sd_entry = virt_mem.va; |
| 3393 | } | 3441 | } |
| 3394 | 3442 | ||
| 3395 | /* fill size of objects which are fixed */ | ||
| 3396 | hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].size = 4; | ||
| 3397 | hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].size = 4; | ||
| 3398 | hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size = 8; | ||
| 3399 | hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].size = 8192; | ||
| 3400 | hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1; | ||
| 3401 | |||
| 3402 | return ret_code; | 3443 | return ret_code; |
| 3403 | } | 3444 | } |
| 3404 | 3445 | ||
| @@ -4840,7 +4881,7 @@ void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi) | |||
| 4840 | { | 4881 | { |
| 4841 | u8 fcn_id = vsi->fcn_id; | 4882 | u8 fcn_id = vsi->fcn_id; |
| 4842 | 4883 | ||
| 4843 | if ((vsi->stats_fcn_id_alloc) && (fcn_id != I40IW_INVALID_FCN_ID)) | 4884 | if (vsi->stats_fcn_id_alloc && fcn_id < I40IW_MAX_STATS_COUNT) |
| 4844 | vsi->dev->fcn_id_array[fcn_id] = false; | 4885 | vsi->dev->fcn_id_array[fcn_id] = false; |
| 4845 | i40iw_hw_stats_stop_timer(vsi); | 4886 | i40iw_hw_stats_stop_timer(vsi); |
| 4846 | } | 4887 | } |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h index a39ac12b6a7e..2ebaadbed379 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_d.h +++ b/drivers/infiniband/hw/i40iw/i40iw_d.h | |||
| @@ -1507,8 +1507,8 @@ enum { | |||
| 1507 | I40IW_CQ0_ALIGNMENT_MASK = (256 - 1), | 1507 | I40IW_CQ0_ALIGNMENT_MASK = (256 - 1), |
| 1508 | I40IW_HOST_CTX_ALIGNMENT_MASK = (4 - 1), | 1508 | I40IW_HOST_CTX_ALIGNMENT_MASK = (4 - 1), |
| 1509 | I40IW_SHADOWAREA_MASK = (128 - 1), | 1509 | I40IW_SHADOWAREA_MASK = (128 - 1), |
| 1510 | I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK = 0, | 1510 | I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK = (4 - 1), |
| 1511 | I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK = 0 | 1511 | I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK = (4 - 1) |
| 1512 | }; | 1512 | }; |
| 1513 | 1513 | ||
| 1514 | enum i40iw_alignment { | 1514 | enum i40iw_alignment { |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c index 71050c5d29a0..7f5583d83622 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_puda.c +++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c | |||
| @@ -685,7 +685,7 @@ static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc) | |||
| 685 | cqsize = rsrc->cq_size * (sizeof(struct i40iw_cqe)); | 685 | cqsize = rsrc->cq_size * (sizeof(struct i40iw_cqe)); |
| 686 | tsize = cqsize + sizeof(struct i40iw_cq_shadow_area); | 686 | tsize = cqsize + sizeof(struct i40iw_cq_shadow_area); |
| 687 | ret = i40iw_allocate_dma_mem(dev->hw, &rsrc->cqmem, tsize, | 687 | ret = i40iw_allocate_dma_mem(dev->hw, &rsrc->cqmem, tsize, |
| 688 | I40IW_CQ0_ALIGNMENT_MASK); | 688 | I40IW_CQ0_ALIGNMENT); |
| 689 | if (ret) | 689 | if (ret) |
| 690 | return ret; | 690 | return ret; |
| 691 | 691 | ||
diff --git a/drivers/infiniband/hw/i40iw/i40iw_status.h b/drivers/infiniband/hw/i40iw/i40iw_status.h index 91c421762f06..f7013f11d808 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_status.h +++ b/drivers/infiniband/hw/i40iw/i40iw_status.h | |||
| @@ -62,7 +62,7 @@ enum i40iw_status_code { | |||
| 62 | I40IW_ERR_INVALID_ALIGNMENT = -23, | 62 | I40IW_ERR_INVALID_ALIGNMENT = -23, |
| 63 | I40IW_ERR_FLUSHED_QUEUE = -24, | 63 | I40IW_ERR_FLUSHED_QUEUE = -24, |
| 64 | I40IW_ERR_INVALID_PUSH_PAGE_INDEX = -25, | 64 | I40IW_ERR_INVALID_PUSH_PAGE_INDEX = -25, |
| 65 | I40IW_ERR_INVALID_IMM_DATA_SIZE = -26, | 65 | I40IW_ERR_INVALID_INLINE_DATA_SIZE = -26, |
| 66 | I40IW_ERR_TIMEOUT = -27, | 66 | I40IW_ERR_TIMEOUT = -27, |
| 67 | I40IW_ERR_OPCODE_MISMATCH = -28, | 67 | I40IW_ERR_OPCODE_MISMATCH = -28, |
| 68 | I40IW_ERR_CQP_COMPL_ERROR = -29, | 68 | I40IW_ERR_CQP_COMPL_ERROR = -29, |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c index b0d3a0e8a9b5..1060725d18bc 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_uk.c +++ b/drivers/infiniband/hw/i40iw/i40iw_uk.c | |||
| @@ -435,7 +435,7 @@ static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp, | |||
| 435 | 435 | ||
| 436 | op_info = &info->op.inline_rdma_write; | 436 | op_info = &info->op.inline_rdma_write; |
| 437 | if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE) | 437 | if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE) |
| 438 | return I40IW_ERR_INVALID_IMM_DATA_SIZE; | 438 | return I40IW_ERR_INVALID_INLINE_DATA_SIZE; |
| 439 | 439 | ||
| 440 | ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size); | 440 | ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size); |
| 441 | if (ret_code) | 441 | if (ret_code) |
| @@ -511,7 +511,7 @@ static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp, | |||
| 511 | 511 | ||
| 512 | op_info = &info->op.inline_send; | 512 | op_info = &info->op.inline_send; |
| 513 | if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE) | 513 | if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE) |
| 514 | return I40IW_ERR_INVALID_IMM_DATA_SIZE; | 514 | return I40IW_ERR_INVALID_INLINE_DATA_SIZE; |
| 515 | 515 | ||
| 516 | ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size); | 516 | ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size); |
| 517 | if (ret_code) | 517 | if (ret_code) |
| @@ -784,7 +784,7 @@ static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq, | |||
| 784 | get_64bit_val(cqe, 0, &qword0); | 784 | get_64bit_val(cqe, 0, &qword0); |
| 785 | get_64bit_val(cqe, 16, &qword2); | 785 | get_64bit_val(cqe, 16, &qword2); |
| 786 | 786 | ||
| 787 | info->tcp_seq_num = (u8)RS_64(qword0, I40IWCQ_TCPSEQNUM); | 787 | info->tcp_seq_num = (u32)RS_64(qword0, I40IWCQ_TCPSEQNUM); |
| 788 | 788 | ||
| 789 | info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID); | 789 | info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID); |
| 790 | 790 | ||
| @@ -1187,7 +1187,7 @@ enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size, | |||
| 1187 | u8 *wqe_size) | 1187 | u8 *wqe_size) |
| 1188 | { | 1188 | { |
| 1189 | if (data_size > I40IW_MAX_INLINE_DATA_SIZE) | 1189 | if (data_size > I40IW_MAX_INLINE_DATA_SIZE) |
| 1190 | return I40IW_ERR_INVALID_IMM_DATA_SIZE; | 1190 | return I40IW_ERR_INVALID_INLINE_DATA_SIZE; |
| 1191 | 1191 | ||
| 1192 | if (data_size <= 16) | 1192 | if (data_size <= 16) |
| 1193 | *wqe_size = I40IW_QP_WQE_MIN_SIZE; | 1193 | *wqe_size = I40IW_QP_WQE_MIN_SIZE; |
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index a7f2e60085c4..f7fcde1ff0aa 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c | |||
| @@ -1085,6 +1085,12 @@ static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask, | |||
| 1085 | bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) == | 1085 | bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) == |
| 1086 | IB_LINK_LAYER_INFINIBAND); | 1086 | IB_LINK_LAYER_INFINIBAND); |
| 1087 | 1087 | ||
| 1088 | /* CM layer calls ib_modify_port() regardless of the link layer. For | ||
| 1089 | * Ethernet ports, qkey violation and Port capabilities are meaningless. | ||
| 1090 | */ | ||
| 1091 | if (!is_ib) | ||
| 1092 | return 0; | ||
| 1093 | |||
| 1088 | if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) { | 1094 | if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) { |
| 1089 | change_mask = props->clr_port_cap_mask | props->set_port_cap_mask; | 1095 | change_mask = props->clr_port_cap_mask | props->set_port_cap_mask; |
| 1090 | value = ~props->clr_port_cap_mask | props->set_port_cap_mask; | 1096 | value = ~props->clr_port_cap_mask | props->set_port_cap_mask; |
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 0889ff367c86..f58f8f5f3ebe 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c | |||
| @@ -1238,6 +1238,7 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, | |||
| 1238 | goto err_destroy_tis; | 1238 | goto err_destroy_tis; |
| 1239 | 1239 | ||
| 1240 | sq->base.container_mibqp = qp; | 1240 | sq->base.container_mibqp = qp; |
| 1241 | sq->base.mqp.event = mlx5_ib_qp_event; | ||
| 1241 | } | 1242 | } |
| 1242 | 1243 | ||
| 1243 | if (qp->rq.wqe_cnt) { | 1244 | if (qp->rq.wqe_cnt) { |
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c index 69bda611d313..90aa326fd7c0 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c | |||
| @@ -65,13 +65,28 @@ int pvrdma_req_notify_cq(struct ib_cq *ibcq, | |||
| 65 | struct pvrdma_dev *dev = to_vdev(ibcq->device); | 65 | struct pvrdma_dev *dev = to_vdev(ibcq->device); |
| 66 | struct pvrdma_cq *cq = to_vcq(ibcq); | 66 | struct pvrdma_cq *cq = to_vcq(ibcq); |
| 67 | u32 val = cq->cq_handle; | 67 | u32 val = cq->cq_handle; |
| 68 | unsigned long flags; | ||
| 69 | int has_data = 0; | ||
| 68 | 70 | ||
| 69 | val |= (notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ? | 71 | val |= (notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ? |
| 70 | PVRDMA_UAR_CQ_ARM_SOL : PVRDMA_UAR_CQ_ARM; | 72 | PVRDMA_UAR_CQ_ARM_SOL : PVRDMA_UAR_CQ_ARM; |
| 71 | 73 | ||
| 74 | spin_lock_irqsave(&cq->cq_lock, flags); | ||
| 75 | |||
| 72 | pvrdma_write_uar_cq(dev, val); | 76 | pvrdma_write_uar_cq(dev, val); |
| 73 | 77 | ||
| 74 | return 0; | 78 | if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) { |
| 79 | unsigned int head; | ||
| 80 | |||
| 81 | has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx, | ||
| 82 | cq->ibcq.cqe, &head); | ||
| 83 | if (unlikely(has_data == PVRDMA_INVALID_IDX)) | ||
| 84 | dev_err(&dev->pdev->dev, "CQ ring state invalid\n"); | ||
| 85 | } | ||
| 86 | |||
| 87 | spin_unlock_irqrestore(&cq->cq_lock, flags); | ||
| 88 | |||
| 89 | return has_data; | ||
| 75 | } | 90 | } |
| 76 | 91 | ||
| 77 | /** | 92 | /** |
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index 3b616cb7c67f..714cf7f9b138 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c | |||
| @@ -1248,6 +1248,10 @@ static const struct acpi_device_id elan_acpi_id[] = { | |||
| 1248 | { "ELAN0100", 0 }, | 1248 | { "ELAN0100", 0 }, |
| 1249 | { "ELAN0600", 0 }, | 1249 | { "ELAN0600", 0 }, |
| 1250 | { "ELAN0605", 0 }, | 1250 | { "ELAN0605", 0 }, |
| 1251 | { "ELAN0608", 0 }, | ||
| 1252 | { "ELAN0605", 0 }, | ||
| 1253 | { "ELAN0609", 0 }, | ||
| 1254 | { "ELAN060B", 0 }, | ||
| 1251 | { "ELAN1000", 0 }, | 1255 | { "ELAN1000", 0 }, |
| 1252 | { } | 1256 | { } |
| 1253 | }; | 1257 | }; |
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c index 922ea02edcc3..20b5b21c1bba 100644 --- a/drivers/input/mouse/trackpoint.c +++ b/drivers/input/mouse/trackpoint.c | |||
| @@ -380,8 +380,8 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties) | |||
| 380 | return 0; | 380 | return 0; |
| 381 | 381 | ||
| 382 | if (trackpoint_read(ps2dev, TP_EXT_BTN, &button_info)) { | 382 | if (trackpoint_read(ps2dev, TP_EXT_BTN, &button_info)) { |
| 383 | psmouse_warn(psmouse, "failed to get extended button data\n"); | 383 | psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n"); |
| 384 | button_info = 0; | 384 | button_info = 0x33; |
| 385 | } | 385 | } |
| 386 | 386 | ||
| 387 | psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL); | 387 | psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL); |
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index b97188acc4f1..2d80fa8a0634 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c | |||
| @@ -1519,6 +1519,13 @@ static int arm_smmu_add_device(struct device *dev) | |||
| 1519 | 1519 | ||
| 1520 | if (using_legacy_binding) { | 1520 | if (using_legacy_binding) { |
| 1521 | ret = arm_smmu_register_legacy_master(dev, &smmu); | 1521 | ret = arm_smmu_register_legacy_master(dev, &smmu); |
| 1522 | |||
| 1523 | /* | ||
| 1524 | * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master() | ||
| 1525 | * will allocate/initialise a new one. Thus we need to update fwspec for | ||
| 1526 | * later use. | ||
| 1527 | */ | ||
| 1528 | fwspec = dev->iommu_fwspec; | ||
| 1522 | if (ret) | 1529 | if (ret) |
| 1523 | goto out_free; | 1530 | goto out_free; |
| 1524 | } else if (fwspec && fwspec->ops == &arm_smmu_ops) { | 1531 | } else if (fwspec && fwspec->ops == &arm_smmu_ops) { |
diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c index 28b26c80f4cf..072bd227b6c6 100644 --- a/drivers/irqchip/irq-atmel-aic-common.c +++ b/drivers/irqchip/irq-atmel-aic-common.c | |||
| @@ -137,14 +137,14 @@ static void __init aic_common_ext_irq_of_init(struct irq_domain *domain) | |||
| 137 | #define AT91_RTC_IMR 0x28 | 137 | #define AT91_RTC_IMR 0x28 |
| 138 | #define AT91_RTC_IRQ_MASK 0x1f | 138 | #define AT91_RTC_IRQ_MASK 0x1f |
| 139 | 139 | ||
| 140 | void __init aic_common_rtc_irq_fixup(struct device_node *root) | 140 | void __init aic_common_rtc_irq_fixup(void) |
| 141 | { | 141 | { |
| 142 | struct device_node *np; | 142 | struct device_node *np; |
| 143 | void __iomem *regs; | 143 | void __iomem *regs; |
| 144 | 144 | ||
| 145 | np = of_find_compatible_node(root, NULL, "atmel,at91rm9200-rtc"); | 145 | np = of_find_compatible_node(NULL, NULL, "atmel,at91rm9200-rtc"); |
| 146 | if (!np) | 146 | if (!np) |
| 147 | np = of_find_compatible_node(root, NULL, | 147 | np = of_find_compatible_node(NULL, NULL, |
| 148 | "atmel,at91sam9x5-rtc"); | 148 | "atmel,at91sam9x5-rtc"); |
| 149 | 149 | ||
| 150 | if (!np) | 150 | if (!np) |
| @@ -165,7 +165,7 @@ void __init aic_common_rtc_irq_fixup(struct device_node *root) | |||
| 165 | #define AT91_RTT_ALMIEN (1 << 16) /* Alarm Interrupt Enable */ | 165 | #define AT91_RTT_ALMIEN (1 << 16) /* Alarm Interrupt Enable */ |
| 166 | #define AT91_RTT_RTTINCIEN (1 << 17) /* Real Time Timer Increment Interrupt Enable */ | 166 | #define AT91_RTT_RTTINCIEN (1 << 17) /* Real Time Timer Increment Interrupt Enable */ |
| 167 | 167 | ||
| 168 | void __init aic_common_rtt_irq_fixup(struct device_node *root) | 168 | void __init aic_common_rtt_irq_fixup(void) |
| 169 | { | 169 | { |
| 170 | struct device_node *np; | 170 | struct device_node *np; |
| 171 | void __iomem *regs; | 171 | void __iomem *regs; |
| @@ -196,11 +196,10 @@ static void __init aic_common_irq_fixup(const struct of_device_id *matches) | |||
| 196 | return; | 196 | return; |
| 197 | 197 | ||
| 198 | match = of_match_node(matches, root); | 198 | match = of_match_node(matches, root); |
| 199 | of_node_put(root); | ||
| 200 | 199 | ||
| 201 | if (match) { | 200 | if (match) { |
| 202 | void (*fixup)(struct device_node *) = match->data; | 201 | void (*fixup)(void) = match->data; |
| 203 | fixup(root); | 202 | fixup(); |
| 204 | } | 203 | } |
| 205 | 204 | ||
| 206 | of_node_put(root); | 205 | of_node_put(root); |
diff --git a/drivers/irqchip/irq-atmel-aic-common.h b/drivers/irqchip/irq-atmel-aic-common.h index af60376d50de..242e62c1851e 100644 --- a/drivers/irqchip/irq-atmel-aic-common.h +++ b/drivers/irqchip/irq-atmel-aic-common.h | |||
| @@ -33,8 +33,8 @@ struct irq_domain *__init aic_common_of_init(struct device_node *node, | |||
| 33 | const char *name, int nirqs, | 33 | const char *name, int nirqs, |
| 34 | const struct of_device_id *matches); | 34 | const struct of_device_id *matches); |
| 35 | 35 | ||
| 36 | void __init aic_common_rtc_irq_fixup(struct device_node *root); | 36 | void __init aic_common_rtc_irq_fixup(void); |
| 37 | 37 | ||
| 38 | void __init aic_common_rtt_irq_fixup(struct device_node *root); | 38 | void __init aic_common_rtt_irq_fixup(void); |
| 39 | 39 | ||
| 40 | #endif /* __IRQ_ATMEL_AIC_COMMON_H */ | 40 | #endif /* __IRQ_ATMEL_AIC_COMMON_H */ |
diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c index 37f952dd9fc9..bb1ad451392f 100644 --- a/drivers/irqchip/irq-atmel-aic.c +++ b/drivers/irqchip/irq-atmel-aic.c | |||
| @@ -209,20 +209,20 @@ static const struct irq_domain_ops aic_irq_ops = { | |||
| 209 | .xlate = aic_irq_domain_xlate, | 209 | .xlate = aic_irq_domain_xlate, |
| 210 | }; | 210 | }; |
| 211 | 211 | ||
| 212 | static void __init at91rm9200_aic_irq_fixup(struct device_node *root) | 212 | static void __init at91rm9200_aic_irq_fixup(void) |
| 213 | { | 213 | { |
| 214 | aic_common_rtc_irq_fixup(root); | 214 | aic_common_rtc_irq_fixup(); |
| 215 | } | 215 | } |
| 216 | 216 | ||
| 217 | static void __init at91sam9260_aic_irq_fixup(struct device_node *root) | 217 | static void __init at91sam9260_aic_irq_fixup(void) |
| 218 | { | 218 | { |
| 219 | aic_common_rtt_irq_fixup(root); | 219 | aic_common_rtt_irq_fixup(); |
| 220 | } | 220 | } |
| 221 | 221 | ||
| 222 | static void __init at91sam9g45_aic_irq_fixup(struct device_node *root) | 222 | static void __init at91sam9g45_aic_irq_fixup(void) |
| 223 | { | 223 | { |
| 224 | aic_common_rtc_irq_fixup(root); | 224 | aic_common_rtc_irq_fixup(); |
| 225 | aic_common_rtt_irq_fixup(root); | 225 | aic_common_rtt_irq_fixup(); |
| 226 | } | 226 | } |
| 227 | 227 | ||
| 228 | static const struct of_device_id aic_irq_fixups[] __initconst = { | 228 | static const struct of_device_id aic_irq_fixups[] __initconst = { |
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c index c04ee9a23d09..6acad2ea0fb3 100644 --- a/drivers/irqchip/irq-atmel-aic5.c +++ b/drivers/irqchip/irq-atmel-aic5.c | |||
| @@ -305,9 +305,9 @@ static const struct irq_domain_ops aic5_irq_ops = { | |||
| 305 | .xlate = aic5_irq_domain_xlate, | 305 | .xlate = aic5_irq_domain_xlate, |
| 306 | }; | 306 | }; |
| 307 | 307 | ||
| 308 | static void __init sama5d3_aic_irq_fixup(struct device_node *root) | 308 | static void __init sama5d3_aic_irq_fixup(void) |
| 309 | { | 309 | { |
| 310 | aic_common_rtc_irq_fixup(root); | 310 | aic_common_rtc_irq_fixup(); |
| 311 | } | 311 | } |
| 312 | 312 | ||
| 313 | static const struct of_device_id aic5_irq_fixups[] __initconst = { | 313 | static const struct of_device_id aic5_irq_fixups[] __initconst = { |
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c index bddf169c4b37..b009b916a292 100644 --- a/drivers/irqchip/irq-brcmstb-l2.c +++ b/drivers/irqchip/irq-brcmstb-l2.c | |||
| @@ -189,6 +189,7 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np, | |||
| 189 | 189 | ||
| 190 | ct->chip.irq_suspend = brcmstb_l2_intc_suspend; | 190 | ct->chip.irq_suspend = brcmstb_l2_intc_suspend; |
| 191 | ct->chip.irq_resume = brcmstb_l2_intc_resume; | 191 | ct->chip.irq_resume = brcmstb_l2_intc_resume; |
| 192 | ct->chip.irq_pm_shutdown = brcmstb_l2_intc_suspend; | ||
| 192 | 193 | ||
| 193 | if (data->can_wake) { | 194 | if (data->can_wake) { |
| 194 | /* This IRQ chip can wake the system, set all child interrupts | 195 | /* This IRQ chip can wake the system, set all child interrupts |
diff --git a/drivers/irqchip/irq-gic-v3-its-platform-msi.c b/drivers/irqchip/irq-gic-v3-its-platform-msi.c index 249240d9a425..833a90fe33ae 100644 --- a/drivers/irqchip/irq-gic-v3-its-platform-msi.c +++ b/drivers/irqchip/irq-gic-v3-its-platform-msi.c | |||
| @@ -43,6 +43,7 @@ static int of_pmsi_get_dev_id(struct irq_domain *domain, struct device *dev, | |||
| 43 | *dev_id = args.args[0]; | 43 | *dev_id = args.args[0]; |
| 44 | break; | 44 | break; |
| 45 | } | 45 | } |
| 46 | index++; | ||
| 46 | } while (!ret); | 47 | } while (!ret); |
| 47 | 48 | ||
| 48 | return ret; | 49 | return ret; |
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 68932873eebc..284738add89b 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c | |||
| @@ -1835,7 +1835,7 @@ static int __init its_of_probe(struct device_node *node) | |||
| 1835 | 1835 | ||
| 1836 | #define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K) | 1836 | #define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K) |
| 1837 | 1837 | ||
| 1838 | #if defined(CONFIG_ACPI_NUMA) && (ACPI_CA_VERSION >= 0x20170531) | 1838 | #ifdef CONFIG_ACPI_NUMA |
| 1839 | struct its_srat_map { | 1839 | struct its_srat_map { |
| 1840 | /* numa node id */ | 1840 | /* numa node id */ |
| 1841 | u32 numa_node; | 1841 | u32 numa_node; |
| @@ -1843,7 +1843,7 @@ struct its_srat_map { | |||
| 1843 | u32 its_id; | 1843 | u32 its_id; |
| 1844 | }; | 1844 | }; |
| 1845 | 1845 | ||
| 1846 | static struct its_srat_map its_srat_maps[MAX_NUMNODES] __initdata; | 1846 | static struct its_srat_map *its_srat_maps __initdata; |
| 1847 | static int its_in_srat __initdata; | 1847 | static int its_in_srat __initdata; |
| 1848 | 1848 | ||
| 1849 | static int __init acpi_get_its_numa_node(u32 its_id) | 1849 | static int __init acpi_get_its_numa_node(u32 its_id) |
| @@ -1857,6 +1857,12 @@ static int __init acpi_get_its_numa_node(u32 its_id) | |||
| 1857 | return NUMA_NO_NODE; | 1857 | return NUMA_NO_NODE; |
| 1858 | } | 1858 | } |
| 1859 | 1859 | ||
| 1860 | static int __init gic_acpi_match_srat_its(struct acpi_subtable_header *header, | ||
| 1861 | const unsigned long end) | ||
| 1862 | { | ||
| 1863 | return 0; | ||
| 1864 | } | ||
| 1865 | |||
| 1860 | static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header, | 1866 | static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header, |
| 1861 | const unsigned long end) | 1867 | const unsigned long end) |
| 1862 | { | 1868 | { |
| @@ -1873,12 +1879,6 @@ static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header, | |||
| 1873 | return -EINVAL; | 1879 | return -EINVAL; |
| 1874 | } | 1880 | } |
| 1875 | 1881 | ||
| 1876 | if (its_in_srat >= MAX_NUMNODES) { | ||
| 1877 | pr_err("SRAT: ITS affinity exceeding max count[%d]\n", | ||
| 1878 | MAX_NUMNODES); | ||
| 1879 | return -EINVAL; | ||
| 1880 | } | ||
| 1881 | |||
| 1882 | node = acpi_map_pxm_to_node(its_affinity->proximity_domain); | 1882 | node = acpi_map_pxm_to_node(its_affinity->proximity_domain); |
| 1883 | 1883 | ||
| 1884 | if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) { | 1884 | if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) { |
| @@ -1897,14 +1897,37 @@ static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header, | |||
| 1897 | 1897 | ||
| 1898 | static void __init acpi_table_parse_srat_its(void) | 1898 | static void __init acpi_table_parse_srat_its(void) |
| 1899 | { | 1899 | { |
| 1900 | int count; | ||
| 1901 | |||
| 1902 | count = acpi_table_parse_entries(ACPI_SIG_SRAT, | ||
| 1903 | sizeof(struct acpi_table_srat), | ||
| 1904 | ACPI_SRAT_TYPE_GIC_ITS_AFFINITY, | ||
| 1905 | gic_acpi_match_srat_its, 0); | ||
| 1906 | if (count <= 0) | ||
| 1907 | return; | ||
| 1908 | |||
| 1909 | its_srat_maps = kmalloc(count * sizeof(struct its_srat_map), | ||
| 1910 | GFP_KERNEL); | ||
| 1911 | if (!its_srat_maps) { | ||
| 1912 | pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n"); | ||
| 1913 | return; | ||
| 1914 | } | ||
| 1915 | |||
| 1900 | acpi_table_parse_entries(ACPI_SIG_SRAT, | 1916 | acpi_table_parse_entries(ACPI_SIG_SRAT, |
| 1901 | sizeof(struct acpi_table_srat), | 1917 | sizeof(struct acpi_table_srat), |
| 1902 | ACPI_SRAT_TYPE_GIC_ITS_AFFINITY, | 1918 | ACPI_SRAT_TYPE_GIC_ITS_AFFINITY, |
| 1903 | gic_acpi_parse_srat_its, 0); | 1919 | gic_acpi_parse_srat_its, 0); |
| 1904 | } | 1920 | } |
| 1921 | |||
| 1922 | /* free the its_srat_maps after ITS probing */ | ||
| 1923 | static void __init acpi_its_srat_maps_free(void) | ||
| 1924 | { | ||
| 1925 | kfree(its_srat_maps); | ||
| 1926 | } | ||
| 1905 | #else | 1927 | #else |
| 1906 | static void __init acpi_table_parse_srat_its(void) { } | 1928 | static void __init acpi_table_parse_srat_its(void) { } |
| 1907 | static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; } | 1929 | static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; } |
| 1930 | static void __init acpi_its_srat_maps_free(void) { } | ||
| 1908 | #endif | 1931 | #endif |
| 1909 | 1932 | ||
| 1910 | static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header, | 1933 | static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header, |
| @@ -1951,6 +1974,7 @@ static void __init its_acpi_probe(void) | |||
| 1951 | acpi_table_parse_srat_its(); | 1974 | acpi_table_parse_srat_its(); |
| 1952 | acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR, | 1975 | acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR, |
| 1953 | gic_acpi_parse_madt_its, 0); | 1976 | gic_acpi_parse_madt_its, 0); |
| 1977 | acpi_its_srat_maps_free(); | ||
| 1954 | } | 1978 | } |
| 1955 | #else | 1979 | #else |
| 1956 | static void __init its_acpi_probe(void) { } | 1980 | static void __init its_acpi_probe(void) { } |
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index dbffb7ab6203..984c3ecfd22c 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c | |||
| @@ -353,6 +353,8 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs | |||
| 353 | 353 | ||
| 354 | if (static_key_true(&supports_deactivate)) | 354 | if (static_key_true(&supports_deactivate)) |
| 355 | gic_write_eoir(irqnr); | 355 | gic_write_eoir(irqnr); |
| 356 | else | ||
| 357 | isb(); | ||
| 356 | 358 | ||
| 357 | err = handle_domain_irq(gic_data.domain, irqnr, regs); | 359 | err = handle_domain_irq(gic_data.domain, irqnr, regs); |
| 358 | if (err) { | 360 | if (err) { |
| @@ -640,11 +642,16 @@ static void gic_smp_init(void) | |||
| 640 | static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, | 642 | static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, |
| 641 | bool force) | 643 | bool force) |
| 642 | { | 644 | { |
| 643 | unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask); | 645 | unsigned int cpu; |
| 644 | void __iomem *reg; | 646 | void __iomem *reg; |
| 645 | int enabled; | 647 | int enabled; |
| 646 | u64 val; | 648 | u64 val; |
| 647 | 649 | ||
| 650 | if (force) | ||
| 651 | cpu = cpumask_first(mask_val); | ||
| 652 | else | ||
| 653 | cpu = cpumask_any_and(mask_val, cpu_online_mask); | ||
| 654 | |||
| 648 | if (cpu >= nr_cpu_ids) | 655 | if (cpu >= nr_cpu_ids) |
| 649 | return -EINVAL; | 656 | return -EINVAL; |
| 650 | 657 | ||
| @@ -831,8 +838,11 @@ static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, | |||
| 831 | if (ret) | 838 | if (ret) |
| 832 | return ret; | 839 | return ret; |
| 833 | 840 | ||
| 834 | for (i = 0; i < nr_irqs; i++) | 841 | for (i = 0; i < nr_irqs; i++) { |
| 835 | gic_irq_domain_map(domain, virq + i, hwirq + i); | 842 | ret = gic_irq_domain_map(domain, virq + i, hwirq + i); |
| 843 | if (ret) | ||
| 844 | return ret; | ||
| 845 | } | ||
| 836 | 846 | ||
| 837 | return 0; | 847 | return 0; |
| 838 | } | 848 | } |
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 1b1df4f770bd..d3e7c43718b8 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c | |||
| @@ -361,6 +361,7 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) | |||
| 361 | if (likely(irqnr > 15 && irqnr < 1020)) { | 361 | if (likely(irqnr > 15 && irqnr < 1020)) { |
| 362 | if (static_key_true(&supports_deactivate)) | 362 | if (static_key_true(&supports_deactivate)) |
| 363 | writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI); | 363 | writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI); |
| 364 | isb(); | ||
| 364 | handle_domain_irq(gic->domain, irqnr, regs); | 365 | handle_domain_irq(gic->domain, irqnr, regs); |
| 365 | continue; | 366 | continue; |
| 366 | } | 367 | } |
| @@ -401,10 +402,12 @@ static void gic_handle_cascade_irq(struct irq_desc *desc) | |||
| 401 | goto out; | 402 | goto out; |
| 402 | 403 | ||
| 403 | cascade_irq = irq_find_mapping(chip_data->domain, gic_irq); | 404 | cascade_irq = irq_find_mapping(chip_data->domain, gic_irq); |
| 404 | if (unlikely(gic_irq < 32 || gic_irq > 1020)) | 405 | if (unlikely(gic_irq < 32 || gic_irq > 1020)) { |
| 405 | handle_bad_irq(desc); | 406 | handle_bad_irq(desc); |
| 406 | else | 407 | } else { |
| 408 | isb(); | ||
| 407 | generic_handle_irq(cascade_irq); | 409 | generic_handle_irq(cascade_irq); |
| 410 | } | ||
| 408 | 411 | ||
| 409 | out: | 412 | out: |
| 410 | chained_irq_exit(chip, desc); | 413 | chained_irq_exit(chip, desc); |
| @@ -1027,8 +1030,11 @@ static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, | |||
| 1027 | if (ret) | 1030 | if (ret) |
| 1028 | return ret; | 1031 | return ret; |
| 1029 | 1032 | ||
| 1030 | for (i = 0; i < nr_irqs; i++) | 1033 | for (i = 0; i < nr_irqs; i++) { |
| 1031 | gic_irq_domain_map(domain, virq + i, hwirq + i); | 1034 | ret = gic_irq_domain_map(domain, virq + i, hwirq + i); |
| 1035 | if (ret) | ||
| 1036 | return ret; | ||
| 1037 | } | ||
| 1032 | 1038 | ||
| 1033 | return 0; | 1039 | return 0; |
| 1034 | } | 1040 | } |
diff --git a/drivers/isdn/mISDN/fsm.c b/drivers/isdn/mISDN/fsm.c index 78fc5d5e9051..92e6570b1143 100644 --- a/drivers/isdn/mISDN/fsm.c +++ b/drivers/isdn/mISDN/fsm.c | |||
| @@ -26,7 +26,7 @@ | |||
| 26 | 26 | ||
| 27 | #define FSM_TIMER_DEBUG 0 | 27 | #define FSM_TIMER_DEBUG 0 |
| 28 | 28 | ||
| 29 | void | 29 | int |
| 30 | mISDN_FsmNew(struct Fsm *fsm, | 30 | mISDN_FsmNew(struct Fsm *fsm, |
| 31 | struct FsmNode *fnlist, int fncount) | 31 | struct FsmNode *fnlist, int fncount) |
| 32 | { | 32 | { |
| @@ -34,6 +34,8 @@ mISDN_FsmNew(struct Fsm *fsm, | |||
| 34 | 34 | ||
| 35 | fsm->jumpmatrix = kzalloc(sizeof(FSMFNPTR) * fsm->state_count * | 35 | fsm->jumpmatrix = kzalloc(sizeof(FSMFNPTR) * fsm->state_count * |
| 36 | fsm->event_count, GFP_KERNEL); | 36 | fsm->event_count, GFP_KERNEL); |
| 37 | if (fsm->jumpmatrix == NULL) | ||
| 38 | return -ENOMEM; | ||
| 37 | 39 | ||
| 38 | for (i = 0; i < fncount; i++) | 40 | for (i = 0; i < fncount; i++) |
| 39 | if ((fnlist[i].state >= fsm->state_count) || | 41 | if ((fnlist[i].state >= fsm->state_count) || |
| @@ -45,6 +47,7 @@ mISDN_FsmNew(struct Fsm *fsm, | |||
| 45 | } else | 47 | } else |
| 46 | fsm->jumpmatrix[fsm->state_count * fnlist[i].event + | 48 | fsm->jumpmatrix[fsm->state_count * fnlist[i].event + |
| 47 | fnlist[i].state] = (FSMFNPTR) fnlist[i].routine; | 49 | fnlist[i].state] = (FSMFNPTR) fnlist[i].routine; |
| 50 | return 0; | ||
| 48 | } | 51 | } |
| 49 | EXPORT_SYMBOL(mISDN_FsmNew); | 52 | EXPORT_SYMBOL(mISDN_FsmNew); |
| 50 | 53 | ||
diff --git a/drivers/isdn/mISDN/fsm.h b/drivers/isdn/mISDN/fsm.h index 928f5be192c1..e1def8490221 100644 --- a/drivers/isdn/mISDN/fsm.h +++ b/drivers/isdn/mISDN/fsm.h | |||
| @@ -55,7 +55,7 @@ struct FsmTimer { | |||
| 55 | void *arg; | 55 | void *arg; |
| 56 | }; | 56 | }; |
| 57 | 57 | ||
| 58 | extern void mISDN_FsmNew(struct Fsm *, struct FsmNode *, int); | 58 | extern int mISDN_FsmNew(struct Fsm *, struct FsmNode *, int); |
| 59 | extern void mISDN_FsmFree(struct Fsm *); | 59 | extern void mISDN_FsmFree(struct Fsm *); |
| 60 | extern int mISDN_FsmEvent(struct FsmInst *, int , void *); | 60 | extern int mISDN_FsmEvent(struct FsmInst *, int , void *); |
| 61 | extern void mISDN_FsmChangeState(struct FsmInst *, int); | 61 | extern void mISDN_FsmChangeState(struct FsmInst *, int); |
diff --git a/drivers/isdn/mISDN/layer1.c b/drivers/isdn/mISDN/layer1.c index bebc57b72138..3192b0eb3944 100644 --- a/drivers/isdn/mISDN/layer1.c +++ b/drivers/isdn/mISDN/layer1.c | |||
| @@ -414,8 +414,7 @@ l1_init(u_int *deb) | |||
| 414 | l1fsm_s.event_count = L1_EVENT_COUNT; | 414 | l1fsm_s.event_count = L1_EVENT_COUNT; |
| 415 | l1fsm_s.strEvent = strL1Event; | 415 | l1fsm_s.strEvent = strL1Event; |
| 416 | l1fsm_s.strState = strL1SState; | 416 | l1fsm_s.strState = strL1SState; |
| 417 | mISDN_FsmNew(&l1fsm_s, L1SFnList, ARRAY_SIZE(L1SFnList)); | 417 | return mISDN_FsmNew(&l1fsm_s, L1SFnList, ARRAY_SIZE(L1SFnList)); |
| 418 | return 0; | ||
| 419 | } | 418 | } |
| 420 | 419 | ||
| 421 | void | 420 | void |
diff --git a/drivers/isdn/mISDN/layer2.c b/drivers/isdn/mISDN/layer2.c index 7243a6746f8b..9ff0903a0e89 100644 --- a/drivers/isdn/mISDN/layer2.c +++ b/drivers/isdn/mISDN/layer2.c | |||
| @@ -2247,15 +2247,26 @@ static struct Bprotocol X75SLP = { | |||
| 2247 | int | 2247 | int |
| 2248 | Isdnl2_Init(u_int *deb) | 2248 | Isdnl2_Init(u_int *deb) |
| 2249 | { | 2249 | { |
| 2250 | int res; | ||
| 2250 | debug = deb; | 2251 | debug = deb; |
| 2251 | mISDN_register_Bprotocol(&X75SLP); | 2252 | mISDN_register_Bprotocol(&X75SLP); |
| 2252 | l2fsm.state_count = L2_STATE_COUNT; | 2253 | l2fsm.state_count = L2_STATE_COUNT; |
| 2253 | l2fsm.event_count = L2_EVENT_COUNT; | 2254 | l2fsm.event_count = L2_EVENT_COUNT; |
| 2254 | l2fsm.strEvent = strL2Event; | 2255 | l2fsm.strEvent = strL2Event; |
| 2255 | l2fsm.strState = strL2State; | 2256 | l2fsm.strState = strL2State; |
| 2256 | mISDN_FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList)); | 2257 | res = mISDN_FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList)); |
| 2257 | TEIInit(deb); | 2258 | if (res) |
| 2259 | goto error; | ||
| 2260 | res = TEIInit(deb); | ||
| 2261 | if (res) | ||
| 2262 | goto error_fsm; | ||
| 2258 | return 0; | 2263 | return 0; |
| 2264 | |||
| 2265 | error_fsm: | ||
| 2266 | mISDN_FsmFree(&l2fsm); | ||
| 2267 | error: | ||
| 2268 | mISDN_unregister_Bprotocol(&X75SLP); | ||
| 2269 | return res; | ||
| 2259 | } | 2270 | } |
| 2260 | 2271 | ||
| 2261 | void | 2272 | void |
diff --git a/drivers/isdn/mISDN/tei.c b/drivers/isdn/mISDN/tei.c index 908127efccf8..12d9e5f4beb1 100644 --- a/drivers/isdn/mISDN/tei.c +++ b/drivers/isdn/mISDN/tei.c | |||
| @@ -1387,23 +1387,37 @@ create_teimanager(struct mISDNdevice *dev) | |||
| 1387 | 1387 | ||
| 1388 | int TEIInit(u_int *deb) | 1388 | int TEIInit(u_int *deb) |
| 1389 | { | 1389 | { |
| 1390 | int res; | ||
| 1390 | debug = deb; | 1391 | debug = deb; |
| 1391 | teifsmu.state_count = TEI_STATE_COUNT; | 1392 | teifsmu.state_count = TEI_STATE_COUNT; |
| 1392 | teifsmu.event_count = TEI_EVENT_COUNT; | 1393 | teifsmu.event_count = TEI_EVENT_COUNT; |
| 1393 | teifsmu.strEvent = strTeiEvent; | 1394 | teifsmu.strEvent = strTeiEvent; |
| 1394 | teifsmu.strState = strTeiState; | 1395 | teifsmu.strState = strTeiState; |
| 1395 | mISDN_FsmNew(&teifsmu, TeiFnListUser, ARRAY_SIZE(TeiFnListUser)); | 1396 | res = mISDN_FsmNew(&teifsmu, TeiFnListUser, ARRAY_SIZE(TeiFnListUser)); |
| 1397 | if (res) | ||
| 1398 | goto error; | ||
| 1396 | teifsmn.state_count = TEI_STATE_COUNT; | 1399 | teifsmn.state_count = TEI_STATE_COUNT; |
| 1397 | teifsmn.event_count = TEI_EVENT_COUNT; | 1400 | teifsmn.event_count = TEI_EVENT_COUNT; |
| 1398 | teifsmn.strEvent = strTeiEvent; | 1401 | teifsmn.strEvent = strTeiEvent; |
| 1399 | teifsmn.strState = strTeiState; | 1402 | teifsmn.strState = strTeiState; |
| 1400 | mISDN_FsmNew(&teifsmn, TeiFnListNet, ARRAY_SIZE(TeiFnListNet)); | 1403 | res = mISDN_FsmNew(&teifsmn, TeiFnListNet, ARRAY_SIZE(TeiFnListNet)); |
| 1404 | if (res) | ||
| 1405 | goto error_smn; | ||
| 1401 | deactfsm.state_count = DEACT_STATE_COUNT; | 1406 | deactfsm.state_count = DEACT_STATE_COUNT; |
| 1402 | deactfsm.event_count = DEACT_EVENT_COUNT; | 1407 | deactfsm.event_count = DEACT_EVENT_COUNT; |
| 1403 | deactfsm.strEvent = strDeactEvent; | 1408 | deactfsm.strEvent = strDeactEvent; |
| 1404 | deactfsm.strState = strDeactState; | 1409 | deactfsm.strState = strDeactState; |
| 1405 | mISDN_FsmNew(&deactfsm, DeactFnList, ARRAY_SIZE(DeactFnList)); | 1410 | res = mISDN_FsmNew(&deactfsm, DeactFnList, ARRAY_SIZE(DeactFnList)); |
| 1411 | if (res) | ||
| 1412 | goto error_deact; | ||
| 1406 | return 0; | 1413 | return 0; |
| 1414 | |||
| 1415 | error_deact: | ||
| 1416 | mISDN_FsmFree(&teifsmn); | ||
| 1417 | error_smn: | ||
| 1418 | mISDN_FsmFree(&teifsmu); | ||
| 1419 | error: | ||
| 1420 | return res; | ||
| 1407 | } | 1421 | } |
| 1408 | 1422 | ||
| 1409 | void TEIFree(void) | 1423 | void TEIFree(void) |
diff --git a/drivers/md/md.c b/drivers/md/md.c index c99634612fc4..b01e458d31e9 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
| @@ -7996,7 +7996,7 @@ bool md_write_start(struct mddev *mddev, struct bio *bi) | |||
| 7996 | if (mddev->safemode == 1) | 7996 | if (mddev->safemode == 1) |
| 7997 | mddev->safemode = 0; | 7997 | mddev->safemode = 0; |
| 7998 | /* sync_checkers is always 0 when writes_pending is in per-cpu mode */ | 7998 | /* sync_checkers is always 0 when writes_pending is in per-cpu mode */ |
| 7999 | if (mddev->in_sync || !mddev->sync_checkers) { | 7999 | if (mddev->in_sync || mddev->sync_checkers) { |
| 8000 | spin_lock(&mddev->lock); | 8000 | spin_lock(&mddev->lock); |
| 8001 | if (mddev->in_sync) { | 8001 | if (mddev->in_sync) { |
| 8002 | mddev->in_sync = 0; | 8002 | mddev->in_sync = 0; |
| @@ -8656,6 +8656,9 @@ void md_check_recovery(struct mddev *mddev) | |||
| 8656 | if (mddev_trylock(mddev)) { | 8656 | if (mddev_trylock(mddev)) { |
| 8657 | int spares = 0; | 8657 | int spares = 0; |
| 8658 | 8658 | ||
| 8659 | if (!mddev->external && mddev->safemode == 1) | ||
| 8660 | mddev->safemode = 0; | ||
| 8661 | |||
| 8659 | if (mddev->ro) { | 8662 | if (mddev->ro) { |
| 8660 | struct md_rdev *rdev; | 8663 | struct md_rdev *rdev; |
| 8661 | if (!mddev->external && mddev->in_sync) | 8664 | if (!mddev->external && mddev->in_sync) |
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index bfa1e907c472..2dcbafa8e66c 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c | |||
| @@ -236,9 +236,10 @@ struct r5l_io_unit { | |||
| 236 | bool need_split_bio; | 236 | bool need_split_bio; |
| 237 | struct bio *split_bio; | 237 | struct bio *split_bio; |
| 238 | 238 | ||
| 239 | unsigned int has_flush:1; /* include flush request */ | 239 | unsigned int has_flush:1; /* include flush request */ |
| 240 | unsigned int has_fua:1; /* include fua request */ | 240 | unsigned int has_fua:1; /* include fua request */ |
| 241 | unsigned int has_null_flush:1; /* include empty flush request */ | 241 | unsigned int has_null_flush:1; /* include null flush request */ |
| 242 | unsigned int has_flush_payload:1; /* include flush payload */ | ||
| 242 | /* | 243 | /* |
| 243 | * io isn't sent yet, flush/fua request can only be submitted till it's | 244 | * io isn't sent yet, flush/fua request can only be submitted till it's |
| 244 | * the first IO in running_ios list | 245 | * the first IO in running_ios list |
| @@ -571,6 +572,8 @@ static void r5l_log_endio(struct bio *bio) | |||
| 571 | struct r5l_io_unit *io_deferred; | 572 | struct r5l_io_unit *io_deferred; |
| 572 | struct r5l_log *log = io->log; | 573 | struct r5l_log *log = io->log; |
| 573 | unsigned long flags; | 574 | unsigned long flags; |
| 575 | bool has_null_flush; | ||
| 576 | bool has_flush_payload; | ||
| 574 | 577 | ||
| 575 | if (bio->bi_status) | 578 | if (bio->bi_status) |
| 576 | md_error(log->rdev->mddev, log->rdev); | 579 | md_error(log->rdev->mddev, log->rdev); |
| @@ -580,6 +583,16 @@ static void r5l_log_endio(struct bio *bio) | |||
| 580 | 583 | ||
| 581 | spin_lock_irqsave(&log->io_list_lock, flags); | 584 | spin_lock_irqsave(&log->io_list_lock, flags); |
| 582 | __r5l_set_io_unit_state(io, IO_UNIT_IO_END); | 585 | __r5l_set_io_unit_state(io, IO_UNIT_IO_END); |
| 586 | |||
| 587 | /* | ||
| 588 | * if the io doesn't not have null_flush or flush payload, | ||
| 589 | * it is not safe to access it after releasing io_list_lock. | ||
| 590 | * Therefore, it is necessary to check the condition with | ||
| 591 | * the lock held. | ||
| 592 | */ | ||
| 593 | has_null_flush = io->has_null_flush; | ||
| 594 | has_flush_payload = io->has_flush_payload; | ||
| 595 | |||
| 583 | if (log->need_cache_flush && !list_empty(&io->stripe_list)) | 596 | if (log->need_cache_flush && !list_empty(&io->stripe_list)) |
| 584 | r5l_move_to_end_ios(log); | 597 | r5l_move_to_end_ios(log); |
| 585 | else | 598 | else |
| @@ -600,19 +613,23 @@ static void r5l_log_endio(struct bio *bio) | |||
| 600 | if (log->need_cache_flush) | 613 | if (log->need_cache_flush) |
| 601 | md_wakeup_thread(log->rdev->mddev->thread); | 614 | md_wakeup_thread(log->rdev->mddev->thread); |
| 602 | 615 | ||
| 603 | if (io->has_null_flush) { | 616 | /* finish flush only io_unit and PAYLOAD_FLUSH only io_unit */ |
| 617 | if (has_null_flush) { | ||
| 604 | struct bio *bi; | 618 | struct bio *bi; |
| 605 | 619 | ||
| 606 | WARN_ON(bio_list_empty(&io->flush_barriers)); | 620 | WARN_ON(bio_list_empty(&io->flush_barriers)); |
| 607 | while ((bi = bio_list_pop(&io->flush_barriers)) != NULL) { | 621 | while ((bi = bio_list_pop(&io->flush_barriers)) != NULL) { |
| 608 | bio_endio(bi); | 622 | bio_endio(bi); |
| 609 | atomic_dec(&io->pending_stripe); | 623 | if (atomic_dec_and_test(&io->pending_stripe)) { |
| 624 | __r5l_stripe_write_finished(io); | ||
| 625 | return; | ||
| 626 | } | ||
| 610 | } | 627 | } |
| 611 | } | 628 | } |
| 612 | 629 | /* decrease pending_stripe for flush payload */ | |
| 613 | /* finish flush only io_unit and PAYLOAD_FLUSH only io_unit */ | 630 | if (has_flush_payload) |
| 614 | if (atomic_read(&io->pending_stripe) == 0) | 631 | if (atomic_dec_and_test(&io->pending_stripe)) |
| 615 | __r5l_stripe_write_finished(io); | 632 | __r5l_stripe_write_finished(io); |
| 616 | } | 633 | } |
| 617 | 634 | ||
| 618 | static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io) | 635 | static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io) |
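A minimal user-space sketch of the pattern behind the r5l_log_endio() change above — copy the flags you still need while the lock is held, then treat pending_stripe as a reference count so that only whoever drops the last reference finishes (and may free) the io_unit. C11 atomics and a pthread mutex stand in for the kernel primitives, and the names (io_unit, io_finish()) are illustrative, not the driver's:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct io_unit {
	pthread_mutex_t lock;
	atomic_int pending;      /* references still outstanding */
	bool has_null_flush;     /* only valid while the object is live */
};

static void io_finish(struct io_unit *io)
{
	free(io);                /* nobody may touch io after this point */
}

static void io_endio(struct io_unit *io, int nr_barriers)
{
	bool has_null_flush;

	pthread_mutex_lock(&io->lock);
	/* snapshot the flag now: once references start dropping below,
	 * whichever path drops the last one frees the object */
	has_null_flush = io->has_null_flush;
	pthread_mutex_unlock(&io->lock);

	if (has_null_flush) {
		for (int i = 0; i < nr_barriers; i++) {
			/* each completed barrier bio drops one reference */
			if (atomic_fetch_sub(&io->pending, 1) == 1) {
				io_finish(io);
				return;  /* io is gone, stop using it */
			}
		}
	}
}

int main(void)
{
	struct io_unit *io = calloc(1, sizeof(*io));

	pthread_mutex_init(&io->lock, NULL);
	atomic_init(&io->pending, 3);
	io->has_null_flush = true;
	io_endio(io, 3);         /* the third decrement finishes the io */
	printf("done\n");
	return 0;
}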
| @@ -881,6 +898,11 @@ static void r5l_append_flush_payload(struct r5l_log *log, sector_t sect) | |||
| 881 | payload->size = cpu_to_le32(sizeof(__le64)); | 898 | payload->size = cpu_to_le32(sizeof(__le64)); |
| 882 | payload->flush_stripes[0] = cpu_to_le64(sect); | 899 | payload->flush_stripes[0] = cpu_to_le64(sect); |
| 883 | io->meta_offset += meta_size; | 900 | io->meta_offset += meta_size; |
| 901 | /* multiple flush payloads count as one pending_stripe */ | ||
| 902 | if (!io->has_flush_payload) { | ||
| 903 | io->has_flush_payload = 1; | ||
| 904 | atomic_inc(&io->pending_stripe); | ||
| 905 | } | ||
| 884 | mutex_unlock(&log->io_mutex); | 906 | mutex_unlock(&log->io_mutex); |
| 885 | } | 907 | } |
| 886 | 908 | ||
| @@ -2540,23 +2562,32 @@ static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page) | |||
| 2540 | */ | 2562 | */ |
| 2541 | int r5c_journal_mode_set(struct mddev *mddev, int mode) | 2563 | int r5c_journal_mode_set(struct mddev *mddev, int mode) |
| 2542 | { | 2564 | { |
| 2543 | struct r5conf *conf = mddev->private; | 2565 | struct r5conf *conf; |
| 2544 | struct r5l_log *log = conf->log; | 2566 | int err; |
| 2545 | |||
| 2546 | if (!log) | ||
| 2547 | return -ENODEV; | ||
| 2548 | 2567 | ||
| 2549 | if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH || | 2568 | if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH || |
| 2550 | mode > R5C_JOURNAL_MODE_WRITE_BACK) | 2569 | mode > R5C_JOURNAL_MODE_WRITE_BACK) |
| 2551 | return -EINVAL; | 2570 | return -EINVAL; |
| 2552 | 2571 | ||
| 2572 | err = mddev_lock(mddev); | ||
| 2573 | if (err) | ||
| 2574 | return err; | ||
| 2575 | conf = mddev->private; | ||
| 2576 | if (!conf || !conf->log) { | ||
| 2577 | mddev_unlock(mddev); | ||
| 2578 | return -ENODEV; | ||
| 2579 | } | ||
| 2580 | |||
| 2553 | if (raid5_calc_degraded(conf) > 0 && | 2581 | if (raid5_calc_degraded(conf) > 0 && |
| 2554 | mode == R5C_JOURNAL_MODE_WRITE_BACK) | 2582 | mode == R5C_JOURNAL_MODE_WRITE_BACK) { |
| 2583 | mddev_unlock(mddev); | ||
| 2555 | return -EINVAL; | 2584 | return -EINVAL; |
| 2585 | } | ||
| 2556 | 2586 | ||
| 2557 | mddev_suspend(mddev); | 2587 | mddev_suspend(mddev); |
| 2558 | conf->log->r5c_journal_mode = mode; | 2588 | conf->log->r5c_journal_mode = mode; |
| 2559 | mddev_resume(mddev); | 2589 | mddev_resume(mddev); |
| 2590 | mddev_unlock(mddev); | ||
| 2560 | 2591 | ||
| 2561 | pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n", | 2592 | pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n", |
| 2562 | mdname(mddev), mode, r5c_journal_mode_str[mode]); | 2593 | mdname(mddev), mode, r5c_journal_mode_str[mode]); |
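The r5c_journal_mode_set() rework above takes mddev_lock() before dereferencing mddev->private and releases it on every exit path, including the error branches. A small sketch of that shape under assumed types (device, set_mode() and the mode bounds are invented), written here with a goto-based single unlock, one common C idiom for the same guarantee:

#include <errno.h>
#include <pthread.h>

struct log { int mode; };
struct device {
	pthread_mutex_t lock;
	struct log *log;         /* may be torn down concurrently */
};

/* hypothetical bounds for the mode value */
enum { MODE_MIN = 0, MODE_MAX = 1 };

static int set_mode(struct device *dev, int mode)
{
	int err = 0;

	/* validate arguments that don't need the lock first */
	if (mode < MODE_MIN || mode > MODE_MAX)
		return -EINVAL;

	pthread_mutex_lock(&dev->lock);
	/* re-check state that can change while unlocked */
	if (!dev->log) {
		err = -ENODEV;
		goto out_unlock;
	}
	dev->log->mode = mode;

out_unlock:
	/* single exit: every path releases the lock exactly once */
	pthread_mutex_unlock(&dev->lock);
	return err;
}

int main(void)
{
	struct log l = { .mode = 0 };
	struct device dev = { .lock = PTHREAD_MUTEX_INITIALIZER, .log = &l };

	return set_mode(&dev, MODE_MAX) ? 1 : 0;
}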
diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c index 99e644cda4d1..ebf69ff48ae2 100644 --- a/drivers/memory/atmel-ebi.c +++ b/drivers/memory/atmel-ebi.c | |||
| @@ -72,7 +72,7 @@ struct atmel_smc_timing_xlate { | |||
| 72 | { .name = nm, .converter = atmel_smc_cs_conf_set_pulse, .shift = pos} | 72 | { .name = nm, .converter = atmel_smc_cs_conf_set_pulse, .shift = pos} |
| 73 | 73 | ||
| 74 | #define ATMEL_SMC_CYCLE_XLATE(nm, pos) \ | 74 | #define ATMEL_SMC_CYCLE_XLATE(nm, pos) \ |
| 75 | { .name = nm, .converter = atmel_smc_cs_conf_set_setup, .shift = pos} | 75 | { .name = nm, .converter = atmel_smc_cs_conf_set_cycle, .shift = pos} |
| 76 | 76 | ||
| 77 | static void at91sam9_ebi_get_config(struct atmel_ebi_dev *ebid, | 77 | static void at91sam9_ebi_get_config(struct atmel_ebi_dev *ebid, |
| 78 | struct atmel_ebi_dev_config *conf) | 78 | struct atmel_ebi_dev_config *conf) |
| @@ -120,12 +120,14 @@ static int atmel_ebi_xslate_smc_timings(struct atmel_ebi_dev *ebid, | |||
| 120 | if (!ret) { | 120 | if (!ret) { |
| 121 | required = true; | 121 | required = true; |
| 122 | ncycles = DIV_ROUND_UP(val, clk_period_ns); | 122 | ncycles = DIV_ROUND_UP(val, clk_period_ns); |
| 123 | if (ncycles > ATMEL_SMC_MODE_TDF_MAX || | 123 | if (ncycles > ATMEL_SMC_MODE_TDF_MAX) { |
| 124 | ncycles < ATMEL_SMC_MODE_TDF_MIN) { | ||
| 125 | ret = -EINVAL; | 124 | ret = -EINVAL; |
| 126 | goto out; | 125 | goto out; |
| 127 | } | 126 | } |
| 128 | 127 | ||
| 128 | if (ncycles < ATMEL_SMC_MODE_TDF_MIN) | ||
| 129 | ncycles = ATMEL_SMC_MODE_TDF_MIN; | ||
| 130 | |||
| 129 | smcconf->mode |= ATMEL_SMC_MODE_TDF(ncycles); | 131 | smcconf->mode |= ATMEL_SMC_MODE_TDF(ncycles); |
| 130 | } | 132 | } |
| 131 | 133 | ||
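The atmel-ebi hunk above converts a nanosecond timing into clock cycles, still rejecting values above the hardware maximum but clamping values below the minimum instead of failing. A stand-alone sketch of that conversion (TDF_MIN/TDF_MAX below are illustrative placeholders, not the real SMC register bounds):

#include <errno.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

/* illustrative limits, not the real SMC register bounds */
#define TDF_MIN 1
#define TDF_MAX 15

static int tdf_ns_to_cycles(unsigned int ns, unsigned int clk_period_ns,
			    unsigned int *out)
{
	unsigned int ncycles = DIV_ROUND_UP(ns, clk_period_ns);

	if (ncycles > TDF_MAX)
		return -EINVAL;      /* cannot be represented, hard error */

	if (ncycles < TDF_MIN)
		ncycles = TDF_MIN;   /* round up to the hardware minimum */

	*out = ncycles;
	return 0;
}

int main(void)
{
	unsigned int cycles;

	/* 0 ns rounds below the minimum and gets clamped to TDF_MIN */
	if (!tdf_ns_to_cycles(0, 7, &cycles))
		printf("tdf = %u cycles\n", cycles);
	return 0;
}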
| @@ -263,7 +265,7 @@ static int atmel_ebi_xslate_smc_config(struct atmel_ebi_dev *ebid, | |||
| 263 | } | 265 | } |
| 264 | 266 | ||
| 265 | ret = atmel_ebi_xslate_smc_timings(ebid, np, &conf->smcconf); | 267 | ret = atmel_ebi_xslate_smc_timings(ebid, np, &conf->smcconf); |
| 266 | if (ret) | 268 | if (ret < 0) |
| 267 | return -EINVAL; | 269 | return -EINVAL; |
| 268 | 270 | ||
| 269 | if ((ret > 0 && !required) || (!ret && required)) { | 271 | if ((ret > 0 && !required) || (!ret && required)) { |
diff --git a/drivers/mfd/atmel-smc.c b/drivers/mfd/atmel-smc.c index 954cf0f66a31..20cc0ea470fa 100644 --- a/drivers/mfd/atmel-smc.c +++ b/drivers/mfd/atmel-smc.c | |||
| @@ -206,7 +206,7 @@ EXPORT_SYMBOL_GPL(atmel_smc_cs_conf_set_pulse); | |||
| 206 | * parameter | 206 | * parameter |
| 207 | * | 207 | * |
| 208 | * This function encodes the @ncycles value as described in the datasheet | 208 | * This function encodes the @ncycles value as described in the datasheet |
| 209 | * (section "SMC Pulse Register"), and then stores the result in the | 209 | * (section "SMC Cycle Register"), and then stores the result in the |
| 210 | * @conf->setup field at @shift position. | 210 | * @conf->setup field at @shift position. |
| 211 | * | 211 | * |
| 212 | * Returns -EINVAL if @shift is invalid, -ERANGE if @ncycles does not fit in | 212 | * Returns -EINVAL if @shift is invalid, -ERANGE if @ncycles does not fit in |
diff --git a/drivers/mfd/da9062-core.c b/drivers/mfd/da9062-core.c index fbe0f245ce8e..fe1811523e4a 100644 --- a/drivers/mfd/da9062-core.c +++ b/drivers/mfd/da9062-core.c | |||
| @@ -645,6 +645,9 @@ static const struct regmap_range da9062_aa_readable_ranges[] = { | |||
| 645 | .range_min = DA9062AA_VLDO1_B, | 645 | .range_min = DA9062AA_VLDO1_B, |
| 646 | .range_max = DA9062AA_VLDO4_B, | 646 | .range_max = DA9062AA_VLDO4_B, |
| 647 | }, { | 647 | }, { |
| 648 | .range_min = DA9062AA_BBAT_CONT, | ||
| 649 | .range_max = DA9062AA_BBAT_CONT, | ||
| 650 | }, { | ||
| 648 | .range_min = DA9062AA_INTERFACE, | 651 | .range_min = DA9062AA_INTERFACE, |
| 649 | .range_max = DA9062AA_CONFIG_E, | 652 | .range_max = DA9062AA_CONFIG_E, |
| 650 | }, { | 653 | }, { |
| @@ -721,6 +724,9 @@ static const struct regmap_range da9062_aa_writeable_ranges[] = { | |||
| 721 | .range_min = DA9062AA_VLDO1_B, | 724 | .range_min = DA9062AA_VLDO1_B, |
| 722 | .range_max = DA9062AA_VLDO4_B, | 725 | .range_max = DA9062AA_VLDO4_B, |
| 723 | }, { | 726 | }, { |
| 727 | .range_min = DA9062AA_BBAT_CONT, | ||
| 728 | .range_max = DA9062AA_BBAT_CONT, | ||
| 729 | }, { | ||
| 724 | .range_min = DA9062AA_GP_ID_0, | 730 | .range_min = DA9062AA_GP_ID_0, |
| 725 | .range_max = DA9062AA_GP_ID_19, | 731 | .range_max = DA9062AA_GP_ID_19, |
| 726 | }, | 732 | }, |
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 8621a198a2ce..bac33311f55a 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c | |||
| @@ -216,6 +216,12 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 216 | pci_set_drvdata(pdev, dev); | 216 | pci_set_drvdata(pdev, dev); |
| 217 | 217 | ||
| 218 | /* | 218 | /* |
| 219 | * MEI requires the device to resume from runtime suspend | ||
| 220 | * in order to perform the link reset flow upon system suspend. | ||
| 221 | */ | ||
| 222 | pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME; | ||
| 223 | |||
| 224 | /* | ||
| 219 | * For not wake-able HW runtime pm framework | 225 | * For not wake-able HW runtime pm framework |
| 220 | * can't be used on pci device level. | 226 | * can't be used on pci device level. |
| 221 | * Use domain runtime pm callbacks instead. | 227 | * Use domain runtime pm callbacks instead. |
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c index f811cd524468..e38a5f144373 100644 --- a/drivers/misc/mei/pci-txe.c +++ b/drivers/misc/mei/pci-txe.c | |||
| @@ -138,6 +138,12 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 138 | pci_set_drvdata(pdev, dev); | 138 | pci_set_drvdata(pdev, dev); |
| 139 | 139 | ||
| 140 | /* | 140 | /* |
| 141 | * MEI requires the device to resume from runtime suspend | ||
| 142 | * in order to perform the link reset flow upon system suspend. | ||
| 143 | */ | ||
| 144 | pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME; | ||
| 145 | |||
| 146 | /* | ||
| 141 | * For not wake-able HW runtime pm framework | 147 | * For not wake-able HW runtime pm framework |
| 142 | * can't be used on pci device level. | 148 | * can't be used on pci device level. |
| 143 | * Use domain runtime pm callbacks instead. | 149 | * Use domain runtime pm callbacks instead. |
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index e5938c791330..f1bbfd389367 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c | |||
| @@ -2170,7 +2170,9 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md) | |||
| 2170 | * from being accepted. | 2170 | * from being accepted. |
| 2171 | */ | 2171 | */ |
| 2172 | card = md->queue.card; | 2172 | card = md->queue.card; |
| 2173 | spin_lock_irq(md->queue.queue->queue_lock); | ||
| 2173 | queue_flag_set(QUEUE_FLAG_BYPASS, md->queue.queue); | 2174 | queue_flag_set(QUEUE_FLAG_BYPASS, md->queue.queue); |
| 2175 | spin_unlock_irq(md->queue.queue->queue_lock); | ||
| 2174 | blk_set_queue_dying(md->queue.queue); | 2176 | blk_set_queue_dying(md->queue.queue); |
| 2175 | mmc_cleanup_queue(&md->queue); | 2177 | mmc_cleanup_queue(&md->queue); |
| 2176 | if (md->disk->flags & GENHD_FL_UP) { | 2178 | if (md->disk->flags & GENHD_FL_UP) { |
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 4ffea14b7eb6..2bae69e39544 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c | |||
| @@ -1289,7 +1289,7 @@ out_err: | |||
| 1289 | static int mmc_select_hs400es(struct mmc_card *card) | 1289 | static int mmc_select_hs400es(struct mmc_card *card) |
| 1290 | { | 1290 | { |
| 1291 | struct mmc_host *host = card->host; | 1291 | struct mmc_host *host = card->host; |
| 1292 | int err = 0; | 1292 | int err = -EINVAL; |
| 1293 | u8 val; | 1293 | u8 val; |
| 1294 | 1294 | ||
| 1295 | if (!(host->caps & MMC_CAP_8_BIT_DATA)) { | 1295 | if (!(host->caps & MMC_CAP_8_BIT_DATA)) { |
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 04ff3c97a535..2ab4788d021f 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c | |||
| @@ -2086,7 +2086,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev) | |||
| 2086 | mmc->max_seg_size = mmc->max_req_size; | 2086 | mmc->max_seg_size = mmc->max_req_size; |
| 2087 | 2087 | ||
| 2088 | mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | | 2088 | mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | |
| 2089 | MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE; | 2089 | MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE | MMC_CAP_CMD23; |
| 2090 | 2090 | ||
| 2091 | mmc->caps |= mmc_pdata(host)->caps; | 2091 | mmc->caps |= mmc_pdata(host)->caps; |
| 2092 | if (mmc->caps & MMC_CAP_8_BIT_DATA) | 2092 | if (mmc->caps & MMC_CAP_8_BIT_DATA) |
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index f336a9b85576..9ec8f033ac5f 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c | |||
| @@ -113,6 +113,7 @@ static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr, | |||
| 113 | for (; nsect > 0; nsect--, block++, buf += tr->blksize) | 113 | for (; nsect > 0; nsect--, block++, buf += tr->blksize) |
| 114 | if (tr->writesect(dev, block, buf)) | 114 | if (tr->writesect(dev, block, buf)) |
| 115 | return BLK_STS_IOERR; | 115 | return BLK_STS_IOERR; |
| 116 | return BLK_STS_OK; | ||
| 116 | default: | 117 | default: |
| 117 | return BLK_STS_IOERR; | 118 | return BLK_STS_IOERR; |
| 118 | } | 119 | } |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 9bee6c1c70cc..fc63992ab0e0 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
| @@ -1569,7 +1569,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
| 1569 | new_slave->delay = 0; | 1569 | new_slave->delay = 0; |
| 1570 | new_slave->link_failure_count = 0; | 1570 | new_slave->link_failure_count = 0; |
| 1571 | 1571 | ||
| 1572 | if (bond_update_speed_duplex(new_slave)) | 1572 | if (bond_update_speed_duplex(new_slave) && |
| 1573 | bond_needs_speed_duplex(bond)) | ||
| 1573 | new_slave->link = BOND_LINK_DOWN; | 1574 | new_slave->link = BOND_LINK_DOWN; |
| 1574 | 1575 | ||
| 1575 | new_slave->last_rx = jiffies - | 1576 | new_slave->last_rx = jiffies - |
| @@ -2140,11 +2141,13 @@ static void bond_miimon_commit(struct bonding *bond) | |||
| 2140 | continue; | 2141 | continue; |
| 2141 | 2142 | ||
| 2142 | case BOND_LINK_UP: | 2143 | case BOND_LINK_UP: |
| 2143 | if (bond_update_speed_duplex(slave)) { | 2144 | if (bond_update_speed_duplex(slave) && |
| 2145 | bond_needs_speed_duplex(bond)) { | ||
| 2144 | slave->link = BOND_LINK_DOWN; | 2146 | slave->link = BOND_LINK_DOWN; |
| 2145 | netdev_warn(bond->dev, | 2147 | if (net_ratelimit()) |
| 2146 | "failed to get link speed/duplex for %s\n", | 2148 | netdev_warn(bond->dev, |
| 2147 | slave->dev->name); | 2149 | "failed to get link speed/duplex for %s\n", |
| 2150 | slave->dev->name); | ||
| 2148 | continue; | 2151 | continue; |
| 2149 | } | 2152 | } |
| 2150 | bond_set_slave_link_state(slave, BOND_LINK_UP, | 2153 | bond_set_slave_link_state(slave, BOND_LINK_UP, |
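The bonding hunk above also wraps the speed/duplex warning in net_ratelimit() so a flapping slave cannot flood the log. A toy user-space equivalent of such a message rate limiter (the interval and burst values are arbitrary):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* allow at most `burst` messages per `interval` seconds */
struct ratelimit {
	time_t window_start;
	int burst;
	int interval;
	int printed;
};

static bool ratelimit_ok(struct ratelimit *rl)
{
	time_t now = time(NULL);

	if (now - rl->window_start >= rl->interval) {
		rl->window_start = now;   /* start a new window */
		rl->printed = 0;
	}
	if (rl->printed >= rl->burst)
		return false;             /* suppress this message */
	rl->printed++;
	return true;
}

int main(void)
{
	struct ratelimit rl = { .window_start = 0, .burst = 3, .interval = 5 };

	for (int i = 0; i < 10; i++)
		if (ratelimit_ok(&rl))
			printf("warning %d: failed to get link speed/duplex\n", i);
	return 0;
}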
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index ef4be781fd05..09ea62ee96d3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | |||
| @@ -529,6 +529,7 @@ enum { /* adapter flags */ | |||
| 529 | USING_SOFT_PARAMS = (1 << 6), | 529 | USING_SOFT_PARAMS = (1 << 6), |
| 530 | MASTER_PF = (1 << 7), | 530 | MASTER_PF = (1 << 7), |
| 531 | FW_OFLD_CONN = (1 << 9), | 531 | FW_OFLD_CONN = (1 << 9), |
| 532 | ROOT_NO_RELAXED_ORDERING = (1 << 10), | ||
| 532 | }; | 533 | }; |
| 533 | 534 | ||
| 534 | enum { | 535 | enum { |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index e403fa18f1b1..33bb8678833a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | |||
| @@ -4654,11 +4654,6 @@ static void print_port_info(const struct net_device *dev) | |||
| 4654 | dev->name, adap->params.vpd.id, adap->name, buf); | 4654 | dev->name, adap->params.vpd.id, adap->name, buf); |
| 4655 | } | 4655 | } |
| 4656 | 4656 | ||
| 4657 | static void enable_pcie_relaxed_ordering(struct pci_dev *dev) | ||
| 4658 | { | ||
| 4659 | pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN); | ||
| 4660 | } | ||
| 4661 | |||
| 4662 | /* | 4657 | /* |
| 4663 | * Free the following resources: | 4658 | * Free the following resources: |
| 4664 | * - memory used for tables | 4659 | * - memory used for tables |
| @@ -4908,7 +4903,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 4908 | } | 4903 | } |
| 4909 | 4904 | ||
| 4910 | pci_enable_pcie_error_reporting(pdev); | 4905 | pci_enable_pcie_error_reporting(pdev); |
| 4911 | enable_pcie_relaxed_ordering(pdev); | ||
| 4912 | pci_set_master(pdev); | 4906 | pci_set_master(pdev); |
| 4913 | pci_save_state(pdev); | 4907 | pci_save_state(pdev); |
| 4914 | 4908 | ||
| @@ -4947,6 +4941,23 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 4947 | adapter->msg_enable = DFLT_MSG_ENABLE; | 4941 | adapter->msg_enable = DFLT_MSG_ENABLE; |
| 4948 | memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map)); | 4942 | memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map)); |
| 4949 | 4943 | ||
| 4944 | /* If possible, we use PCIe Relaxed Ordering Attribute to deliver | ||
| 4945 | * Ingress Packet Data to Free List Buffers in order to allow for | ||
| 4946 | * chipset performance optimizations between the Root Complex and | ||
| 4947 | * Memory Controllers. (Messages to the associated Ingress Queue | ||
| 4948 | * notifying new Packet Placement in the Free List Buffers will be | ||
| 4949 | * sent without the Relaxed Ordering Attribute, thus guaranteeing that | ||
| 4950 | * all preceding PCIe Transaction Layer Packets will be processed | ||
| 4951 | * first.) But some Root Complexes have various issues with Upstream | ||
| 4952 | * Transaction Layer Packets with the Relaxed Ordering Attribute set. | ||
| 4953 | * The PCIe devices under such Root Complexes will have the Relaxed | ||
| 4954 | * Ordering bit cleared in their configuration space, so we check our | ||
| 4955 | * PCIe configuration space to see if it's flagged with advice against | ||
| 4956 | * using Relaxed Ordering. | ||
| 4957 | */ | ||
| 4958 | if (!pcie_relaxed_ordering_enabled(pdev)) | ||
| 4959 | adapter->flags |= ROOT_NO_RELAXED_ORDERING; | ||
| 4960 | |||
| 4950 | spin_lock_init(&adapter->stats_lock); | 4961 | spin_lock_init(&adapter->stats_lock); |
| 4951 | spin_lock_init(&adapter->tid_release_lock); | 4962 | spin_lock_init(&adapter->tid_release_lock); |
| 4952 | spin_lock_init(&adapter->win0_lock); | 4963 | spin_lock_init(&adapter->win0_lock); |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index ede12209f20b..4ef68f69b58c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c | |||
| @@ -2719,6 +2719,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, | |||
| 2719 | struct fw_iq_cmd c; | 2719 | struct fw_iq_cmd c; |
| 2720 | struct sge *s = &adap->sge; | 2720 | struct sge *s = &adap->sge; |
| 2721 | struct port_info *pi = netdev_priv(dev); | 2721 | struct port_info *pi = netdev_priv(dev); |
| 2722 | int relaxed = !(adap->flags & ROOT_NO_RELAXED_ORDERING); | ||
| 2722 | 2723 | ||
| 2723 | /* Size needs to be multiple of 16, including status entry. */ | 2724 | /* Size needs to be multiple of 16, including status entry. */ |
| 2724 | iq->size = roundup(iq->size, 16); | 2725 | iq->size = roundup(iq->size, 16); |
| @@ -2772,8 +2773,8 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, | |||
| 2772 | 2773 | ||
| 2773 | flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc); | 2774 | flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc); |
| 2774 | c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F | | 2775 | c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F | |
| 2775 | FW_IQ_CMD_FL0FETCHRO_F | | 2776 | FW_IQ_CMD_FL0FETCHRO_V(relaxed) | |
| 2776 | FW_IQ_CMD_FL0DATARO_F | | 2777 | FW_IQ_CMD_FL0DATARO_V(relaxed) | |
| 2777 | FW_IQ_CMD_FL0PADEN_F); | 2778 | FW_IQ_CMD_FL0PADEN_F); |
| 2778 | if (cong >= 0) | 2779 | if (cong >= 0) |
| 2779 | c.iqns_to_fl0congen |= | 2780 | c.iqns_to_fl0congen |= |
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h index 109bc630408b..08c6ddb84a04 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h | |||
| @@ -408,6 +408,7 @@ enum { /* adapter flags */ | |||
| 408 | USING_MSI = (1UL << 1), | 408 | USING_MSI = (1UL << 1), |
| 409 | USING_MSIX = (1UL << 2), | 409 | USING_MSIX = (1UL << 2), |
| 410 | QUEUES_BOUND = (1UL << 3), | 410 | QUEUES_BOUND = (1UL << 3), |
| 411 | ROOT_NO_RELAXED_ORDERING = (1UL << 4), | ||
| 411 | }; | 412 | }; |
| 412 | 413 | ||
| 413 | /* | 414 | /* |
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index ac7a150c54e9..2b85b874fd0d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | |||
| @@ -2888,6 +2888,24 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, | |||
| 2888 | */ | 2888 | */ |
| 2889 | adapter->name = pci_name(pdev); | 2889 | adapter->name = pci_name(pdev); |
| 2890 | adapter->msg_enable = DFLT_MSG_ENABLE; | 2890 | adapter->msg_enable = DFLT_MSG_ENABLE; |
| 2891 | |||
| 2892 | /* If possible, we use PCIe Relaxed Ordering Attribute to deliver | ||
| 2893 | * Ingress Packet Data to Free List Buffers in order to allow for | ||
| 2894 | * chipset performance optimizations between the Root Complex and | ||
| 2895 | * Memory Controllers. (Messages to the associated Ingress Queue | ||
| 2896 | * notifying new Packet Placement in the Free List Buffers will be | ||
| 2897 | * sent without the Relaxed Ordering Attribute, thus guaranteeing that | ||
| 2898 | * all preceding PCIe Transaction Layer Packets will be processed | ||
| 2899 | * first.) But some Root Complexes have various issues with Upstream | ||
| 2900 | * Transaction Layer Packets with the Relaxed Ordering Attribute set. | ||
| 2901 | * The PCIe devices under such Root Complexes will have the Relaxed | ||
| 2902 | * Ordering bit cleared in their configuration space, so we check our | ||
| 2903 | * PCIe configuration space to see if it's flagged with advice against | ||
| 2904 | * using Relaxed Ordering. | ||
| 2905 | */ | ||
| 2906 | if (!pcie_relaxed_ordering_enabled(pdev)) | ||
| 2907 | adapter->flags |= ROOT_NO_RELAXED_ORDERING; | ||
| 2908 | |||
| 2891 | err = adap_init0(adapter); | 2909 | err = adap_init0(adapter); |
| 2892 | if (err) | 2910 | if (err) |
| 2893 | goto err_unmap_bar; | 2911 | goto err_unmap_bar; |
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index e37dde2ba97f..05498e7f2840 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c | |||
| @@ -2205,6 +2205,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, | |||
| 2205 | struct port_info *pi = netdev_priv(dev); | 2205 | struct port_info *pi = netdev_priv(dev); |
| 2206 | struct fw_iq_cmd cmd, rpl; | 2206 | struct fw_iq_cmd cmd, rpl; |
| 2207 | int ret, iqandst, flsz = 0; | 2207 | int ret, iqandst, flsz = 0; |
| 2208 | int relaxed = !(adapter->flags & ROOT_NO_RELAXED_ORDERING); | ||
| 2208 | 2209 | ||
| 2209 | /* | 2210 | /* |
| 2210 | * If we're using MSI interrupts and we're not initializing the | 2211 | * If we're using MSI interrupts and we're not initializing the |
| @@ -2300,6 +2301,8 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, | |||
| 2300 | cpu_to_be32( | 2301 | cpu_to_be32( |
| 2301 | FW_IQ_CMD_FL0HOSTFCMODE_V(SGE_HOSTFCMODE_NONE) | | 2302 | FW_IQ_CMD_FL0HOSTFCMODE_V(SGE_HOSTFCMODE_NONE) | |
| 2302 | FW_IQ_CMD_FL0PACKEN_F | | 2303 | FW_IQ_CMD_FL0PACKEN_F | |
| 2304 | FW_IQ_CMD_FL0FETCHRO_V(relaxed) | | ||
| 2305 | FW_IQ_CMD_FL0DATARO_V(relaxed) | | ||
| 2303 | FW_IQ_CMD_FL0PADEN_F); | 2306 | FW_IQ_CMD_FL0PADEN_F); |
| 2304 | 2307 | ||
| 2305 | /* In T6, for egress queue type FL there is internal overhead | 2308 | /* In T6, for egress queue type FL there is internal overhead |
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 09b9bc17bce9..5fe5cdc51357 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c | |||
| @@ -432,7 +432,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
| 432 | /* Virtual PCI function needs to determine UAR page size from | 432 | /* Virtual PCI function needs to determine UAR page size from |
| 433 | * firmware. Only master PCI function can set the uar page size | 433 | * firmware. Only master PCI function can set the uar page size |
| 434 | */ | 434 | */ |
| 435 | if (enable_4k_uar) | 435 | if (enable_4k_uar || !dev->persist->num_vfs) |
| 436 | dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT; | 436 | dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT; |
| 437 | else | 437 | else |
| 438 | dev->uar_page_shift = PAGE_SHIFT; | 438 | dev->uar_page_shift = PAGE_SHIFT; |
| @@ -2277,7 +2277,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev) | |||
| 2277 | 2277 | ||
| 2278 | dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; | 2278 | dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; |
| 2279 | 2279 | ||
| 2280 | if (enable_4k_uar) { | 2280 | if (enable_4k_uar || !dev->persist->num_vfs) { |
| 2281 | init_hca.log_uar_sz = ilog2(dev->caps.num_uars) + | 2281 | init_hca.log_uar_sz = ilog2(dev->caps.num_uars) + |
| 2282 | PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT; | 2282 | PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT; |
| 2283 | init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12; | 2283 | init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12; |
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c index dd7fa9cf225f..b0837b58c3a1 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c | |||
| @@ -115,14 +115,10 @@ nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb) | |||
| 115 | return; | 115 | return; |
| 116 | } | 116 | } |
| 117 | 117 | ||
| 118 | if (link) { | 118 | if (link) |
| 119 | netif_carrier_on(netdev); | 119 | netif_carrier_on(netdev); |
| 120 | rtnl_lock(); | 120 | else |
| 121 | dev_set_mtu(netdev, be16_to_cpu(msg->mtu)); | ||
| 122 | rtnl_unlock(); | ||
| 123 | } else { | ||
| 124 | netif_carrier_off(netdev); | 121 | netif_carrier_off(netdev); |
| 125 | } | ||
| 126 | rcu_read_unlock(); | 122 | rcu_read_unlock(); |
| 127 | } | 123 | } |
| 128 | 124 | ||
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 4631ca8b8eb2..9f77ce038a4a 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c | |||
| @@ -908,8 +908,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) | |||
| 908 | return NETDEV_TX_OK; | 908 | return NETDEV_TX_OK; |
| 909 | 909 | ||
| 910 | err_unmap: | 910 | err_unmap: |
| 911 | --f; | 911 | while (--f >= 0) { |
| 912 | while (f >= 0) { | ||
| 913 | frag = &skb_shinfo(skb)->frags[f]; | 912 | frag = &skb_shinfo(skb)->frags[f]; |
| 914 | dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr, | 913 | dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr, |
| 915 | skb_frag_size(frag), DMA_TO_DEVICE); | 914 | skb_frag_size(frag), DMA_TO_DEVICE); |
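The nfp_net_tx() error path above is the usual DMA-mapping unwind loop: if mapping fragment f fails, undo fragments f-1 down to 0; folding the pre-decrement into the loop condition keeps that in one line. A self-contained sketch of the pattern, with map_frag()/unmap_frag() as stand-ins for the DMA API:

#include <stdbool.h>
#include <stdio.h>

#define NR_FRAGS 4

/* stand-ins for dma_map_page()/dma_unmap_page() */
static bool map_frag(int i)   { printf("map   frag %d\n", i); return i != 2; }
static void unmap_frag(int i) { printf("unmap frag %d\n", i); }

static int tx_map_frags(void)
{
	int f;

	for (f = 0; f < NR_FRAGS; f++)
		if (!map_frag(f))
			goto err_unmap;
	return 0;

err_unmap:
	/* undo everything mapped before the failure: f-1 .. 0 */
	while (--f >= 0)
		unmap_frag(f);
	return -1;
}

int main(void)
{
	return tx_map_frags() ? 1 : 0;
}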
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c index 66ff15d08bad..0a66389c06c2 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c | |||
| @@ -2311,7 +2311,7 @@ netxen_md_rdqueue(struct netxen_adapter *adapter, | |||
| 2311 | loop_cnt++) { | 2311 | loop_cnt++) { |
| 2312 | NX_WR_DUMP_REG(select_addr, adapter->ahw.pci_base0, queue_id); | 2312 | NX_WR_DUMP_REG(select_addr, adapter->ahw.pci_base0, queue_id); |
| 2313 | read_addr = queueEntry->read_addr; | 2313 | read_addr = queueEntry->read_addr; |
| 2314 | for (k = 0; k < read_cnt; k--) { | 2314 | for (k = 0; k < read_cnt; k++) { |
| 2315 | NX_RD_DUMP_REG(read_addr, adapter->ahw.pci_base0, | 2315 | NX_RD_DUMP_REG(read_addr, adapter->ahw.pci_base0, |
| 2316 | &read_value); | 2316 | &read_value); |
| 2317 | *data_buff++ = read_value; | 2317 | *data_buff++ = read_value; |
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c index c905971c5f3a..990a63d7fcb7 100644 --- a/drivers/net/ethernet/sfc/mcdi_port.c +++ b/drivers/net/ethernet/sfc/mcdi_port.c | |||
| @@ -938,7 +938,6 @@ enum efx_stats_action { | |||
| 938 | static int efx_mcdi_mac_stats(struct efx_nic *efx, | 938 | static int efx_mcdi_mac_stats(struct efx_nic *efx, |
| 939 | enum efx_stats_action action, int clear) | 939 | enum efx_stats_action action, int clear) |
| 940 | { | 940 | { |
| 941 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | ||
| 942 | MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN); | 941 | MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN); |
| 943 | int rc; | 942 | int rc; |
| 944 | int change = action == EFX_STATS_PULL ? 0 : 1; | 943 | int change = action == EFX_STATS_PULL ? 0 : 1; |
| @@ -960,7 +959,12 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx, | |||
| 960 | MAC_STATS_IN_PERIODIC_NOEVENT, 1, | 959 | MAC_STATS_IN_PERIODIC_NOEVENT, 1, |
| 961 | MAC_STATS_IN_PERIOD_MS, period); | 960 | MAC_STATS_IN_PERIOD_MS, period); |
| 962 | MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len); | 961 | MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len); |
| 963 | MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, nic_data->vport_id); | 962 | |
| 963 | if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) { | ||
| 964 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | ||
| 965 | |||
| 966 | MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, nic_data->vport_id); | ||
| 967 | } | ||
| 964 | 968 | ||
| 965 | rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), | 969 | rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), |
| 966 | NULL, 0, NULL); | 970 | NULL, 0, NULL); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index db157a47000c..72ec711fcba2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | |||
| @@ -204,6 +204,7 @@ int stmmac_mdio_register(struct net_device *ndev) | |||
| 204 | struct stmmac_priv *priv = netdev_priv(ndev); | 204 | struct stmmac_priv *priv = netdev_priv(ndev); |
| 205 | struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data; | 205 | struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data; |
| 206 | struct device_node *mdio_node = priv->plat->mdio_node; | 206 | struct device_node *mdio_node = priv->plat->mdio_node; |
| 207 | struct device *dev = ndev->dev.parent; | ||
| 207 | int addr, found; | 208 | int addr, found; |
| 208 | 209 | ||
| 209 | if (!mdio_bus_data) | 210 | if (!mdio_bus_data) |
| @@ -237,7 +238,7 @@ int stmmac_mdio_register(struct net_device *ndev) | |||
| 237 | else | 238 | else |
| 238 | err = mdiobus_register(new_bus); | 239 | err = mdiobus_register(new_bus); |
| 239 | if (err != 0) { | 240 | if (err != 0) { |
| 240 | netdev_err(ndev, "Cannot register the MDIO bus\n"); | 241 | dev_err(dev, "Cannot register the MDIO bus\n"); |
| 241 | goto bus_register_fail; | 242 | goto bus_register_fail; |
| 242 | } | 243 | } |
| 243 | 244 | ||
| @@ -285,14 +286,12 @@ int stmmac_mdio_register(struct net_device *ndev) | |||
| 285 | irq_str = irq_num; | 286 | irq_str = irq_num; |
| 286 | break; | 287 | break; |
| 287 | } | 288 | } |
| 288 | netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n", | 289 | phy_attached_info(phydev); |
| 289 | phydev->phy_id, addr, irq_str, phydev_name(phydev), | ||
| 290 | act ? " active" : ""); | ||
| 291 | found = 1; | 290 | found = 1; |
| 292 | } | 291 | } |
| 293 | 292 | ||
| 294 | if (!found && !mdio_node) { | 293 | if (!found && !mdio_node) { |
| 295 | netdev_warn(ndev, "No PHY found\n"); | 294 | dev_warn(dev, "No PHY found\n"); |
| 296 | mdiobus_unregister(new_bus); | 295 | mdiobus_unregister(new_bus); |
| 297 | mdiobus_free(new_bus); | 296 | mdiobus_free(new_bus); |
| 298 | return -ENODEV; | 297 | return -ENODEV; |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 32ad87345f57..0a2c0a42283f 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
| @@ -1879,6 +1879,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) | |||
| 1879 | 1879 | ||
| 1880 | err_detach: | 1880 | err_detach: |
| 1881 | tun_detach_all(dev); | 1881 | tun_detach_all(dev); |
| 1882 | /* register_netdevice() already called tun_free_netdev() */ | ||
| 1883 | goto err_free_dev; | ||
| 1884 | |||
| 1882 | err_free_flow: | 1885 | err_free_flow: |
| 1883 | tun_flow_uninit(tun); | 1886 | tun_flow_uninit(tun); |
| 1884 | security_tun_dev_free_security(tun->security); | 1887 | security_tun_dev_free_security(tun->security); |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c index d21258d277ce..f1b60740e020 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c | |||
| @@ -159,8 +159,10 @@ void brcmf_feat_attach(struct brcmf_pub *drvr) | |||
| 159 | 159 | ||
| 160 | brcmf_feat_firmware_capabilities(ifp); | 160 | brcmf_feat_firmware_capabilities(ifp); |
| 161 | memset(&gscan_cfg, 0, sizeof(gscan_cfg)); | 161 | memset(&gscan_cfg, 0, sizeof(gscan_cfg)); |
| 162 | brcmf_feat_iovar_data_set(ifp, BRCMF_FEAT_GSCAN, "pfn_gscan_cfg", | 162 | if (drvr->bus_if->chip != BRCM_CC_43430_CHIP_ID) |
| 163 | &gscan_cfg, sizeof(gscan_cfg)); | 163 | brcmf_feat_iovar_data_set(ifp, BRCMF_FEAT_GSCAN, |
| 164 | "pfn_gscan_cfg", | ||
| 165 | &gscan_cfg, sizeof(gscan_cfg)); | ||
| 164 | brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_PNO, "pfn"); | 166 | brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_PNO, "pfn"); |
| 165 | if (drvr->bus_if->wowl_supported) | 167 | if (drvr->bus_if->wowl_supported) |
| 166 | brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl"); | 168 | brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl"); |
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c index b4ecd1fe1374..97208ce19f92 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c | |||
| @@ -154,7 +154,7 @@ static const struct iwl_tt_params iwl9000_tt_params = { | |||
| 154 | const struct iwl_cfg iwl9160_2ac_cfg = { | 154 | const struct iwl_cfg iwl9160_2ac_cfg = { |
| 155 | .name = "Intel(R) Dual Band Wireless AC 9160", | 155 | .name = "Intel(R) Dual Band Wireless AC 9160", |
| 156 | .fw_name_pre = IWL9260A_FW_PRE, | 156 | .fw_name_pre = IWL9260A_FW_PRE, |
| 157 | .fw_name_pre_next_step = IWL9260B_FW_PRE, | 157 | .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE, |
| 158 | IWL_DEVICE_9000, | 158 | IWL_DEVICE_9000, |
| 159 | .ht_params = &iwl9000_ht_params, | 159 | .ht_params = &iwl9000_ht_params, |
| 160 | .nvm_ver = IWL9000_NVM_VERSION, | 160 | .nvm_ver = IWL9000_NVM_VERSION, |
| @@ -165,7 +165,7 @@ const struct iwl_cfg iwl9160_2ac_cfg = { | |||
| 165 | const struct iwl_cfg iwl9260_2ac_cfg = { | 165 | const struct iwl_cfg iwl9260_2ac_cfg = { |
| 166 | .name = "Intel(R) Dual Band Wireless AC 9260", | 166 | .name = "Intel(R) Dual Band Wireless AC 9260", |
| 167 | .fw_name_pre = IWL9260A_FW_PRE, | 167 | .fw_name_pre = IWL9260A_FW_PRE, |
| 168 | .fw_name_pre_next_step = IWL9260B_FW_PRE, | 168 | .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE, |
| 169 | IWL_DEVICE_9000, | 169 | IWL_DEVICE_9000, |
| 170 | .ht_params = &iwl9000_ht_params, | 170 | .ht_params = &iwl9000_ht_params, |
| 171 | .nvm_ver = IWL9000_NVM_VERSION, | 171 | .nvm_ver = IWL9000_NVM_VERSION, |
| @@ -176,7 +176,7 @@ const struct iwl_cfg iwl9260_2ac_cfg = { | |||
| 176 | const struct iwl_cfg iwl9270_2ac_cfg = { | 176 | const struct iwl_cfg iwl9270_2ac_cfg = { |
| 177 | .name = "Intel(R) Dual Band Wireless AC 9270", | 177 | .name = "Intel(R) Dual Band Wireless AC 9270", |
| 178 | .fw_name_pre = IWL9260A_FW_PRE, | 178 | .fw_name_pre = IWL9260A_FW_PRE, |
| 179 | .fw_name_pre_next_step = IWL9260B_FW_PRE, | 179 | .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE, |
| 180 | IWL_DEVICE_9000, | 180 | IWL_DEVICE_9000, |
| 181 | .ht_params = &iwl9000_ht_params, | 181 | .ht_params = &iwl9000_ht_params, |
| 182 | .nvm_ver = IWL9000_NVM_VERSION, | 182 | .nvm_ver = IWL9000_NVM_VERSION, |
| @@ -186,8 +186,8 @@ const struct iwl_cfg iwl9270_2ac_cfg = { | |||
| 186 | 186 | ||
| 187 | const struct iwl_cfg iwl9460_2ac_cfg = { | 187 | const struct iwl_cfg iwl9460_2ac_cfg = { |
| 188 | .name = "Intel(R) Dual Band Wireless AC 9460", | 188 | .name = "Intel(R) Dual Band Wireless AC 9460", |
| 189 | .fw_name_pre = IWL9000_FW_PRE, | 189 | .fw_name_pre = IWL9260A_FW_PRE, |
| 190 | .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, | 190 | .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE, |
| 191 | IWL_DEVICE_9000, | 191 | IWL_DEVICE_9000, |
| 192 | .ht_params = &iwl9000_ht_params, | 192 | .ht_params = &iwl9000_ht_params, |
| 193 | .nvm_ver = IWL9000_NVM_VERSION, | 193 | .nvm_ver = IWL9000_NVM_VERSION, |
| @@ -198,8 +198,8 @@ const struct iwl_cfg iwl9460_2ac_cfg = { | |||
| 198 | 198 | ||
| 199 | const struct iwl_cfg iwl9560_2ac_cfg = { | 199 | const struct iwl_cfg iwl9560_2ac_cfg = { |
| 200 | .name = "Intel(R) Dual Band Wireless AC 9560", | 200 | .name = "Intel(R) Dual Band Wireless AC 9560", |
| 201 | .fw_name_pre = IWL9000_FW_PRE, | 201 | .fw_name_pre = IWL9260A_FW_PRE, |
| 202 | .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, | 202 | .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE, |
| 203 | IWL_DEVICE_9000, | 203 | IWL_DEVICE_9000, |
| 204 | .ht_params = &iwl9000_ht_params, | 204 | .ht_params = &iwl9000_ht_params, |
| 205 | .nvm_ver = IWL9000_NVM_VERSION, | 205 | .nvm_ver = IWL9000_NVM_VERSION, |
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index 0fa8c473f1e2..c73a6438ce8f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h | |||
| @@ -328,6 +328,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t; | |||
| 328 | * @IWL_UCODE_TLV_CAPA_TX_POWER_ACK: reduced TX power API has larger | 328 | * @IWL_UCODE_TLV_CAPA_TX_POWER_ACK: reduced TX power API has larger |
| 329 | * command size (command version 4) that supports toggling ACK TX | 329 | * command size (command version 4) that supports toggling ACK TX |
| 330 | * power reduction. | 330 | * power reduction. |
| 331 | * @IWL_UCODE_TLV_CAPA_MLME_OFFLOAD: supports MLME offload | ||
| 331 | * | 332 | * |
| 332 | * @NUM_IWL_UCODE_TLV_CAPA: number of bits used | 333 | * @NUM_IWL_UCODE_TLV_CAPA: number of bits used |
| 333 | */ | 334 | */ |
| @@ -373,6 +374,7 @@ enum iwl_ucode_tlv_capa { | |||
| 373 | IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG = (__force iwl_ucode_tlv_capa_t)80, | 374 | IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG = (__force iwl_ucode_tlv_capa_t)80, |
| 374 | IWL_UCODE_TLV_CAPA_LQM_SUPPORT = (__force iwl_ucode_tlv_capa_t)81, | 375 | IWL_UCODE_TLV_CAPA_LQM_SUPPORT = (__force iwl_ucode_tlv_capa_t)81, |
| 375 | IWL_UCODE_TLV_CAPA_TX_POWER_ACK = (__force iwl_ucode_tlv_capa_t)84, | 376 | IWL_UCODE_TLV_CAPA_TX_POWER_ACK = (__force iwl_ucode_tlv_capa_t)84, |
| 377 | IWL_UCODE_TLV_CAPA_MLME_OFFLOAD = (__force iwl_ucode_tlv_capa_t)96, | ||
| 376 | 378 | ||
| 377 | NUM_IWL_UCODE_TLV_CAPA | 379 | NUM_IWL_UCODE_TLV_CAPA |
| 378 | #ifdef __CHECKER__ | 380 | #ifdef __CHECKER__ |
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index c52623cb7c2a..d19c74827fbb 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h | |||
| @@ -276,10 +276,10 @@ struct iwl_pwr_tx_backoff { | |||
| 276 | * @fw_name_pre: Firmware filename prefix. The api version and extension | 276 | * @fw_name_pre: Firmware filename prefix. The api version and extension |
| 277 | * (.ucode) will be added to filename before loading from disk. The | 277 | * (.ucode) will be added to filename before loading from disk. The |
| 278 | * filename is constructed as fw_name_pre<api>.ucode. | 278 | * filename is constructed as fw_name_pre<api>.ucode. |
| 279 | * @fw_name_pre_next_step: same as @fw_name_pre, only for next step | 279 | * @fw_name_pre_b_or_c_step: same as @fw_name_pre, only for b or c steps |
| 280 | * (if supported) | 280 | * (if supported) |
| 281 | * @fw_name_pre_rf_next_step: same as @fw_name_pre_next_step, only for rf next | 281 | * @fw_name_pre_rf_next_step: same as @fw_name_pre_b_or_c_step, only for rf |
| 282 | * step. Supported only in integrated solutions. | 282 | * next step. Supported only in integrated solutions. |
| 283 | * @ucode_api_max: Highest version of uCode API supported by driver. | 283 | * @ucode_api_max: Highest version of uCode API supported by driver. |
| 284 | * @ucode_api_min: Lowest version of uCode API supported by driver. | 284 | * @ucode_api_min: Lowest version of uCode API supported by driver. |
| 285 | * @max_inst_size: The maximal length of the fw inst section | 285 | * @max_inst_size: The maximal length of the fw inst section |
| @@ -330,7 +330,7 @@ struct iwl_cfg { | |||
| 330 | /* params specific to an individual device within a device family */ | 330 | /* params specific to an individual device within a device family */ |
| 331 | const char *name; | 331 | const char *name; |
| 332 | const char *fw_name_pre; | 332 | const char *fw_name_pre; |
| 333 | const char *fw_name_pre_next_step; | 333 | const char *fw_name_pre_b_or_c_step; |
| 334 | const char *fw_name_pre_rf_next_step; | 334 | const char *fw_name_pre_rf_next_step; |
| 335 | /* params not likely to change within a device family */ | 335 | /* params not likely to change within a device family */ |
| 336 | const struct iwl_base_params *base_params; | 336 | const struct iwl_base_params *base_params; |
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 6fdb5921e17f..4e0f86fe0a6f 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c | |||
| @@ -216,8 +216,9 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first) | |||
| 216 | const char *fw_pre_name; | 216 | const char *fw_pre_name; |
| 217 | 217 | ||
| 218 | if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_9000 && | 218 | if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_9000 && |
| 219 | CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_B_STEP) | 219 | (CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_B_STEP || |
| 220 | fw_pre_name = cfg->fw_name_pre_next_step; | 220 | CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_C_STEP)) |
| 221 | fw_pre_name = cfg->fw_name_pre_b_or_c_step; | ||
| 221 | else if (drv->trans->cfg->integrated && | 222 | else if (drv->trans->cfg->integrated && |
| 222 | CSR_HW_RFID_STEP(drv->trans->hw_rf_id) == SILICON_B_STEP && | 223 | CSR_HW_RFID_STEP(drv->trans->hw_rf_id) == SILICON_B_STEP && |
| 223 | cfg->fw_name_pre_rf_next_step) | 224 | cfg->fw_name_pre_rf_next_step) |
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 5c08f4d40f6a..3ee6767392b6 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | |||
| @@ -785,7 +785,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, | |||
| 785 | int num_of_ch, __le32 *channels, u16 fw_mcc) | 785 | int num_of_ch, __le32 *channels, u16 fw_mcc) |
| 786 | { | 786 | { |
| 787 | int ch_idx; | 787 | int ch_idx; |
| 788 | u16 ch_flags, prev_ch_flags = 0; | 788 | u16 ch_flags; |
| 789 | u32 reg_rule_flags, prev_reg_rule_flags = 0; | ||
| 789 | const u8 *nvm_chan = cfg->ext_nvm ? | 790 | const u8 *nvm_chan = cfg->ext_nvm ? |
| 790 | iwl_ext_nvm_channels : iwl_nvm_channels; | 791 | iwl_ext_nvm_channels : iwl_nvm_channels; |
| 791 | struct ieee80211_regdomain *regd; | 792 | struct ieee80211_regdomain *regd; |
| @@ -834,8 +835,11 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, | |||
| 834 | continue; | 835 | continue; |
| 835 | } | 836 | } |
| 836 | 837 | ||
| 838 | reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx, | ||
| 839 | ch_flags, cfg); | ||
| 840 | |||
| 837 | /* we can't continue the same rule */ | 841 | /* we can't continue the same rule */ |
| 838 | if (ch_idx == 0 || prev_ch_flags != ch_flags || | 842 | if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags || |
| 839 | center_freq - prev_center_freq > 20) { | 843 | center_freq - prev_center_freq > 20) { |
| 840 | valid_rules++; | 844 | valid_rules++; |
| 841 | new_rule = true; | 845 | new_rule = true; |
| @@ -854,18 +858,17 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, | |||
| 854 | rule->power_rule.max_eirp = | 858 | rule->power_rule.max_eirp = |
| 855 | DBM_TO_MBM(IWL_DEFAULT_MAX_TX_POWER); | 859 | DBM_TO_MBM(IWL_DEFAULT_MAX_TX_POWER); |
| 856 | 860 | ||
| 857 | rule->flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx, | 861 | rule->flags = reg_rule_flags; |
| 858 | ch_flags, cfg); | ||
| 859 | 862 | ||
| 860 | /* rely on auto-calculation to merge BW of contiguous chans */ | 863 | /* rely on auto-calculation to merge BW of contiguous chans */ |
| 861 | rule->flags |= NL80211_RRF_AUTO_BW; | 864 | rule->flags |= NL80211_RRF_AUTO_BW; |
| 862 | rule->freq_range.max_bandwidth_khz = 0; | 865 | rule->freq_range.max_bandwidth_khz = 0; |
| 863 | 866 | ||
| 864 | prev_ch_flags = ch_flags; | ||
| 865 | prev_center_freq = center_freq; | 867 | prev_center_freq = center_freq; |
| 868 | prev_reg_rule_flags = reg_rule_flags; | ||
| 866 | 869 | ||
| 867 | IWL_DEBUG_DEV(dev, IWL_DL_LAR, | 870 | IWL_DEBUG_DEV(dev, IWL_DL_LAR, |
| 868 | "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s(0x%02x): Ad-Hoc %ssupported\n", | 871 | "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s(0x%02x) reg_flags 0x%x: %s\n", |
| 869 | center_freq, | 872 | center_freq, |
| 870 | band == NL80211_BAND_5GHZ ? "5.2" : "2.4", | 873 | band == NL80211_BAND_5GHZ ? "5.2" : "2.4", |
| 871 | CHECK_AND_PRINT_I(VALID), | 874 | CHECK_AND_PRINT_I(VALID), |
| @@ -877,10 +880,10 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, | |||
| 877 | CHECK_AND_PRINT_I(160MHZ), | 880 | CHECK_AND_PRINT_I(160MHZ), |
| 878 | CHECK_AND_PRINT_I(INDOOR_ONLY), | 881 | CHECK_AND_PRINT_I(INDOOR_ONLY), |
| 879 | CHECK_AND_PRINT_I(GO_CONCURRENT), | 882 | CHECK_AND_PRINT_I(GO_CONCURRENT), |
| 880 | ch_flags, | 883 | ch_flags, reg_rule_flags, |
| 881 | ((ch_flags & NVM_CHANNEL_ACTIVE) && | 884 | ((ch_flags & NVM_CHANNEL_ACTIVE) && |
| 882 | !(ch_flags & NVM_CHANNEL_RADAR)) | 885 | !(ch_flags & NVM_CHANNEL_RADAR)) |
| 883 | ? "" : "not "); | 886 | ? "Ad-Hoc" : ""); |
| 884 | } | 887 | } |
| 885 | 888 | ||
| 886 | regd->n_reg_rules = valid_rules; | 889 | regd->n_reg_rules = valid_rules; |
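The iwl-nvm-parse change above derives the regulatory-rule flags before deciding whether a channel may extend the previous rule, so adjacent channels are merged only when the flags the regulatory core will actually see match and the frequency gap stays within 20 MHz. A simplified sketch of that grouping logic (derive_flags() and the channel list are invented for illustration):

#include <stdio.h>

struct chan { int center_mhz; unsigned int nvm_flags; };

/* stand-in for iwl_nvm_get_regdom_bw_flags(): derive what the
 * regulatory core will see from the raw NVM channel flags */
static unsigned int derive_flags(unsigned int nvm_flags)
{
	return nvm_flags & 0x3;          /* pretend only two bits matter */
}

int main(void)
{
	const struct chan chans[] = {
		{ 5180, 0x13 }, { 5200, 0x23 }, { 5220, 0x01 }, { 5745, 0x01 },
	};
	unsigned int prev_flags = 0;
	int prev_freq = 0, rules = 0;

	for (unsigned int i = 0; i < sizeof(chans) / sizeof(chans[0]); i++) {
		unsigned int flags = derive_flags(chans[i].nvm_flags);

		/* start a new rule when the derived flags change or the
		 * channel is not contiguous with the previous one */
		if (i == 0 || flags != prev_flags ||
		    chans[i].center_mhz - prev_freq > 20)
			rules++;

		prev_flags = flags;
		prev_freq = chans[i].center_mhz;
	}
	printf("%d regulatory rules\n", rules);
	return 0;
}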
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 79e7a7a285dc..82863e9273eb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c | |||
| @@ -1275,8 +1275,10 @@ static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm) | |||
| 1275 | 1275 | ||
| 1276 | entry = &wifi_pkg->package.elements[idx++]; | 1276 | entry = &wifi_pkg->package.elements[idx++]; |
| 1277 | if ((entry->type != ACPI_TYPE_INTEGER) || | 1277 | if ((entry->type != ACPI_TYPE_INTEGER) || |
| 1278 | (entry->integer.value > U8_MAX)) | 1278 | (entry->integer.value > U8_MAX)) { |
| 1279 | return -EINVAL; | 1279 | ret = -EINVAL; |
| 1280 | goto out_free; | ||
| 1281 | } | ||
| 1280 | 1282 | ||
| 1281 | mvm->geo_profiles[i].values[j] = entry->integer.value; | 1283 | mvm->geo_profiles[i].values[j] = entry->integer.value; |
| 1282 | } | 1284 | } |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index c7b1e58e3384..ce901be5fba8 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | |||
| @@ -2597,8 +2597,18 @@ static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm, | |||
| 2597 | spin_lock_bh(&mvm_sta->lock); | 2597 | spin_lock_bh(&mvm_sta->lock); |
| 2598 | for (i = 0; i <= IWL_MAX_TID_COUNT; i++) { | 2598 | for (i = 0; i <= IWL_MAX_TID_COUNT; i++) { |
| 2599 | tid_data = &mvm_sta->tid_data[i]; | 2599 | tid_data = &mvm_sta->tid_data[i]; |
| 2600 | while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames))) | 2600 | |
| 2601 | while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames))) { | ||
| 2602 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
| 2603 | |||
| 2604 | /* | ||
| 2605 | * The first deferred frame should've stopped the MAC | ||
| 2606 | * queues, so we should never get a second deferred | ||
| 2607 | * frame for the RA/TID. | ||
| 2608 | */ | ||
| 2609 | iwl_mvm_start_mac_queues(mvm, info->hw_queue); | ||
| 2601 | ieee80211_free_txskb(mvm->hw, skb); | 2610 | ieee80211_free_txskb(mvm->hw, skb); |
| 2611 | } | ||
| 2602 | } | 2612 | } |
| 2603 | spin_unlock_bh(&mvm_sta->lock); | 2613 | spin_unlock_bh(&mvm_sta->lock); |
| 2604 | } | 2614 | } |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 65beca3a457a..8999a1199d60 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c | |||
| @@ -1291,7 +1291,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, | |||
| 1291 | * first index into rate scale table. | 1291 | * first index into rate scale table. |
| 1292 | */ | 1292 | */ |
| 1293 | if (info->flags & IEEE80211_TX_STAT_AMPDU) { | 1293 | if (info->flags & IEEE80211_TX_STAT_AMPDU) { |
| 1294 | rs_collect_tpc_data(mvm, lq_sta, curr_tbl, lq_rate.index, | 1294 | rs_collect_tpc_data(mvm, lq_sta, curr_tbl, tx_resp_rate.index, |
| 1295 | info->status.ampdu_len, | 1295 | info->status.ampdu_len, |
| 1296 | info->status.ampdu_ack_len, | 1296 | info->status.ampdu_ack_len, |
| 1297 | reduced_txp); | 1297 | reduced_txp); |
| @@ -1312,7 +1312,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, | |||
| 1312 | if (info->status.ampdu_ack_len == 0) | 1312 | if (info->status.ampdu_ack_len == 0) |
| 1313 | info->status.ampdu_len = 1; | 1313 | info->status.ampdu_len = 1; |
| 1314 | 1314 | ||
| 1315 | rs_collect_tlc_data(mvm, lq_sta, curr_tbl, lq_rate.index, | 1315 | rs_collect_tlc_data(mvm, lq_sta, curr_tbl, tx_resp_rate.index, |
| 1316 | info->status.ampdu_len, | 1316 | info->status.ampdu_len, |
| 1317 | info->status.ampdu_ack_len); | 1317 | info->status.ampdu_ack_len); |
| 1318 | 1318 | ||
| @@ -1348,11 +1348,11 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, | |||
| 1348 | continue; | 1348 | continue; |
| 1349 | 1349 | ||
| 1350 | rs_collect_tpc_data(mvm, lq_sta, tmp_tbl, | 1350 | rs_collect_tpc_data(mvm, lq_sta, tmp_tbl, |
| 1351 | lq_rate.index, 1, | 1351 | tx_resp_rate.index, 1, |
| 1352 | i < retries ? 0 : legacy_success, | 1352 | i < retries ? 0 : legacy_success, |
| 1353 | reduced_txp); | 1353 | reduced_txp); |
| 1354 | rs_collect_tlc_data(mvm, lq_sta, tmp_tbl, | 1354 | rs_collect_tlc_data(mvm, lq_sta, tmp_tbl, |
| 1355 | lq_rate.index, 1, | 1355 | tx_resp_rate.index, 1, |
| 1356 | i < retries ? 0 : legacy_success); | 1356 | i < retries ? 0 : legacy_success); |
| 1357 | } | 1357 | } |
| 1358 | 1358 | ||
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index f3e608196369..71c8b800ffa9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | |||
| @@ -636,9 +636,9 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, | |||
| 636 | 636 | ||
| 637 | baid_data = rcu_dereference(mvm->baid_map[baid]); | 637 | baid_data = rcu_dereference(mvm->baid_map[baid]); |
| 638 | if (!baid_data) { | 638 | if (!baid_data) { |
| 639 | WARN(!(reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN), | 639 | IWL_DEBUG_RX(mvm, |
| 640 | "Received baid %d, but no data exists for this BAID\n", | 640 | "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n", |
| 641 | baid); | 641 | baid, reorder); |
| 642 | return false; | 642 | return false; |
| 643 | } | 643 | } |
| 644 | 644 | ||
| @@ -759,7 +759,9 @@ static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm, | |||
| 759 | 759 | ||
| 760 | data = rcu_dereference(mvm->baid_map[baid]); | 760 | data = rcu_dereference(mvm->baid_map[baid]); |
| 761 | if (!data) { | 761 | if (!data) { |
| 762 | WARN_ON(!(reorder_data & IWL_RX_MPDU_REORDER_BA_OLD_SN)); | 762 | IWL_DEBUG_RX(mvm, |
| 763 | "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n", | ||
| 764 | baid, reorder_data); | ||
| 763 | goto out; | 765 | goto out; |
| 764 | } | 766 | } |
| 765 | 767 | ||
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index ab66b4394dfc..027ee5e72172 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c | |||
| @@ -121,7 +121,8 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, | |||
| 121 | .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color), | 121 | .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color), |
| 122 | .add_modify = update ? 1 : 0, | 122 | .add_modify = update ? 1 : 0, |
| 123 | .station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK | | 123 | .station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK | |
| 124 | STA_FLG_MIMO_EN_MSK), | 124 | STA_FLG_MIMO_EN_MSK | |
| 125 | STA_FLG_RTS_MIMO_PROT), | ||
| 125 | .tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg), | 126 | .tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg), |
| 126 | }; | 127 | }; |
| 127 | int ret; | 128 | int ret; |
| @@ -290,8 +291,8 @@ static void iwl_mvm_rx_agg_session_expired(unsigned long data) | |||
| 290 | goto unlock; | 291 | goto unlock; |
| 291 | 292 | ||
| 292 | mvm_sta = iwl_mvm_sta_from_mac80211(sta); | 293 | mvm_sta = iwl_mvm_sta_from_mac80211(sta); |
| 293 | ieee80211_stop_rx_ba_session_offl(mvm_sta->vif, | 294 | ieee80211_rx_ba_timer_expired(mvm_sta->vif, |
| 294 | sta->addr, ba_data->tid); | 295 | sta->addr, ba_data->tid); |
| 295 | unlock: | 296 | unlock: |
| 296 | rcu_read_unlock(); | 297 | rcu_read_unlock(); |
| 297 | } | 298 | } |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 60360ed73f26..5fcc9dd6be56 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c | |||
| @@ -185,8 +185,14 @@ static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, | |||
| 185 | else | 185 | else |
| 186 | udp_hdr(skb)->check = 0; | 186 | udp_hdr(skb)->check = 0; |
| 187 | 187 | ||
| 188 | /* mac header len should include IV, size is in words */ | 188 | /* |
| 189 | if (info->control.hw_key) | 189 | * mac header len should include IV, size is in words unless |
| 190 | * the IV is added by the firmware like in WEP. | ||
| 191 | * In new Tx API, the IV is always added by the firmware. | ||
| 192 | */ | ||
| 193 | if (!iwl_mvm_has_new_tx_api(mvm) && info->control.hw_key && | ||
| 194 | info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP40 && | ||
| 195 | info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP104) | ||
| 190 | mh_len += info->control.hw_key->iv_len; | 196 | mh_len += info->control.hw_key->iv_len; |
| 191 | mh_len /= 2; | 197 | mh_len /= 2; |
| 192 | offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE; | 198 | offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE; |
| @@ -1815,6 +1821,8 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) | |||
| 1815 | struct iwl_mvm_tid_data *tid_data; | 1821 | struct iwl_mvm_tid_data *tid_data; |
| 1816 | struct iwl_mvm_sta *mvmsta; | 1822 | struct iwl_mvm_sta *mvmsta; |
| 1817 | 1823 | ||
| 1824 | ba_info.flags = IEEE80211_TX_STAT_AMPDU; | ||
| 1825 | |||
| 1818 | if (iwl_mvm_has_new_tx_api(mvm)) { | 1826 | if (iwl_mvm_has_new_tx_api(mvm)) { |
| 1819 | struct iwl_mvm_compressed_ba_notif *ba_res = | 1827 | struct iwl_mvm_compressed_ba_notif *ba_res = |
| 1820 | (void *)pkt->data; | 1828 | (void *)pkt->data; |
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index f16c1bb9bf94..84f4ba01e14f 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c | |||
| @@ -510,9 +510,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = { | |||
| 510 | 510 | ||
| 511 | /* 9000 Series */ | 511 | /* 9000 Series */ |
| 512 | {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)}, | 512 | {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)}, |
| 513 | {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)}, | ||
| 514 | {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)}, | ||
| 513 | {IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)}, | 515 | {IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)}, |
| 514 | {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)}, | 516 | {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)}, |
| 517 | {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)}, | ||
| 518 | {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)}, | ||
| 519 | {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)}, | ||
| 520 | {IWL_PCI_DEVICE(0x2526, 0x0210, iwl9260_2ac_cfg)}, | ||
| 521 | {IWL_PCI_DEVICE(0x2526, 0x0214, iwl9260_2ac_cfg)}, | ||
| 515 | {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)}, | 522 | {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)}, |
| 523 | {IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)}, | ||
| 516 | {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg)}, | 524 | {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg)}, |
| 517 | {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg)}, | 525 | {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg)}, |
| 518 | {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg)}, | 526 | {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg)}, |
| @@ -527,10 +535,22 @@ static const struct pci_device_id iwl_hw_card_ids[] = { | |||
| 527 | {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg)}, | 535 | {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg)}, |
| 528 | {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg)}, | 536 | {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg)}, |
| 529 | {IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)}, | 537 | {IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)}, |
| 538 | {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)}, | ||
| 539 | {IWL_PCI_DEVICE(0x2526, 0x0064, iwl9460_2ac_cfg)}, | ||
| 540 | {IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9460_2ac_cfg)}, | ||
| 541 | {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)}, | ||
| 542 | {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)}, | ||
| 543 | {IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9460_2ac_cfg)}, | ||
| 544 | {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)}, | ||
| 530 | {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg)}, | 545 | {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg)}, |
| 531 | {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg)}, | 546 | {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg)}, |
| 532 | {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg)}, | 547 | {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg)}, |
| 533 | {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)}, | 548 | {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)}, |
| 549 | {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)}, | ||
| 550 | {IWL_PCI_DEVICE(0x2526, 0x0230, iwl9560_2ac_cfg)}, | ||
| 551 | {IWL_PCI_DEVICE(0x2526, 0x0234, iwl9560_2ac_cfg)}, | ||
| 552 | {IWL_PCI_DEVICE(0x2526, 0x0238, iwl9560_2ac_cfg)}, | ||
| 553 | {IWL_PCI_DEVICE(0x2526, 0x023C, iwl9560_2ac_cfg)}, | ||
| 534 | {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg)}, | 554 | {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg)}, |
| 535 | {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg)}, | 555 | {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg)}, |
| 536 | {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg)}, | 556 | {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg)}, |
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index c49f1f8b2e57..37046ac2c441 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c | |||
| @@ -336,7 +336,7 @@ static int nvme_get_stream_params(struct nvme_ctrl *ctrl, | |||
| 336 | 336 | ||
| 337 | c.directive.opcode = nvme_admin_directive_recv; | 337 | c.directive.opcode = nvme_admin_directive_recv; |
| 338 | c.directive.nsid = cpu_to_le32(nsid); | 338 | c.directive.nsid = cpu_to_le32(nsid); |
| 339 | c.directive.numd = cpu_to_le32(sizeof(*s)); | 339 | c.directive.numd = cpu_to_le32((sizeof(*s) >> 2) - 1); |
| 340 | c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM; | 340 | c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM; |
| 341 | c.directive.dtype = NVME_DIR_STREAMS; | 341 | c.directive.dtype = NVME_DIR_STREAMS; |
| 342 | 342 | ||
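
NVMe "number of dwords" (NUMD) fields are zero-based, so the hunk above encodes the parameter-buffer length as its size in 4-byte units minus one. A compile-and-run sketch of that conversion; the struct is only a stand-in, the arithmetic mirrors the change.

```c
/* Sketch of the zero-based dword count used for NVMe NUMD fields. */
#include <stdint.h>
#include <stdio.h>

struct streams_directive_params {
	uint8_t raw[32];	/* placeholder payload, a multiple of 4 bytes */
};

static uint32_t nvme_numd(size_t len_bytes)
{
	/* NUMD is expressed in dwords and is zero-based, so a 32-byte
	 * buffer is encoded as (32 / 4) - 1 = 7. */
	return (uint32_t)(len_bytes >> 2) - 1;
}

int main(void)
{
	struct streams_directive_params s;

	printf("numd = %u\n", nvme_numd(sizeof(s)));
	return 0;
}
```
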
| @@ -1509,7 +1509,7 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, | |||
| 1509 | blk_queue_write_cache(q, vwc, vwc); | 1509 | blk_queue_write_cache(q, vwc, vwc); |
| 1510 | } | 1510 | } |
| 1511 | 1511 | ||
| 1512 | static void nvme_configure_apst(struct nvme_ctrl *ctrl) | 1512 | static int nvme_configure_apst(struct nvme_ctrl *ctrl) |
| 1513 | { | 1513 | { |
| 1514 | /* | 1514 | /* |
| 1515 | * APST (Autonomous Power State Transition) lets us program a | 1515 | * APST (Autonomous Power State Transition) lets us program a |
| @@ -1538,16 +1538,16 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl) | |||
| 1538 | * then don't do anything. | 1538 | * then don't do anything. |
| 1539 | */ | 1539 | */ |
| 1540 | if (!ctrl->apsta) | 1540 | if (!ctrl->apsta) |
| 1541 | return; | 1541 | return 0; |
| 1542 | 1542 | ||
| 1543 | if (ctrl->npss > 31) { | 1543 | if (ctrl->npss > 31) { |
| 1544 | dev_warn(ctrl->device, "NPSS is invalid; not using APST\n"); | 1544 | dev_warn(ctrl->device, "NPSS is invalid; not using APST\n"); |
| 1545 | return; | 1545 | return 0; |
| 1546 | } | 1546 | } |
| 1547 | 1547 | ||
| 1548 | table = kzalloc(sizeof(*table), GFP_KERNEL); | 1548 | table = kzalloc(sizeof(*table), GFP_KERNEL); |
| 1549 | if (!table) | 1549 | if (!table) |
| 1550 | return; | 1550 | return 0; |
| 1551 | 1551 | ||
| 1552 | if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) { | 1552 | if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) { |
| 1553 | /* Turn off APST. */ | 1553 | /* Turn off APST. */ |
| @@ -1629,6 +1629,7 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl) | |||
| 1629 | dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret); | 1629 | dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret); |
| 1630 | 1630 | ||
| 1631 | kfree(table); | 1631 | kfree(table); |
| 1632 | return ret; | ||
| 1632 | } | 1633 | } |
| 1633 | 1634 | ||
| 1634 | static void nvme_set_latency_tolerance(struct device *dev, s32 val) | 1635 | static void nvme_set_latency_tolerance(struct device *dev, s32 val) |
| @@ -1835,13 +1836,16 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) | |||
| 1835 | * In fabrics we need to verify the cntlid matches the | 1836 | * In fabrics we need to verify the cntlid matches the |
| 1836 | * admin connect | 1837 | * admin connect |
| 1837 | */ | 1838 | */ |
| 1838 | if (ctrl->cntlid != le16_to_cpu(id->cntlid)) | 1839 | if (ctrl->cntlid != le16_to_cpu(id->cntlid)) { |
| 1839 | ret = -EINVAL; | 1840 | ret = -EINVAL; |
| 1841 | goto out_free; | ||
| 1842 | } | ||
| 1840 | 1843 | ||
| 1841 | if (!ctrl->opts->discovery_nqn && !ctrl->kas) { | 1844 | if (!ctrl->opts->discovery_nqn && !ctrl->kas) { |
| 1842 | dev_err(ctrl->device, | 1845 | dev_err(ctrl->device, |
| 1843 | "keep-alive support is mandatory for fabrics\n"); | 1846 | "keep-alive support is mandatory for fabrics\n"); |
| 1844 | ret = -EINVAL; | 1847 | ret = -EINVAL; |
| 1848 | goto out_free; | ||
| 1845 | } | 1849 | } |
| 1846 | } else { | 1850 | } else { |
| 1847 | ctrl->cntlid = le16_to_cpu(id->cntlid); | 1851 | ctrl->cntlid = le16_to_cpu(id->cntlid); |
| @@ -1856,11 +1860,20 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) | |||
| 1856 | else if (!ctrl->apst_enabled && prev_apst_enabled) | 1860 | else if (!ctrl->apst_enabled && prev_apst_enabled) |
| 1857 | dev_pm_qos_hide_latency_tolerance(ctrl->device); | 1861 | dev_pm_qos_hide_latency_tolerance(ctrl->device); |
| 1858 | 1862 | ||
| 1859 | nvme_configure_apst(ctrl); | 1863 | ret = nvme_configure_apst(ctrl); |
| 1860 | nvme_configure_directives(ctrl); | 1864 | if (ret < 0) |
| 1865 | return ret; | ||
| 1866 | |||
| 1867 | ret = nvme_configure_directives(ctrl); | ||
| 1868 | if (ret < 0) | ||
| 1869 | return ret; | ||
| 1861 | 1870 | ||
| 1862 | ctrl->identified = true; | 1871 | ctrl->identified = true; |
| 1863 | 1872 | ||
| 1873 | return 0; | ||
| 1874 | |||
| 1875 | out_free: | ||
| 1876 | kfree(id); | ||
| 1864 | return ret; | 1877 | return ret; |
| 1865 | } | 1878 | } |
| 1866 | EXPORT_SYMBOL_GPL(nvme_init_identify); | 1879 | EXPORT_SYMBOL_GPL(nvme_init_identify); |
| @@ -2004,9 +2017,11 @@ static ssize_t wwid_show(struct device *dev, struct device_attribute *attr, | |||
| 2004 | if (memchr_inv(ns->eui, 0, sizeof(ns->eui))) | 2017 | if (memchr_inv(ns->eui, 0, sizeof(ns->eui))) |
| 2005 | return sprintf(buf, "eui.%8phN\n", ns->eui); | 2018 | return sprintf(buf, "eui.%8phN\n", ns->eui); |
| 2006 | 2019 | ||
| 2007 | while (ctrl->serial[serial_len - 1] == ' ') | 2020 | while (serial_len > 0 && (ctrl->serial[serial_len - 1] == ' ' || |
| 2021 | ctrl->serial[serial_len - 1] == '\0')) | ||
| 2008 | serial_len--; | 2022 | serial_len--; |
| 2009 | while (ctrl->model[model_len - 1] == ' ') | 2023 | while (model_len > 0 && (ctrl->model[model_len - 1] == ' ' || |
| 2024 | ctrl->model[model_len - 1] == '\0')) | ||
| 2010 | model_len--; | 2025 | model_len--; |
| 2011 | 2026 | ||
| 2012 | return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid, | 2027 | return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid, |
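
The serial and model fields trimmed above are fixed-width and padded with spaces (and, on some controllers, NUL bytes); the added length checks keep an all-padding field from walking off the front of the buffer. A standalone sketch of the same trimming loop:

```c
/* Trim a fixed-width, space/NUL padded identifier field without
 * running past the start of the buffer. */
#include <stdio.h>
#include <string.h>

static size_t trim_padded(const char *field, size_t len)
{
	while (len > 0 && (field[len - 1] == ' ' || field[len - 1] == '\0'))
		len--;
	return len;		/* number of meaningful bytes */
}

int main(void)
{
	char serial[20];

	memset(serial, 0, sizeof(serial));
	memcpy(serial, "S3X9NX0J123456  ", 16);	/* space padded, NUL tail */

	printf("%zu\n", trim_padded(serial, sizeof(serial)));	/* prints 14 */
	return 0;
}
```
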
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index 2e582a240943..5f5cd306f76d 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c | |||
| @@ -794,7 +794,8 @@ static int nvmf_check_allowed_opts(struct nvmf_ctrl_options *opts, | |||
| 794 | int i; | 794 | int i; |
| 795 | 795 | ||
| 796 | for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) { | 796 | for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) { |
| 797 | if (opt_tokens[i].token & ~allowed_opts) { | 797 | if ((opt_tokens[i].token & opts->mask) && |
| 798 | (opt_tokens[i].token & ~allowed_opts)) { | ||
| 798 | pr_warn("invalid parameter '%s'\n", | 799 | pr_warn("invalid parameter '%s'\n", |
| 799 | opt_tokens[i].pattern); | 800 | opt_tokens[i].pattern); |
| 800 | } | 801 | } |
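
The reworked check above warns only about options the user actually supplied (present in opts->mask) that fall outside the transport's allowed set, instead of warning about every token the transport happens not to support. A small bitmask sketch of that filter, with made-up option tokens:

```c
/* Warn only about options that were actually supplied and are not
 * allowed. Token values and names are invented for the example. */
#include <stdio.h>

#define OPT_QUEUE_SIZE		(1 << 0)
#define OPT_NR_IO_QUEUES	(1 << 1)
#define OPT_HOST_TRADDR		(1 << 2)

static void check_allowed(unsigned int supplied, unsigned int allowed)
{
	static const struct { unsigned int token; const char *name; } opts[] = {
		{ OPT_QUEUE_SIZE,	"queue_size" },
		{ OPT_NR_IO_QUEUES,	"nr_io_queues" },
		{ OPT_HOST_TRADDR,	"host_traddr" },
	};

	for (size_t i = 0; i < sizeof(opts) / sizeof(opts[0]); i++) {
		/* Ignore tokens the user never passed; only supplied,
		 * disallowed ones deserve a warning. */
		if ((opts[i].token & supplied) && (opts[i].token & ~allowed))
			printf("invalid parameter '%s'\n", opts[i].name);
	}
}

int main(void)
{
	check_allowed(OPT_QUEUE_SIZE | OPT_HOST_TRADDR,
		      OPT_QUEUE_SIZE | OPT_NR_IO_QUEUES);
	return 0;
}
```
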
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index cd888a47d0fc..925467b31a33 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
| @@ -801,6 +801,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, | |||
| 801 | return; | 801 | return; |
| 802 | } | 802 | } |
| 803 | 803 | ||
| 804 | nvmeq->cqe_seen = 1; | ||
| 804 | req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id); | 805 | req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id); |
| 805 | nvme_end_request(req, cqe->status, cqe->result); | 806 | nvme_end_request(req, cqe->status, cqe->result); |
| 806 | } | 807 | } |
| @@ -830,10 +831,8 @@ static void nvme_process_cq(struct nvme_queue *nvmeq) | |||
| 830 | consumed++; | 831 | consumed++; |
| 831 | } | 832 | } |
| 832 | 833 | ||
| 833 | if (consumed) { | 834 | if (consumed) |
| 834 | nvme_ring_cq_doorbell(nvmeq); | 835 | nvme_ring_cq_doorbell(nvmeq); |
| 835 | nvmeq->cqe_seen = 1; | ||
| 836 | } | ||
| 837 | } | 836 | } |
| 838 | 837 | ||
| 839 | static irqreturn_t nvme_irq(int irq, void *data) | 838 | static irqreturn_t nvme_irq(int irq, void *data) |
| @@ -1558,11 +1557,9 @@ static inline void nvme_release_cmb(struct nvme_dev *dev) | |||
| 1558 | if (dev->cmb) { | 1557 | if (dev->cmb) { |
| 1559 | iounmap(dev->cmb); | 1558 | iounmap(dev->cmb); |
| 1560 | dev->cmb = NULL; | 1559 | dev->cmb = NULL; |
| 1561 | if (dev->cmbsz) { | 1560 | sysfs_remove_file_from_group(&dev->ctrl.device->kobj, |
| 1562 | sysfs_remove_file_from_group(&dev->ctrl.device->kobj, | 1561 | &dev_attr_cmb.attr, NULL); |
| 1563 | &dev_attr_cmb.attr, NULL); | 1562 | dev->cmbsz = 0; |
| 1564 | dev->cmbsz = 0; | ||
| 1565 | } | ||
| 1566 | } | 1563 | } |
| 1567 | } | 1564 | } |
| 1568 | 1565 | ||
| @@ -1953,16 +1950,14 @@ static int nvme_pci_enable(struct nvme_dev *dev) | |||
| 1953 | 1950 | ||
| 1954 | /* | 1951 | /* |
| 1955 | * CMBs can currently only exist on >=1.2 PCIe devices. We only | 1952 | * CMBs can currently only exist on >=1.2 PCIe devices. We only |
| 1956 | * populate sysfs if a CMB is implemented. Note that we add the | 1953 | * populate sysfs if a CMB is implemented. Since nvme_dev_attrs_group |
| 1957 | * CMB attribute to the nvme_ctrl kobj which removes the need to remove | 1954 | * has no name we can pass NULL as final argument to |
| 1958 | * it on exit. Since nvme_dev_attrs_group has no name we can pass | 1955 | * sysfs_add_file_to_group. |
| 1959 | * NULL as final argument to sysfs_add_file_to_group. | ||
| 1960 | */ | 1956 | */ |
| 1961 | 1957 | ||
| 1962 | if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) { | 1958 | if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) { |
| 1963 | dev->cmb = nvme_map_cmb(dev); | 1959 | dev->cmb = nvme_map_cmb(dev); |
| 1964 | 1960 | if (dev->cmb) { | |
| 1965 | if (dev->cmbsz) { | ||
| 1966 | if (sysfs_add_file_to_group(&dev->ctrl.device->kobj, | 1961 | if (sysfs_add_file_to_group(&dev->ctrl.device->kobj, |
| 1967 | &dev_attr_cmb.attr, NULL)) | 1962 | &dev_attr_cmb.attr, NULL)) |
| 1968 | dev_warn(dev->ctrl.device, | 1963 | dev_warn(dev->ctrl.device, |
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 2d7a98ab53fb..a53bb6635b83 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c | |||
| @@ -199,12 +199,6 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req) | |||
| 199 | copy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1); | 199 | copy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1); |
| 200 | copy_and_pad(id->fr, sizeof(id->fr), UTS_RELEASE, strlen(UTS_RELEASE)); | 200 | copy_and_pad(id->fr, sizeof(id->fr), UTS_RELEASE, strlen(UTS_RELEASE)); |
| 201 | 201 | ||
| 202 | memset(id->mn, ' ', sizeof(id->mn)); | ||
| 203 | strncpy((char *)id->mn, "Linux", sizeof(id->mn)); | ||
| 204 | |||
| 205 | memset(id->fr, ' ', sizeof(id->fr)); | ||
| 206 | strncpy((char *)id->fr, UTS_RELEASE, sizeof(id->fr)); | ||
| 207 | |||
| 208 | id->rab = 6; | 202 | id->rab = 6; |
| 209 | 203 | ||
| 210 | /* | 204 | /* |
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index 31ca55dfcb1d..309c84aa7595 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c | |||
| @@ -114,6 +114,11 @@ struct nvmet_fc_tgtport { | |||
| 114 | struct kref ref; | 114 | struct kref ref; |
| 115 | }; | 115 | }; |
| 116 | 116 | ||
| 117 | struct nvmet_fc_defer_fcp_req { | ||
| 118 | struct list_head req_list; | ||
| 119 | struct nvmefc_tgt_fcp_req *fcp_req; | ||
| 120 | }; | ||
| 121 | |||
| 117 | struct nvmet_fc_tgt_queue { | 122 | struct nvmet_fc_tgt_queue { |
| 118 | bool ninetypercent; | 123 | bool ninetypercent; |
| 119 | u16 qid; | 124 | u16 qid; |
| @@ -132,6 +137,8 @@ struct nvmet_fc_tgt_queue { | |||
| 132 | struct nvmet_fc_tgt_assoc *assoc; | 137 | struct nvmet_fc_tgt_assoc *assoc; |
| 133 | struct nvmet_fc_fcp_iod *fod; /* array of fcp_iods */ | 138 | struct nvmet_fc_fcp_iod *fod; /* array of fcp_iods */ |
| 134 | struct list_head fod_list; | 139 | struct list_head fod_list; |
| 140 | struct list_head pending_cmd_list; | ||
| 141 | struct list_head avail_defer_list; | ||
| 135 | struct workqueue_struct *work_q; | 142 | struct workqueue_struct *work_q; |
| 136 | struct kref ref; | 143 | struct kref ref; |
| 137 | } __aligned(sizeof(unsigned long long)); | 144 | } __aligned(sizeof(unsigned long long)); |
| @@ -223,6 +230,8 @@ static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue); | |||
| 223 | static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue); | 230 | static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue); |
| 224 | static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport); | 231 | static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport); |
| 225 | static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport); | 232 | static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport); |
| 233 | static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, | ||
| 234 | struct nvmet_fc_fcp_iod *fod); | ||
| 226 | 235 | ||
| 227 | 236 | ||
| 228 | /* *********************** FC-NVME DMA Handling **************************** */ | 237 | /* *********************** FC-NVME DMA Handling **************************** */ |
| @@ -385,7 +394,7 @@ nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport) | |||
| 385 | static struct nvmet_fc_ls_iod * | 394 | static struct nvmet_fc_ls_iod * |
| 386 | nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport) | 395 | nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport) |
| 387 | { | 396 | { |
| 388 | static struct nvmet_fc_ls_iod *iod; | 397 | struct nvmet_fc_ls_iod *iod; |
| 389 | unsigned long flags; | 398 | unsigned long flags; |
| 390 | 399 | ||
| 391 | spin_lock_irqsave(&tgtport->lock, flags); | 400 | spin_lock_irqsave(&tgtport->lock, flags); |
| @@ -462,10 +471,10 @@ nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport, | |||
| 462 | static struct nvmet_fc_fcp_iod * | 471 | static struct nvmet_fc_fcp_iod * |
| 463 | nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue) | 472 | nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue) |
| 464 | { | 473 | { |
| 465 | static struct nvmet_fc_fcp_iod *fod; | 474 | struct nvmet_fc_fcp_iod *fod; |
| 466 | unsigned long flags; | 475 | |
| 476 | lockdep_assert_held(&queue->qlock); | ||
| 467 | 477 | ||
| 468 | spin_lock_irqsave(&queue->qlock, flags); | ||
| 469 | fod = list_first_entry_or_null(&queue->fod_list, | 478 | fod = list_first_entry_or_null(&queue->fod_list, |
| 470 | struct nvmet_fc_fcp_iod, fcp_list); | 479 | struct nvmet_fc_fcp_iod, fcp_list); |
| 471 | if (fod) { | 480 | if (fod) { |
| @@ -477,17 +486,37 @@ nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue) | |||
| 477 | * will "inherit" that reference. | 486 | * will "inherit" that reference. |
| 478 | */ | 487 | */ |
| 479 | } | 488 | } |
| 480 | spin_unlock_irqrestore(&queue->qlock, flags); | ||
| 481 | return fod; | 489 | return fod; |
| 482 | } | 490 | } |
| 483 | 491 | ||
| 484 | 492 | ||
| 485 | static void | 493 | static void |
| 494 | nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport, | ||
| 495 | struct nvmet_fc_tgt_queue *queue, | ||
| 496 | struct nvmefc_tgt_fcp_req *fcpreq) | ||
| 497 | { | ||
| 498 | struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; | ||
| 499 | |||
| 500 | /* | ||
| 501 | * put all admin cmds on hw queue id 0. All io commands go to | ||
| 502 | * the respective hw queue based on a modulo basis | ||
| 503 | */ | ||
| 504 | fcpreq->hwqid = queue->qid ? | ||
| 505 | ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0; | ||
| 506 | |||
| 507 | if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR) | ||
| 508 | queue_work_on(queue->cpu, queue->work_q, &fod->work); | ||
| 509 | else | ||
| 510 | nvmet_fc_handle_fcp_rqst(tgtport, fod); | ||
| 511 | } | ||
| 512 | |||
| 513 | static void | ||
| 486 | nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue, | 514 | nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue, |
| 487 | struct nvmet_fc_fcp_iod *fod) | 515 | struct nvmet_fc_fcp_iod *fod) |
| 488 | { | 516 | { |
| 489 | struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; | 517 | struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; |
| 490 | struct nvmet_fc_tgtport *tgtport = fod->tgtport; | 518 | struct nvmet_fc_tgtport *tgtport = fod->tgtport; |
| 519 | struct nvmet_fc_defer_fcp_req *deferfcp; | ||
| 491 | unsigned long flags; | 520 | unsigned long flags; |
| 492 | 521 | ||
| 493 | fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma, | 522 | fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma, |
| @@ -495,21 +524,56 @@ nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue, | |||
| 495 | 524 | ||
| 496 | fcpreq->nvmet_fc_private = NULL; | 525 | fcpreq->nvmet_fc_private = NULL; |
| 497 | 526 | ||
| 498 | spin_lock_irqsave(&queue->qlock, flags); | ||
| 499 | list_add_tail(&fod->fcp_list, &fod->queue->fod_list); | ||
| 500 | fod->active = false; | 527 | fod->active = false; |
| 501 | fod->abort = false; | 528 | fod->abort = false; |
| 502 | fod->aborted = false; | 529 | fod->aborted = false; |
| 503 | fod->writedataactive = false; | 530 | fod->writedataactive = false; |
| 504 | fod->fcpreq = NULL; | 531 | fod->fcpreq = NULL; |
| 532 | |||
| 533 | tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq); | ||
| 534 | |||
| 535 | spin_lock_irqsave(&queue->qlock, flags); | ||
| 536 | deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, | ||
| 537 | struct nvmet_fc_defer_fcp_req, req_list); | ||
| 538 | if (!deferfcp) { | ||
| 539 | list_add_tail(&fod->fcp_list, &fod->queue->fod_list); | ||
| 540 | spin_unlock_irqrestore(&queue->qlock, flags); | ||
| 541 | |||
| 542 | /* Release reference taken at queue lookup and fod allocation */ | ||
| 543 | nvmet_fc_tgt_q_put(queue); | ||
| 544 | return; | ||
| 545 | } | ||
| 546 | |||
| 547 | /* Re-use the fod for the next pending cmd that was deferred */ | ||
| 548 | list_del(&deferfcp->req_list); | ||
| 549 | |||
| 550 | fcpreq = deferfcp->fcp_req; | ||
| 551 | |||
| 552 | /* deferfcp can be reused for another IO at a later date */ | ||
| 553 | list_add_tail(&deferfcp->req_list, &queue->avail_defer_list); | ||
| 554 | |||
| 505 | spin_unlock_irqrestore(&queue->qlock, flags); | 555 | spin_unlock_irqrestore(&queue->qlock, flags); |
| 506 | 556 | ||
| 557 | /* Save NVME CMD IO in fod */ | ||
| 558 | memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen); | ||
| 559 | |||
| 560 | /* Setup new fcpreq to be processed */ | ||
| 561 | fcpreq->rspaddr = NULL; | ||
| 562 | fcpreq->rsplen = 0; | ||
| 563 | fcpreq->nvmet_fc_private = fod; | ||
| 564 | fod->fcpreq = fcpreq; | ||
| 565 | fod->active = true; | ||
| 566 | |||
| 567 | /* inform LLDD IO is now being processed */ | ||
| 568 | tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq); | ||
| 569 | |||
| 570 | /* Submit deferred IO for processing */ | ||
| 571 | nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq); | ||
| 572 | |||
| 507 | /* | 573 | /* |
| 508 | * release the reference taken at queue lookup and fod allocation | 574 | * Leave the queue lookup get reference taken when |
| 575 | * fod was originally allocated. | ||
| 509 | */ | 576 | */ |
| 510 | nvmet_fc_tgt_q_put(queue); | ||
| 511 | |||
| 512 | tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq); | ||
| 513 | } | 577 | } |
| 514 | 578 | ||
| 515 | static int | 579 | static int |
| @@ -569,6 +633,8 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc, | |||
| 569 | queue->port = assoc->tgtport->port; | 633 | queue->port = assoc->tgtport->port; |
| 570 | queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid); | 634 | queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid); |
| 571 | INIT_LIST_HEAD(&queue->fod_list); | 635 | INIT_LIST_HEAD(&queue->fod_list); |
| 636 | INIT_LIST_HEAD(&queue->avail_defer_list); | ||
| 637 | INIT_LIST_HEAD(&queue->pending_cmd_list); | ||
| 572 | atomic_set(&queue->connected, 0); | 638 | atomic_set(&queue->connected, 0); |
| 573 | atomic_set(&queue->sqtail, 0); | 639 | atomic_set(&queue->sqtail, 0); |
| 574 | atomic_set(&queue->rsn, 1); | 640 | atomic_set(&queue->rsn, 1); |
| @@ -638,6 +704,7 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue) | |||
| 638 | { | 704 | { |
| 639 | struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport; | 705 | struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport; |
| 640 | struct nvmet_fc_fcp_iod *fod = queue->fod; | 706 | struct nvmet_fc_fcp_iod *fod = queue->fod; |
| 707 | struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr; | ||
| 641 | unsigned long flags; | 708 | unsigned long flags; |
| 642 | int i, writedataactive; | 709 | int i, writedataactive; |
| 643 | bool disconnect; | 710 | bool disconnect; |
| @@ -666,6 +733,36 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue) | |||
| 666 | } | 733 | } |
| 667 | } | 734 | } |
| 668 | } | 735 | } |
| 736 | |||
| 737 | /* Cleanup defer'ed IOs in queue */ | ||
| 738 | list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list, | ||
| 739 | req_list) { | ||
| 740 | list_del(&deferfcp->req_list); | ||
| 741 | kfree(deferfcp); | ||
| 742 | } | ||
| 743 | |||
| 744 | for (;;) { | ||
| 745 | deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, | ||
| 746 | struct nvmet_fc_defer_fcp_req, req_list); | ||
| 747 | if (!deferfcp) | ||
| 748 | break; | ||
| 749 | |||
| 750 | list_del(&deferfcp->req_list); | ||
| 751 | spin_unlock_irqrestore(&queue->qlock, flags); | ||
| 752 | |||
| 753 | tgtport->ops->defer_rcv(&tgtport->fc_target_port, | ||
| 754 | deferfcp->fcp_req); | ||
| 755 | |||
| 756 | tgtport->ops->fcp_abort(&tgtport->fc_target_port, | ||
| 757 | deferfcp->fcp_req); | ||
| 758 | |||
| 759 | tgtport->ops->fcp_req_release(&tgtport->fc_target_port, | ||
| 760 | deferfcp->fcp_req); | ||
| 761 | |||
| 762 | kfree(deferfcp); | ||
| 763 | |||
| 764 | spin_lock_irqsave(&queue->qlock, flags); | ||
| 765 | } | ||
| 669 | spin_unlock_irqrestore(&queue->qlock, flags); | 766 | spin_unlock_irqrestore(&queue->qlock, flags); |
| 670 | 767 | ||
| 671 | flush_workqueue(queue->work_q); | 768 | flush_workqueue(queue->work_q); |
| @@ -2172,11 +2269,38 @@ nvmet_fc_handle_fcp_rqst_work(struct work_struct *work) | |||
| 2172 | * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc | 2269 | * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc |
| 2173 | * layer for processing. | 2270 | * layer for processing. |
| 2174 | * | 2271 | * |
| 2175 | * The nvmet-fc layer will copy cmd payload to an internal structure for | 2272 | * The nvmet_fc layer allocates a local job structure (struct |
| 2176 | * processing. As such, upon completion of the routine, the LLDD may | 2273 | * nvmet_fc_fcp_iod) from the queue for the io and copies the |
| 2177 | * immediately free/reuse the CMD IU buffer passed in the call. | 2274 | * CMD IU buffer to the job structure. As such, on a successful |
| 2275 | * completion (returns 0), the LLDD may immediately free/reuse | ||
| 2276 | * the CMD IU buffer passed in the call. | ||
| 2178 | * | 2277 | * |
| 2179 | * If this routine returns error, the lldd should abort the exchange. | 2278 | * However, in some circumstances, due to the packetized nature of FC |
| 2279 | * and the api of the FC LLDD which may issue a hw command to send the | ||
| 2280 | * response, but the LLDD may not get the hw completion for that command | ||
| 2281 | * and upcall the nvmet_fc layer before a new command may be | ||
| 2282 | * asynchronously received - it's possible for a command to be received | ||
| 2283 | * before the LLDD and nvmet_fc have recycled the job structure. It gives | ||
| 2284 | * the appearance of more commands received than fits in the sq. | ||
| 2285 | * To alleviate this scenario, a temporary queue is maintained in the | ||
| 2286 | * transport for pending LLDD requests waiting for a queue job structure. | ||
| 2287 | * In these "overrun" cases, a temporary queue element is allocated | ||
| 2288 | * the LLDD request and CMD iu buffer information remembered, and the | ||
| 2289 | * routine returns a -EOVERFLOW status. Subsequently, when a queue job | ||
| 2290 | * structure is freed, it is immediately reallocated for anything on the | ||
| 2291 | * pending request list. The LLDD's defer_rcv() callback is called, | ||
| 2292 | * informing the LLDD that it may reuse the CMD IU buffer, and the io | ||
| 2293 | * is then started normally with the transport. | ||
| 2294 | * | ||
| 2295 | * The LLDD, when receiving an -EOVERFLOW completion status, is to treat | ||
| 2296 | * the completion as successful but must not reuse the CMD IU buffer | ||
| 2297 | * until the LLDD's defer_rcv() callback has been called for the | ||
| 2298 | * corresponding struct nvmefc_tgt_fcp_req pointer. | ||
| 2299 | * | ||
| 2300 | * If there is any other condition in which an error occurs, the | ||
| 2301 | * transport will return a non-zero status indicating the error. | ||
| 2302 | * In all cases other than -EOVERFLOW, the transport has not accepted the | ||
| 2303 | * request and the LLDD should abort the exchange. | ||
| 2180 | * | 2304 | * |
| 2181 | * @target_port: pointer to the (registered) target port the FCP CMD IU | 2305 | * @target_port: pointer to the (registered) target port the FCP CMD IU |
| 2182 | * was received on. | 2306 | * was received on. |
| @@ -2194,6 +2318,8 @@ nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port, | |||
| 2194 | struct nvme_fc_cmd_iu *cmdiu = cmdiubuf; | 2318 | struct nvme_fc_cmd_iu *cmdiu = cmdiubuf; |
| 2195 | struct nvmet_fc_tgt_queue *queue; | 2319 | struct nvmet_fc_tgt_queue *queue; |
| 2196 | struct nvmet_fc_fcp_iod *fod; | 2320 | struct nvmet_fc_fcp_iod *fod; |
| 2321 | struct nvmet_fc_defer_fcp_req *deferfcp; | ||
| 2322 | unsigned long flags; | ||
| 2197 | 2323 | ||
| 2198 | /* validate iu, so the connection id can be used to find the queue */ | 2324 | /* validate iu, so the connection id can be used to find the queue */ |
| 2199 | if ((cmdiubuf_len != sizeof(*cmdiu)) || | 2325 | if ((cmdiubuf_len != sizeof(*cmdiu)) || |
| @@ -2214,29 +2340,60 @@ nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port, | |||
| 2214 | * when the fod is freed. | 2340 | * when the fod is freed. |
| 2215 | */ | 2341 | */ |
| 2216 | 2342 | ||
| 2343 | spin_lock_irqsave(&queue->qlock, flags); | ||
| 2344 | |||
| 2217 | fod = nvmet_fc_alloc_fcp_iod(queue); | 2345 | fod = nvmet_fc_alloc_fcp_iod(queue); |
| 2218 | if (!fod) { | 2346 | if (fod) { |
| 2347 | spin_unlock_irqrestore(&queue->qlock, flags); | ||
| 2348 | |||
| 2349 | fcpreq->nvmet_fc_private = fod; | ||
| 2350 | fod->fcpreq = fcpreq; | ||
| 2351 | |||
| 2352 | memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len); | ||
| 2353 | |||
| 2354 | nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq); | ||
| 2355 | |||
| 2356 | return 0; | ||
| 2357 | } | ||
| 2358 | |||
| 2359 | if (!tgtport->ops->defer_rcv) { | ||
| 2360 | spin_unlock_irqrestore(&queue->qlock, flags); | ||
| 2219 | /* release the queue lookup reference */ | 2361 | /* release the queue lookup reference */ |
| 2220 | nvmet_fc_tgt_q_put(queue); | 2362 | nvmet_fc_tgt_q_put(queue); |
| 2221 | return -ENOENT; | 2363 | return -ENOENT; |
| 2222 | } | 2364 | } |
| 2223 | 2365 | ||
| 2224 | fcpreq->nvmet_fc_private = fod; | 2366 | deferfcp = list_first_entry_or_null(&queue->avail_defer_list, |
| 2225 | fod->fcpreq = fcpreq; | 2367 | struct nvmet_fc_defer_fcp_req, req_list); |
| 2226 | /* | 2368 | if (deferfcp) { |
| 2227 | * put all admin cmds on hw queue id 0. All io commands go to | 2369 | /* Just re-use one that was previously allocated */ |
| 2228 | * the respective hw queue based on a modulo basis | 2370 | list_del(&deferfcp->req_list); |
| 2229 | */ | 2371 | } else { |
| 2230 | fcpreq->hwqid = queue->qid ? | 2372 | spin_unlock_irqrestore(&queue->qlock, flags); |
| 2231 | ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0; | ||
| 2232 | memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len); | ||
| 2233 | 2373 | ||
| 2234 | if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR) | 2374 | /* Now we need to dynamically allocate one */ |
| 2235 | queue_work_on(queue->cpu, queue->work_q, &fod->work); | 2375 | deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL); |
| 2236 | else | 2376 | if (!deferfcp) { |
| 2237 | nvmet_fc_handle_fcp_rqst(tgtport, fod); | 2377 | /* release the queue lookup reference */ |
| 2378 | nvmet_fc_tgt_q_put(queue); | ||
| 2379 | return -ENOMEM; | ||
| 2380 | } | ||
| 2381 | spin_lock_irqsave(&queue->qlock, flags); | ||
| 2382 | } | ||
| 2238 | 2383 | ||
| 2239 | return 0; | 2384 | /* For now, use rspaddr / rsplen to save payload information */ |
| 2385 | fcpreq->rspaddr = cmdiubuf; | ||
| 2386 | fcpreq->rsplen = cmdiubuf_len; | ||
| 2387 | deferfcp->fcp_req = fcpreq; | ||
| 2388 | |||
| 2389 | /* defer processing till a fod becomes available */ | ||
| 2390 | list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list); | ||
| 2391 | |||
| 2392 | /* NOTE: the queue lookup reference is still valid */ | ||
| 2393 | |||
| 2394 | spin_unlock_irqrestore(&queue->qlock, flags); | ||
| 2395 | |||
| 2396 | return -EOVERFLOW; | ||
| 2240 | } | 2397 | } |
| 2241 | EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req); | 2398 | EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req); |
| 2242 | 2399 | ||
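
The comment block above describes the new deferral mechanism: when every per-queue job structure is busy, the incoming command is parked on a pending list and the call returns -EOVERFLOW; freeing a job immediately recycles it for the oldest deferred command, and the LLDD learns via defer_rcv() that the command buffer may be reused. A compact userspace sketch of that fixed-pool-plus-pending-list pattern, with invented names and none of the locking:

```c
/* Sketch of the defer-on-overflow pattern: a fixed pool of job slots
 * plus a pending queue that is drained the moment a slot is freed. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_JOBS 2

struct job { bool busy; int cmd; };

static struct job pool[NR_JOBS];
static int pending[8], pending_head, pending_tail;

static struct job *get_free_job(void)
{
	for (int i = 0; i < NR_JOBS; i++)
		if (!pool[i].busy)
			return &pool[i];
	return NULL;
}

static int rcv_cmd(int cmd)
{
	struct job *job = get_free_job();

	if (!job) {
		/* No slot: remember the command and tell the caller its
		 * buffer must stay valid until it is deferred-received. */
		pending[pending_tail++ % 8] = cmd;
		return -EOVERFLOW;
	}
	job->busy = true;
	job->cmd = cmd;
	printf("processing cmd %d\n", cmd);
	return 0;
}

static void free_job(struct job *job)
{
	if (pending_head != pending_tail) {
		/* Recycle the slot at once for the oldest deferred command. */
		job->cmd = pending[pending_head++ % 8];
		printf("defer_rcv + processing cmd %d\n", job->cmd);
		return;
	}
	job->busy = false;
}

int main(void)
{
	rcv_cmd(1);
	rcv_cmd(2);
	rcv_cmd(3);		/* returns -EOVERFLOW, command parked */
	free_job(&pool[0]);	/* slot reused immediately for cmd 3 */
	return 0;
}
```
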
diff --git a/drivers/of/device.c b/drivers/of/device.c index 28c38c756f92..e0a28ea341fe 100644 --- a/drivers/of/device.c +++ b/drivers/of/device.c | |||
| @@ -89,6 +89,7 @@ int of_dma_configure(struct device *dev, struct device_node *np) | |||
| 89 | bool coherent; | 89 | bool coherent; |
| 90 | unsigned long offset; | 90 | unsigned long offset; |
| 91 | const struct iommu_ops *iommu; | 91 | const struct iommu_ops *iommu; |
| 92 | u64 mask; | ||
| 92 | 93 | ||
| 93 | /* | 94 | /* |
| 94 | * Set default coherent_dma_mask to 32 bit. Drivers are expected to | 95 | * Set default coherent_dma_mask to 32 bit. Drivers are expected to |
| @@ -134,10 +135,9 @@ int of_dma_configure(struct device *dev, struct device_node *np) | |||
| 134 | * Limit coherent and dma mask based on size and default mask | 135 | * Limit coherent and dma mask based on size and default mask |
| 135 | * set by the driver. | 136 | * set by the driver. |
| 136 | */ | 137 | */ |
| 137 | dev->coherent_dma_mask = min(dev->coherent_dma_mask, | 138 | mask = DMA_BIT_MASK(ilog2(dma_addr + size - 1) + 1); |
| 138 | DMA_BIT_MASK(ilog2(dma_addr + size))); | 139 | dev->coherent_dma_mask &= mask; |
| 139 | *dev->dma_mask = min((*dev->dma_mask), | 140 | *dev->dma_mask &= mask; |
| 140 | DMA_BIT_MASK(ilog2(dma_addr + size))); | ||
| 141 | 141 | ||
| 142 | coherent = of_dma_is_coherent(np); | 142 | coherent = of_dma_is_coherent(np); |
| 143 | dev_dbg(dev, "device is%sdma coherent\n", | 143 | dev_dbg(dev, "device is%sdma coherent\n", |
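
The mask computed above must cover the highest byte the device can address, dma_addr + size - 1. Because ilog2() rounds down, the old ilog2(dma_addr + size) lost a bit whenever the limit was not a power of two (a 3 GiB window at offset 0 came out as a 31-bit mask); taking ilog2 of the last valid address and adding one gives the correct width. A compile-and-run sketch with local stand-ins for the kernel helpers:

```c
/* Sketch of the corrected mask-width calculation. dma_bit_mask() and
 * ilog2_u64() are local stand-ins for the kernel helpers. */
#include <inttypes.h>
#include <stdio.h>

static unsigned int ilog2_u64(uint64_t v)	/* floor(log2(v)), v != 0 */
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static uint64_t dma_bit_mask(unsigned int bits)
{
	return bits >= 64 ? ~0ULL : (1ULL << bits) - 1;
}

int main(void)
{
	uint64_t dma_addr = 0, size = 0xC0000000;	/* 3 GiB window at 0 */

	/* Highest byte the device may address is dma_addr + size - 1. */
	uint64_t good = dma_bit_mask(ilog2_u64(dma_addr + size - 1) + 1);
	uint64_t bad  = dma_bit_mask(ilog2_u64(dma_addr + size));

	printf("fixed mask 0x%" PRIx64 ", old mask 0x%" PRIx64 "\n", good, bad);
	return 0;
}
```
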
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c index 5c63b920b471..ed92c1254cff 100644 --- a/drivers/parisc/dino.c +++ b/drivers/parisc/dino.c | |||
| @@ -956,7 +956,7 @@ static int __init dino_probe(struct parisc_device *dev) | |||
| 956 | 956 | ||
| 957 | dino_dev->hba.dev = dev; | 957 | dino_dev->hba.dev = dev; |
| 958 | dino_dev->hba.base_addr = ioremap_nocache(hpa, 4096); | 958 | dino_dev->hba.base_addr = ioremap_nocache(hpa, 4096); |
| 959 | dino_dev->hba.lmmio_space_offset = 0; /* CPU addrs == bus addrs */ | 959 | dino_dev->hba.lmmio_space_offset = PCI_F_EXTEND; |
| 960 | spin_lock_init(&dino_dev->dinosaur_pen); | 960 | spin_lock_init(&dino_dev->dinosaur_pen); |
| 961 | dino_dev->hba.iommu = ccio_get_iommu(dev); | 961 | dino_dev->hba.iommu = ccio_get_iommu(dev); |
| 962 | 962 | ||
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index b4b7eab29400..fdf65a6c13f6 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
| @@ -514,7 +514,7 @@ EXPORT_SYMBOL(pci_find_resource); | |||
| 514 | */ | 514 | */ |
| 515 | struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev) | 515 | struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev) |
| 516 | { | 516 | { |
| 517 | struct pci_dev *bridge, *highest_pcie_bridge = NULL; | 517 | struct pci_dev *bridge, *highest_pcie_bridge = dev; |
| 518 | 518 | ||
| 519 | bridge = pci_upstream_bridge(dev); | 519 | bridge = pci_upstream_bridge(dev); |
| 520 | while (bridge && pci_is_pcie(bridge)) { | 520 | while (bridge && pci_is_pcie(bridge)) { |
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index c31310db0404..e6a917b4acd3 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
| @@ -1762,6 +1762,48 @@ static void pci_configure_extended_tags(struct pci_dev *dev) | |||
| 1762 | PCI_EXP_DEVCTL_EXT_TAG); | 1762 | PCI_EXP_DEVCTL_EXT_TAG); |
| 1763 | } | 1763 | } |
| 1764 | 1764 | ||
| 1765 | /** | ||
| 1766 | * pcie_relaxed_ordering_enabled - Probe for PCIe relaxed ordering enable | ||
| 1767 | * @dev: PCI device to query | ||
| 1768 | * | ||
| 1769 | * Returns true if the device has enabled relaxed ordering attribute. | ||
| 1770 | */ | ||
| 1771 | bool pcie_relaxed_ordering_enabled(struct pci_dev *dev) | ||
| 1772 | { | ||
| 1773 | u16 v; | ||
| 1774 | |||
| 1775 | pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &v); | ||
| 1776 | |||
| 1777 | return !!(v & PCI_EXP_DEVCTL_RELAX_EN); | ||
| 1778 | } | ||
| 1779 | EXPORT_SYMBOL(pcie_relaxed_ordering_enabled); | ||
| 1780 | |||
| 1781 | static void pci_configure_relaxed_ordering(struct pci_dev *dev) | ||
| 1782 | { | ||
| 1783 | struct pci_dev *root; | ||
| 1784 | |||
| 1785 | /* PCI_EXP_DEVICE_RELAX_EN is RsvdP in VFs */ | ||
| 1786 | if (dev->is_virtfn) | ||
| 1787 | return; | ||
| 1788 | |||
| 1789 | if (!pcie_relaxed_ordering_enabled(dev)) | ||
| 1790 | return; | ||
| 1791 | |||
| 1792 | /* | ||
| 1793 | * For now, we only deal with Relaxed Ordering issues with Root | ||
| 1794 | * Ports. Peer-to-Peer DMA is another can of worms. | ||
| 1795 | */ | ||
| 1796 | root = pci_find_pcie_root_port(dev); | ||
| 1797 | if (!root) | ||
| 1798 | return; | ||
| 1799 | |||
| 1800 | if (root->dev_flags & PCI_DEV_FLAGS_NO_RELAXED_ORDERING) { | ||
| 1801 | pcie_capability_clear_word(dev, PCI_EXP_DEVCTL, | ||
| 1802 | PCI_EXP_DEVCTL_RELAX_EN); | ||
| 1803 | dev_info(&dev->dev, "Disable Relaxed Ordering because the Root Port didn't support it\n"); | ||
| 1804 | } | ||
| 1805 | } | ||
| 1806 | |||
| 1765 | static void pci_configure_device(struct pci_dev *dev) | 1807 | static void pci_configure_device(struct pci_dev *dev) |
| 1766 | { | 1808 | { |
| 1767 | struct hotplug_params hpp; | 1809 | struct hotplug_params hpp; |
| @@ -1769,6 +1811,7 @@ static void pci_configure_device(struct pci_dev *dev) | |||
| 1769 | 1811 | ||
| 1770 | pci_configure_mps(dev); | 1812 | pci_configure_mps(dev); |
| 1771 | pci_configure_extended_tags(dev); | 1813 | pci_configure_extended_tags(dev); |
| 1814 | pci_configure_relaxed_ordering(dev); | ||
| 1772 | 1815 | ||
| 1773 | memset(&hpp, 0, sizeof(hpp)); | 1816 | memset(&hpp, 0, sizeof(hpp)); |
| 1774 | ret = pci_get_hp_params(dev, &hpp); | 1817 | ret = pci_get_hp_params(dev, &hpp); |
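
pcie_relaxed_ordering_enabled() above is simply a test of the Enable Relaxed Ordering bit in the PCIe Device Control register, and pci_configure_relaxed_ordering() clears that bit when the root port carries the quirk flag added below. A tiny sketch of the bit test on a raw register value; the register value is invented, and in the kernel it would come from pcie_capability_read_word().

```c
/* Sketch of the Device Control register test behind
 * pcie_relaxed_ordering_enabled(). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PCI_EXP_DEVCTL_RELAX_EN 0x0010	/* Enable Relaxed Ordering */

static bool relaxed_ordering_enabled(uint16_t devctl)
{
	return !!(devctl & PCI_EXP_DEVCTL_RELAX_EN);
}

int main(void)
{
	uint16_t devctl = 0x2810;	/* example register value with bit 4 set */

	printf("%s\n", relaxed_ordering_enabled(devctl) ? "enabled" : "disabled");
	return 0;
}
```
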
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 6967c6b4cf6b..140760403f36 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
| @@ -4016,6 +4016,95 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6869, PCI_CLASS_NOT_DEFINED, 8, | |||
| 4016 | quirk_tw686x_class); | 4016 | quirk_tw686x_class); |
| 4017 | 4017 | ||
| 4018 | /* | 4018 | /* |
| 4019 | * Some devices have problems with Transaction Layer Packets with the Relaxed | ||
| 4020 | * Ordering Attribute set. Such devices should mark themselves and other | ||
| 4021 | * Device Drivers should check before sending TLPs with RO set. | ||
| 4022 | */ | ||
| 4023 | static void quirk_relaxedordering_disable(struct pci_dev *dev) | ||
| 4024 | { | ||
| 4025 | dev->dev_flags |= PCI_DEV_FLAGS_NO_RELAXED_ORDERING; | ||
| 4026 | dev_info(&dev->dev, "Disable Relaxed Ordering Attributes to avoid PCIe Completion erratum\n"); | ||
| 4027 | } | ||
| 4028 | |||
| 4029 | /* | ||
| 4030 | * Intel Xeon processors based on Broadwell/Haswell microarchitecture Root | ||
| 4031 | * Complex has a Flow Control Credit issue which can cause performance | ||
| 4032 | * problems with Upstream Transaction Layer Packets with Relaxed Ordering set. | ||
| 4033 | */ | ||
| 4034 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f01, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4035 | quirk_relaxedordering_disable); | ||
| 4036 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f02, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4037 | quirk_relaxedordering_disable); | ||
| 4038 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f03, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4039 | quirk_relaxedordering_disable); | ||
| 4040 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f04, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4041 | quirk_relaxedordering_disable); | ||
| 4042 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f05, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4043 | quirk_relaxedordering_disable); | ||
| 4044 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f06, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4045 | quirk_relaxedordering_disable); | ||
| 4046 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f07, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4047 | quirk_relaxedordering_disable); | ||
| 4048 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f08, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4049 | quirk_relaxedordering_disable); | ||
| 4050 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f09, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4051 | quirk_relaxedordering_disable); | ||
| 4052 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0a, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4053 | quirk_relaxedordering_disable); | ||
| 4054 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0b, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4055 | quirk_relaxedordering_disable); | ||
| 4056 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0c, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4057 | quirk_relaxedordering_disable); | ||
| 4058 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0d, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4059 | quirk_relaxedordering_disable); | ||
| 4060 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0e, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4061 | quirk_relaxedordering_disable); | ||
| 4062 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f01, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4063 | quirk_relaxedordering_disable); | ||
| 4064 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f02, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4065 | quirk_relaxedordering_disable); | ||
| 4066 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f03, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4067 | quirk_relaxedordering_disable); | ||
| 4068 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f04, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4069 | quirk_relaxedordering_disable); | ||
| 4070 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f05, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4071 | quirk_relaxedordering_disable); | ||
| 4072 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f06, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4073 | quirk_relaxedordering_disable); | ||
| 4074 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f07, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4075 | quirk_relaxedordering_disable); | ||
| 4076 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f08, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4077 | quirk_relaxedordering_disable); | ||
| 4078 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f09, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4079 | quirk_relaxedordering_disable); | ||
| 4080 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0a, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4081 | quirk_relaxedordering_disable); | ||
| 4082 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0b, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4083 | quirk_relaxedordering_disable); | ||
| 4084 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0c, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4085 | quirk_relaxedordering_disable); | ||
| 4086 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0d, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4087 | quirk_relaxedordering_disable); | ||
| 4088 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0e, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4089 | quirk_relaxedordering_disable); | ||
| 4090 | |||
| 4091 | /* | ||
| 4092 | * The AMD ARM A1100 (AKA "SEATTLE") SoC has a bug in its PCIe Root Complex | ||
| 4093 | * where Upstream Transaction Layer Packets with the Relaxed Ordering | ||
| 4094 | * Attribute clear are allowed to bypass earlier TLPs with Relaxed Ordering | ||
| 4095 | * set. This is a violation of the PCIe 3.0 Transaction Ordering Rules | ||
| 4096 | * outlined in Section 2.4.1 (PCI Express(r) Base Specification Revision 3.0 | ||
| 4097 | * November 10, 2010). As a result, on this platform we can't use Relaxed | ||
| 4098 | * Ordering for Upstream TLPs. | ||
| 4099 | */ | ||
| 4100 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a00, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4101 | quirk_relaxedordering_disable); | ||
| 4102 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a01, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4103 | quirk_relaxedordering_disable); | ||
| 4104 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a02, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4105 | quirk_relaxedordering_disable); | ||
| 4106 | |||
| 4107 | /* | ||
| 4019 | * Per PCIe r3.0, sec 2.2.9, "Completion headers must supply the same | 4108 | * Per PCIe r3.0, sec 2.2.9, "Completion headers must supply the same |
| 4020 | * values for the Attribute as were supplied in the header of the | 4109 | * values for the Attribute as were supplied in the header of the |
| 4021 | * corresponding Request, except as explicitly allowed when IDO is used." | 4110 | * corresponding Request, except as explicitly allowed when IDO is used." |
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c index 4fac49e55d47..4b43aa62fbc7 100644 --- a/drivers/rtc/rtc-ds1307.c +++ b/drivers/rtc/rtc-ds1307.c | |||
| @@ -1301,7 +1301,6 @@ static void ds1307_clks_register(struct ds1307 *ds1307) | |||
| 1301 | static const struct regmap_config regmap_config = { | 1301 | static const struct regmap_config regmap_config = { |
| 1302 | .reg_bits = 8, | 1302 | .reg_bits = 8, |
| 1303 | .val_bits = 8, | 1303 | .val_bits = 8, |
| 1304 | .max_register = 0x12, | ||
| 1305 | }; | 1304 | }; |
| 1306 | 1305 | ||
| 1307 | static int ds1307_probe(struct i2c_client *client, | 1306 | static int ds1307_probe(struct i2c_client *client, |
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index f4538d7a3016..d145e0d90227 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig | |||
| @@ -47,6 +47,17 @@ config SCSI_NETLINK | |||
| 47 | default n | 47 | default n |
| 48 | depends on NET | 48 | depends on NET |
| 49 | 49 | ||
| 50 | config SCSI_MQ_DEFAULT | ||
| 51 | bool "SCSI: use blk-mq I/O path by default" | ||
| 52 | depends on SCSI | ||
| 53 | ---help--- | ||
| 54 | This option enables the new blk-mq based I/O path for SCSI | ||
| 55 | devices by default. With the option the scsi_mod.use_blk_mq | ||
| 56 | module/boot option defaults to Y, without it to N, but it can | ||
| 57 | still be overridden either way. | ||
| 58 | |||
| 59 | If unsure say N. | ||
| 60 | |||
| 50 | config SCSI_PROC_FS | 61 | config SCSI_PROC_FS |
| 51 | bool "legacy /proc/scsi/ support" | 62 | bool "legacy /proc/scsi/ support" |
| 52 | depends on SCSI && PROC_FS | 63 | depends on SCSI && PROC_FS |
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 4591113c49de..a1a2c71e1626 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c | |||
| @@ -549,7 +549,9 @@ static void get_container_name_callback(void *context, struct fib * fibptr) | |||
| 549 | if ((le32_to_cpu(get_name_reply->status) == CT_OK) | 549 | if ((le32_to_cpu(get_name_reply->status) == CT_OK) |
| 550 | && (get_name_reply->data[0] != '\0')) { | 550 | && (get_name_reply->data[0] != '\0')) { |
| 551 | char *sp = get_name_reply->data; | 551 | char *sp = get_name_reply->data; |
| 552 | sp[sizeof(((struct aac_get_name_resp *)NULL)->data)] = '\0'; | 552 | int data_size = FIELD_SIZEOF(struct aac_get_name_resp, data); |
| 553 | |||
| 554 | sp[data_size - 1] = '\0'; | ||
| 553 | while (*sp == ' ') | 555 | while (*sp == ' ') |
| 554 | ++sp; | 556 | ++sp; |
| 555 | if (*sp) { | 557 | if (*sp) { |
| @@ -579,12 +581,15 @@ static void get_container_name_callback(void *context, struct fib * fibptr) | |||
| 579 | static int aac_get_container_name(struct scsi_cmnd * scsicmd) | 581 | static int aac_get_container_name(struct scsi_cmnd * scsicmd) |
| 580 | { | 582 | { |
| 581 | int status; | 583 | int status; |
| 584 | int data_size; | ||
| 582 | struct aac_get_name *dinfo; | 585 | struct aac_get_name *dinfo; |
| 583 | struct fib * cmd_fibcontext; | 586 | struct fib * cmd_fibcontext; |
| 584 | struct aac_dev * dev; | 587 | struct aac_dev * dev; |
| 585 | 588 | ||
| 586 | dev = (struct aac_dev *)scsicmd->device->host->hostdata; | 589 | dev = (struct aac_dev *)scsicmd->device->host->hostdata; |
| 587 | 590 | ||
| 591 | data_size = FIELD_SIZEOF(struct aac_get_name_resp, data); | ||
| 592 | |||
| 588 | cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); | 593 | cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); |
| 589 | 594 | ||
| 590 | aac_fib_init(cmd_fibcontext); | 595 | aac_fib_init(cmd_fibcontext); |
| @@ -593,7 +598,7 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd) | |||
| 593 | dinfo->command = cpu_to_le32(VM_ContainerConfig); | 598 | dinfo->command = cpu_to_le32(VM_ContainerConfig); |
| 594 | dinfo->type = cpu_to_le32(CT_READ_NAME); | 599 | dinfo->type = cpu_to_le32(CT_READ_NAME); |
| 595 | dinfo->cid = cpu_to_le32(scmd_id(scsicmd)); | 600 | dinfo->cid = cpu_to_le32(scmd_id(scsicmd)); |
| 596 | dinfo->count = cpu_to_le32(sizeof(((struct aac_get_name_resp *)NULL)->data)); | 601 | dinfo->count = cpu_to_le32(data_size - 1); |
| 597 | 602 | ||
| 598 | status = aac_fib_send(ContainerCommand, | 603 | status = aac_fib_send(ContainerCommand, |
| 599 | cmd_fibcontext, | 604 | cmd_fibcontext, |
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index d31a9bc2ba69..ee2667e20e42 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h | |||
| @@ -2274,7 +2274,7 @@ struct aac_get_name_resp { | |||
| 2274 | __le32 parm3; | 2274 | __le32 parm3; |
| 2275 | __le32 parm4; | 2275 | __le32 parm4; |
| 2276 | __le32 parm5; | 2276 | __le32 parm5; |
| 2277 | u8 data[16]; | 2277 | u8 data[17]; |
| 2278 | }; | 2278 | }; |
| 2279 | 2279 | ||
| 2280 | #define CT_CID_TO_32BITS_UID 165 | 2280 | #define CT_CID_TO_32BITS_UID 165 |
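
The array above grows from 16 to 17 bytes so the response always has room for a terminating NUL, and the driver now sizes requests with FIELD_SIZEOF() instead of the sizeof-on-NULL-pointer expression. A standalone sketch of the same idea; the struct is a stand-in for aac_get_name_resp.

```c
/* Size a fixed response buffer so it can always be NUL-terminated.
 * FIELD_SIZEOF mirrors the kernel macro of the same name. */
#include <stdio.h>
#include <string.h>

#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))

struct get_name_resp {
	unsigned int status;
	char data[17];		/* 16 payload bytes + room for '\0' */
};

int main(void)
{
	struct get_name_resp resp;
	size_t data_size = FIELD_SIZEOF(struct get_name_resp, data);

	/* Ask the device for at most data_size - 1 bytes ... */
	memset(resp.data, 'A', sizeof(resp.data));

	/* ... so this terminator can never land outside the array. */
	resp.data[data_size - 1] = '\0';
	printf("%zu: %s\n", data_size, resp.data);
	return 0;
}
```
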
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c index 2029ad225121..5be0086142ca 100644 --- a/drivers/scsi/csiostor/csio_hw.c +++ b/drivers/scsi/csiostor/csio_hw.c | |||
| @@ -3845,8 +3845,10 @@ csio_hw_start(struct csio_hw *hw) | |||
| 3845 | 3845 | ||
| 3846 | if (csio_is_hw_ready(hw)) | 3846 | if (csio_is_hw_ready(hw)) |
| 3847 | return 0; | 3847 | return 0; |
| 3848 | else | 3848 | else if (csio_match_state(hw, csio_hws_uninit)) |
| 3849 | return -EINVAL; | 3849 | return -EINVAL; |
| 3850 | else | ||
| 3851 | return -ENODEV; | ||
| 3850 | } | 3852 | } |
| 3851 | 3853 | ||
| 3852 | int | 3854 | int |
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c index ea0c31086cc6..dcd074169aa9 100644 --- a/drivers/scsi/csiostor/csio_init.c +++ b/drivers/scsi/csiostor/csio_init.c | |||
| @@ -969,10 +969,14 @@ static int csio_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 969 | 969 | ||
| 970 | pci_set_drvdata(pdev, hw); | 970 | pci_set_drvdata(pdev, hw); |
| 971 | 971 | ||
| 972 | if (csio_hw_start(hw) != 0) { | 972 | rv = csio_hw_start(hw); |
| 973 | dev_err(&pdev->dev, | 973 | if (rv) { |
| 974 | "Failed to start FW, continuing in debug mode.\n"); | 974 | if (rv == -EINVAL) { |
| 975 | return 0; | 975 | dev_err(&pdev->dev, |
| 976 | "Failed to start FW, continuing in debug mode.\n"); | ||
| 977 | return 0; | ||
| 978 | } | ||
| 979 | goto err_lnode_exit; | ||
| 976 | } | 980 | } |
| 977 | 981 | ||
| 978 | sprintf(hw->fwrev_str, "%u.%u.%u.%u\n", | 982 | sprintf(hw->fwrev_str, "%u.%u.%u.%u\n", |
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index a69a9ac836f5..1d02cf9fe06c 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | |||
| @@ -1635,6 +1635,9 @@ static int init_act_open(struct cxgbi_sock *csk) | |||
| 1635 | goto rel_resource; | 1635 | goto rel_resource; |
| 1636 | } | 1636 | } |
| 1637 | 1637 | ||
| 1638 | if (!(n->nud_state & NUD_VALID)) | ||
| 1639 | neigh_event_send(n, NULL); | ||
| 1640 | |||
| 1638 | csk->atid = cxgb4_alloc_atid(lldi->tids, csk); | 1641 | csk->atid = cxgb4_alloc_atid(lldi->tids, csk); |
| 1639 | if (csk->atid < 0) { | 1642 | if (csk->atid < 0) { |
| 1640 | pr_err("%s, NO atid available.\n", ndev->name); | 1643 | pr_err("%s, NO atid available.\n", ndev->name); |
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index b0c68d24db01..da5bdbdcce52 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c | |||
| @@ -3351,6 +3351,16 @@ static void ipr_worker_thread(struct work_struct *work) | |||
| 3351 | return; | 3351 | return; |
| 3352 | } | 3352 | } |
| 3353 | 3353 | ||
| 3354 | if (ioa_cfg->scsi_unblock) { | ||
| 3355 | ioa_cfg->scsi_unblock = 0; | ||
| 3356 | ioa_cfg->scsi_blocked = 0; | ||
| 3357 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | ||
| 3358 | scsi_unblock_requests(ioa_cfg->host); | ||
| 3359 | spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); | ||
| 3360 | if (ioa_cfg->scsi_blocked) | ||
| 3361 | scsi_block_requests(ioa_cfg->host); | ||
| 3362 | } | ||
| 3363 | |||
| 3354 | if (!ioa_cfg->scan_enabled) { | 3364 | if (!ioa_cfg->scan_enabled) { |
| 3355 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | 3365 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); |
| 3356 | return; | 3366 | return; |
| @@ -7211,9 +7221,8 @@ static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd) | |||
| 7211 | ENTER; | 7221 | ENTER; |
| 7212 | if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { | 7222 | if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { |
| 7213 | ipr_trace; | 7223 | ipr_trace; |
| 7214 | spin_unlock_irq(ioa_cfg->host->host_lock); | 7224 | ioa_cfg->scsi_unblock = 1; |
| 7215 | scsi_unblock_requests(ioa_cfg->host); | 7225 | schedule_work(&ioa_cfg->work_q); |
| 7216 | spin_lock_irq(ioa_cfg->host->host_lock); | ||
| 7217 | } | 7226 | } |
| 7218 | 7227 | ||
| 7219 | ioa_cfg->in_reset_reload = 0; | 7228 | ioa_cfg->in_reset_reload = 0; |
| @@ -7287,13 +7296,7 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd) | |||
| 7287 | list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); | 7296 | list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); |
| 7288 | wake_up_all(&ioa_cfg->reset_wait_q); | 7297 | wake_up_all(&ioa_cfg->reset_wait_q); |
| 7289 | 7298 | ||
| 7290 | spin_unlock(ioa_cfg->host->host_lock); | 7299 | ioa_cfg->scsi_unblock = 1; |
| 7291 | scsi_unblock_requests(ioa_cfg->host); | ||
| 7292 | spin_lock(ioa_cfg->host->host_lock); | ||
| 7293 | |||
| 7294 | if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) | ||
| 7295 | scsi_block_requests(ioa_cfg->host); | ||
| 7296 | |||
| 7297 | schedule_work(&ioa_cfg->work_q); | 7300 | schedule_work(&ioa_cfg->work_q); |
| 7298 | LEAVE; | 7301 | LEAVE; |
| 7299 | return IPR_RC_JOB_RETURN; | 7302 | return IPR_RC_JOB_RETURN; |
| @@ -9249,8 +9252,11 @@ static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg, | |||
| 9249 | spin_unlock(&ioa_cfg->hrrq[i]._lock); | 9252 | spin_unlock(&ioa_cfg->hrrq[i]._lock); |
| 9250 | } | 9253 | } |
| 9251 | wmb(); | 9254 | wmb(); |
| 9252 | if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) | 9255 | if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { |
| 9256 | ioa_cfg->scsi_unblock = 0; | ||
| 9257 | ioa_cfg->scsi_blocked = 1; | ||
| 9253 | scsi_block_requests(ioa_cfg->host); | 9258 | scsi_block_requests(ioa_cfg->host); |
| 9259 | } | ||
| 9254 | 9260 | ||
| 9255 | ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); | 9261 | ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); |
| 9256 | ioa_cfg->reset_cmd = ipr_cmd; | 9262 | ioa_cfg->reset_cmd = ipr_cmd; |
| @@ -9306,9 +9312,8 @@ static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg, | |||
| 9306 | wake_up_all(&ioa_cfg->reset_wait_q); | 9312 | wake_up_all(&ioa_cfg->reset_wait_q); |
| 9307 | 9313 | ||
| 9308 | if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { | 9314 | if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { |
| 9309 | spin_unlock_irq(ioa_cfg->host->host_lock); | 9315 | ioa_cfg->scsi_unblock = 1; |
| 9310 | scsi_unblock_requests(ioa_cfg->host); | 9316 | schedule_work(&ioa_cfg->work_q); |
| 9311 | spin_lock_irq(ioa_cfg->host->host_lock); | ||
| 9312 | } | 9317 | } |
| 9313 | return; | 9318 | return; |
| 9314 | } else { | 9319 | } else { |
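
Rather than dropping and retaking host_lock around scsi_unblock_requests() in the reset paths, the changes above set the new scsi_unblock flag and let the worker thread perform the unblock with the lock released, re-blocking if another reset raced in. A minimal pthread sketch of that hand-off; the flag names mirror the patch, everything else is illustrative.

```c
/* Userspace sketch of deferring the unblock to a worker so the reset
 * path never has to drop and retake its lock. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;
static bool scsi_unblock, scsi_blocked;

static void scsi_unblock_requests(void) { puts("unblock requests"); }
static void scsi_block_requests(void)   { puts("block requests"); }

/* Reset completion path: only records the intent under the lock. */
static void reset_done(void)
{
	pthread_mutex_lock(&host_lock);
	scsi_unblock = true;		/* worker will unblock for us */
	pthread_mutex_unlock(&host_lock);
}

/* Worker: performs the unblock with the lock dropped, then re-checks
 * whether a newer reset blocked the host in the meantime. */
static void worker(void)
{
	pthread_mutex_lock(&host_lock);
	if (scsi_unblock) {
		scsi_unblock = false;
		scsi_blocked = false;
		pthread_mutex_unlock(&host_lock);
		scsi_unblock_requests();
		pthread_mutex_lock(&host_lock);
		if (scsi_blocked)
			scsi_block_requests();
	}
	pthread_mutex_unlock(&host_lock);
}

int main(void)
{
	reset_done();
	worker();
	return 0;
}
```
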
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index e98a87a65335..c7f0e9e3cd7d 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h | |||
| @@ -1488,6 +1488,8 @@ struct ipr_ioa_cfg { | |||
| 1488 | u8 cfg_locked:1; | 1488 | u8 cfg_locked:1; |
| 1489 | u8 clear_isr:1; | 1489 | u8 clear_isr:1; |
| 1490 | u8 probe_done:1; | 1490 | u8 probe_done:1; |
| 1491 | u8 scsi_unblock:1; | ||
| 1492 | u8 scsi_blocked:1; | ||
| 1491 | 1493 | ||
| 1492 | u8 revid; | 1494 | u8 revid; |
| 1493 | 1495 | ||
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 4ed48ed38e79..7ee1a94c0b33 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c | |||
| @@ -205,8 +205,10 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, | |||
| 205 | atomic_read(&tgtp->xmt_ls_rsp_error)); | 205 | atomic_read(&tgtp->xmt_ls_rsp_error)); |
| 206 | 206 | ||
| 207 | len += snprintf(buf+len, PAGE_SIZE-len, | 207 | len += snprintf(buf+len, PAGE_SIZE-len, |
| 208 | "FCP: Rcv %08x Release %08x Drop %08x\n", | 208 | "FCP: Rcv %08x Defer %08x Release %08x " |
| 209 | "Drop %08x\n", | ||
| 209 | atomic_read(&tgtp->rcv_fcp_cmd_in), | 210 | atomic_read(&tgtp->rcv_fcp_cmd_in), |
| 211 | atomic_read(&tgtp->rcv_fcp_cmd_defer), | ||
| 210 | atomic_read(&tgtp->xmt_fcp_release), | 212 | atomic_read(&tgtp->xmt_fcp_release), |
| 211 | atomic_read(&tgtp->rcv_fcp_cmd_drop)); | 213 | atomic_read(&tgtp->rcv_fcp_cmd_drop)); |
| 212 | 214 | ||
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 5cc8b0f7d885..744f3f395b64 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c | |||
| @@ -782,8 +782,11 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) | |||
| 782 | atomic_read(&tgtp->xmt_ls_rsp_error)); | 782 | atomic_read(&tgtp->xmt_ls_rsp_error)); |
| 783 | 783 | ||
| 784 | len += snprintf(buf + len, size - len, | 784 | len += snprintf(buf + len, size - len, |
| 785 | "FCP: Rcv %08x Drop %08x\n", | 785 | "FCP: Rcv %08x Defer %08x Release %08x " |
| 786 | "Drop %08x\n", | ||
| 786 | atomic_read(&tgtp->rcv_fcp_cmd_in), | 787 | atomic_read(&tgtp->rcv_fcp_cmd_in), |
| 788 | atomic_read(&tgtp->rcv_fcp_cmd_defer), | ||
| 789 | atomic_read(&tgtp->xmt_fcp_release), | ||
| 787 | atomic_read(&tgtp->rcv_fcp_cmd_drop)); | 790 | atomic_read(&tgtp->rcv_fcp_cmd_drop)); |
| 788 | 791 | ||
| 789 | if (atomic_read(&tgtp->rcv_fcp_cmd_in) != | 792 | if (atomic_read(&tgtp->rcv_fcp_cmd_in) != |
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index fbeec344c6cc..bbbd0f84160d 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c | |||
| @@ -841,12 +841,31 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport, | |||
| 841 | lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); | 841 | lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); |
| 842 | } | 842 | } |
| 843 | 843 | ||
| 844 | static void | ||
| 845 | lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport, | ||
| 846 | struct nvmefc_tgt_fcp_req *rsp) | ||
| 847 | { | ||
| 848 | struct lpfc_nvmet_tgtport *tgtp; | ||
| 849 | struct lpfc_nvmet_rcv_ctx *ctxp = | ||
| 850 | container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req); | ||
| 851 | struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer; | ||
| 852 | struct lpfc_hba *phba = ctxp->phba; | ||
| 853 | |||
| 854 | lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n", | ||
| 855 | ctxp->oxid, ctxp->size, smp_processor_id()); | ||
| 856 | |||
| 857 | tgtp = phba->targetport->private; | ||
| 858 | atomic_inc(&tgtp->rcv_fcp_cmd_defer); | ||
| 859 | lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ | ||
| 860 | } | ||
| 861 | |||
| 844 | static struct nvmet_fc_target_template lpfc_tgttemplate = { | 862 | static struct nvmet_fc_target_template lpfc_tgttemplate = { |
| 845 | .targetport_delete = lpfc_nvmet_targetport_delete, | 863 | .targetport_delete = lpfc_nvmet_targetport_delete, |
| 846 | .xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp, | 864 | .xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp, |
| 847 | .fcp_op = lpfc_nvmet_xmt_fcp_op, | 865 | .fcp_op = lpfc_nvmet_xmt_fcp_op, |
| 848 | .fcp_abort = lpfc_nvmet_xmt_fcp_abort, | 866 | .fcp_abort = lpfc_nvmet_xmt_fcp_abort, |
| 849 | .fcp_req_release = lpfc_nvmet_xmt_fcp_release, | 867 | .fcp_req_release = lpfc_nvmet_xmt_fcp_release, |
| 868 | .defer_rcv = lpfc_nvmet_defer_rcv, | ||
| 850 | 869 | ||
| 851 | .max_hw_queues = 1, | 870 | .max_hw_queues = 1, |
| 852 | .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS, | 871 | .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS, |
| @@ -1504,6 +1523,17 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, | |||
| 1504 | return; | 1523 | return; |
| 1505 | } | 1524 | } |
| 1506 | 1525 | ||
| 1526 | /* Processing of FCP command is deferred */ | ||
| 1527 | if (rc == -EOVERFLOW) { | ||
| 1528 | lpfc_nvmeio_data(phba, | ||
| 1529 | "NVMET RCV BUSY: xri x%x sz %d from %06x\n", | ||
| 1530 | oxid, size, sid); | ||
| 1531 | /* defer reposting rcv buffer till .defer_rcv callback */ | ||
| 1532 | ctxp->rqb_buffer = nvmebuf; | ||
| 1533 | atomic_inc(&tgtp->rcv_fcp_cmd_out); | ||
| 1534 | return; | ||
| 1535 | } | ||
| 1536 | |||
| 1507 | atomic_inc(&tgtp->rcv_fcp_cmd_drop); | 1537 | atomic_inc(&tgtp->rcv_fcp_cmd_drop); |
| 1508 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, | 1538 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, |
| 1509 | "6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n", | 1539 | "6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n", |
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h index e675ef17be08..48a76788b003 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.h +++ b/drivers/scsi/lpfc/lpfc_nvmet.h | |||
| @@ -49,6 +49,7 @@ struct lpfc_nvmet_tgtport { | |||
| 49 | atomic_t rcv_fcp_cmd_in; | 49 | atomic_t rcv_fcp_cmd_in; |
| 50 | atomic_t rcv_fcp_cmd_out; | 50 | atomic_t rcv_fcp_cmd_out; |
| 51 | atomic_t rcv_fcp_cmd_drop; | 51 | atomic_t rcv_fcp_cmd_drop; |
| 52 | atomic_t rcv_fcp_cmd_defer; | ||
| 52 | atomic_t xmt_fcp_release; | 53 | atomic_t xmt_fcp_release; |
| 53 | 54 | ||
| 54 | /* Stats counters - lpfc_nvmet_xmt_fcp_op */ | 55 | /* Stats counters - lpfc_nvmet_xmt_fcp_op */ |
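Taken together, the lpfc changes implement a deferred-receive handshake: when the NVMe target core cannot accept an FCP command immediately, lpfc_nvmet_unsol_fcp_buffer() sees -EOVERFLOW, parks the receive buffer in ctxp->rqb_buffer instead of dropping the I/O, and the new .defer_rcv callback reposts the buffer once the core has consumed the payload; the rcv_fcp_cmd_defer counter makes those deferrals visible in the sysfs and debugfs dumps patched above. A minimal sketch of the same handshake with hypothetical hypo_* names, not the driver's actual structures:

```c
#include <linux/errno.h>

struct hypo_buf;
struct hypo_port {
	struct hypo_buf *deferred_buf;		/* buffer held back for a busy core */
};

int hypo_core_rcv_cmd(struct hypo_port *port, struct hypo_buf *buf);	/* hypothetical */
void hypo_repost_buf(struct hypo_port *port, struct hypo_buf *buf);	/* hypothetical */

static int hypo_handle_unsol_cmd(struct hypo_port *port, struct hypo_buf *buf)
{
	int rc = hypo_core_rcv_cmd(port, buf);	/* hand the payload to the core */

	if (rc == 0) {
		hypo_repost_buf(port, buf);	/* core took it: recycle the buffer now */
		return 0;
	}
	if (rc == -EOVERFLOW) {
		port->deferred_buf = buf;	/* core busy: keep the buffer, keep the cmd */
		return 0;
	}
	hypo_repost_buf(port, buf);		/* hard failure: the command is dropped */
	return rc;
}

/* .defer_rcv-style callback: the core is done with the payload. */
static void hypo_defer_rcv(struct hypo_port *port)
{
	hypo_repost_buf(port, port->deferred_buf);
	port->deferred_buf = NULL;
}
```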
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 316c3df0c3fd..71c4746341ea 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c | |||
| @@ -6228,8 +6228,8 @@ static int megasas_probe_one(struct pci_dev *pdev, | |||
| 6228 | fail_start_aen: | 6228 | fail_start_aen: |
| 6229 | fail_io_attach: | 6229 | fail_io_attach: |
| 6230 | megasas_mgmt_info.count--; | 6230 | megasas_mgmt_info.count--; |
| 6231 | megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL; | ||
| 6232 | megasas_mgmt_info.max_index--; | 6231 | megasas_mgmt_info.max_index--; |
| 6232 | megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL; | ||
| 6233 | 6233 | ||
| 6234 | instance->instancet->disable_intr(instance); | 6234 | instance->instancet->disable_intr(instance); |
| 6235 | megasas_destroy_irqs(instance); | 6235 | megasas_destroy_irqs(instance); |
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c index 33142610882f..b18646d6057f 100644 --- a/drivers/scsi/qla2xxx/qla_tmpl.c +++ b/drivers/scsi/qla2xxx/qla_tmpl.c | |||
| @@ -401,9 +401,6 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha, | |||
| 401 | for (i = 0; i < vha->hw->max_req_queues; i++) { | 401 | for (i = 0; i < vha->hw->max_req_queues; i++) { |
| 402 | struct req_que *req = vha->hw->req_q_map[i]; | 402 | struct req_que *req = vha->hw->req_q_map[i]; |
| 403 | 403 | ||
| 404 | if (!test_bit(i, vha->hw->req_qid_map)) | ||
| 405 | continue; | ||
| 406 | |||
| 407 | if (req || !buf) { | 404 | if (req || !buf) { |
| 408 | length = req ? | 405 | length = req ? |
| 409 | req->length : REQUEST_ENTRY_CNT_24XX; | 406 | req->length : REQUEST_ENTRY_CNT_24XX; |
| @@ -418,9 +415,6 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha, | |||
| 418 | for (i = 0; i < vha->hw->max_rsp_queues; i++) { | 415 | for (i = 0; i < vha->hw->max_rsp_queues; i++) { |
| 419 | struct rsp_que *rsp = vha->hw->rsp_q_map[i]; | 416 | struct rsp_que *rsp = vha->hw->rsp_q_map[i]; |
| 420 | 417 | ||
| 421 | if (!test_bit(i, vha->hw->rsp_qid_map)) | ||
| 422 | continue; | ||
| 423 | |||
| 424 | if (rsp || !buf) { | 418 | if (rsp || !buf) { |
| 425 | length = rsp ? | 419 | length = rsp ? |
| 426 | rsp->length : RESPONSE_ENTRY_CNT_MQ; | 420 | rsp->length : RESPONSE_ENTRY_CNT_MQ; |
| @@ -660,9 +654,6 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha, | |||
| 660 | for (i = 0; i < vha->hw->max_req_queues; i++) { | 654 | for (i = 0; i < vha->hw->max_req_queues; i++) { |
| 661 | struct req_que *req = vha->hw->req_q_map[i]; | 655 | struct req_que *req = vha->hw->req_q_map[i]; |
| 662 | 656 | ||
| 663 | if (!test_bit(i, vha->hw->req_qid_map)) | ||
| 664 | continue; | ||
| 665 | |||
| 666 | if (req || !buf) { | 657 | if (req || !buf) { |
| 667 | qla27xx_insert16(i, buf, len); | 658 | qla27xx_insert16(i, buf, len); |
| 668 | qla27xx_insert16(1, buf, len); | 659 | qla27xx_insert16(1, buf, len); |
| @@ -675,9 +666,6 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha, | |||
| 675 | for (i = 0; i < vha->hw->max_rsp_queues; i++) { | 666 | for (i = 0; i < vha->hw->max_rsp_queues; i++) { |
| 676 | struct rsp_que *rsp = vha->hw->rsp_q_map[i]; | 667 | struct rsp_que *rsp = vha->hw->rsp_q_map[i]; |
| 677 | 668 | ||
| 678 | if (!test_bit(i, vha->hw->rsp_qid_map)) | ||
| 679 | continue; | ||
| 680 | |||
| 681 | if (rsp || !buf) { | 669 | if (rsp || !buf) { |
| 682 | qla27xx_insert16(i, buf, len); | 670 | qla27xx_insert16(i, buf, len); |
| 683 | qla27xx_insert16(1, buf, len); | 671 | qla27xx_insert16(1, buf, len); |
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index b20da0d27ad7..3f82ea1b72dc 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c | |||
| @@ -500,7 +500,6 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd, | |||
| 500 | static void tcm_qla2xxx_handle_data_work(struct work_struct *work) | 500 | static void tcm_qla2xxx_handle_data_work(struct work_struct *work) |
| 501 | { | 501 | { |
| 502 | struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); | 502 | struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); |
| 503 | unsigned long flags; | ||
| 504 | 503 | ||
| 505 | /* | 504 | /* |
| 506 | * Ensure that the complete FCP WRITE payload has been received. | 505 | * Ensure that the complete FCP WRITE payload has been received. |
| @@ -508,17 +507,6 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work) | |||
| 508 | */ | 507 | */ |
| 509 | cmd->cmd_in_wq = 0; | 508 | cmd->cmd_in_wq = 0; |
| 510 | 509 | ||
| 511 | spin_lock_irqsave(&cmd->cmd_lock, flags); | ||
| 512 | cmd->data_work = 1; | ||
| 513 | if (cmd->aborted) { | ||
| 514 | cmd->data_work_free = 1; | ||
| 515 | spin_unlock_irqrestore(&cmd->cmd_lock, flags); | ||
| 516 | |||
| 517 | tcm_qla2xxx_free_cmd(cmd); | ||
| 518 | return; | ||
| 519 | } | ||
| 520 | spin_unlock_irqrestore(&cmd->cmd_lock, flags); | ||
| 521 | |||
| 522 | cmd->qpair->tgt_counters.qla_core_ret_ctio++; | 510 | cmd->qpair->tgt_counters.qla_core_ret_ctio++; |
| 523 | if (!cmd->write_data_transferred) { | 511 | if (!cmd->write_data_transferred) { |
| 524 | /* | 512 | /* |
| @@ -765,31 +753,13 @@ static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd) | |||
| 765 | qlt_xmit_tm_rsp(mcmd); | 753 | qlt_xmit_tm_rsp(mcmd); |
| 766 | } | 754 | } |
| 767 | 755 | ||
| 768 | #define DATA_WORK_NOT_FREE(_cmd) (_cmd->data_work && !_cmd->data_work_free) | ||
| 769 | static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd) | 756 | static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd) |
| 770 | { | 757 | { |
| 771 | struct qla_tgt_cmd *cmd = container_of(se_cmd, | 758 | struct qla_tgt_cmd *cmd = container_of(se_cmd, |
| 772 | struct qla_tgt_cmd, se_cmd); | 759 | struct qla_tgt_cmd, se_cmd); |
| 773 | unsigned long flags; | ||
| 774 | 760 | ||
| 775 | if (qlt_abort_cmd(cmd)) | 761 | if (qlt_abort_cmd(cmd)) |
| 776 | return; | 762 | return; |
| 777 | |||
| 778 | spin_lock_irqsave(&cmd->cmd_lock, flags); | ||
| 779 | if ((cmd->state == QLA_TGT_STATE_NEW)|| | ||
| 780 | ((cmd->state == QLA_TGT_STATE_DATA_IN) && | ||
| 781 | DATA_WORK_NOT_FREE(cmd))) { | ||
| 782 | cmd->data_work_free = 1; | ||
| 783 | spin_unlock_irqrestore(&cmd->cmd_lock, flags); | ||
| 784 | /* | ||
| 785 | * cmd has not reached fw, Use this trigger to free it. | ||
| 786 | */ | ||
| 787 | tcm_qla2xxx_free_cmd(cmd); | ||
| 788 | return; | ||
| 789 | } | ||
| 790 | spin_unlock_irqrestore(&cmd->cmd_lock, flags); | ||
| 791 | return; | ||
| 792 | |||
| 793 | } | 763 | } |
| 794 | 764 | ||
| 795 | static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *, | 765 | static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *, |
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 3d38c6d463b8..1bf274e3b2b6 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c | |||
| @@ -800,7 +800,11 @@ MODULE_LICENSE("GPL"); | |||
| 800 | module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR); | 800 | module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR); |
| 801 | MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels"); | 801 | MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels"); |
| 802 | 802 | ||
| 803 | #ifdef CONFIG_SCSI_MQ_DEFAULT | ||
| 803 | bool scsi_use_blk_mq = true; | 804 | bool scsi_use_blk_mq = true; |
| 805 | #else | ||
| 806 | bool scsi_use_blk_mq = false; | ||
| 807 | #endif | ||
| 804 | module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO); | 808 | module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO); |
| 805 | 809 | ||
| 806 | static int __init init_scsi(void) | 810 | static int __init init_scsi(void) |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index bea36adeee17..e2647f2d4430 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
| @@ -1277,6 +1277,9 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt) | |||
| 1277 | { | 1277 | { |
| 1278 | struct request *rq = SCpnt->request; | 1278 | struct request *rq = SCpnt->request; |
| 1279 | 1279 | ||
| 1280 | if (SCpnt->flags & SCMD_ZONE_WRITE_LOCK) | ||
| 1281 | sd_zbc_write_unlock_zone(SCpnt); | ||
| 1282 | |||
| 1280 | if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) | 1283 | if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) |
| 1281 | __free_page(rq->special_vec.bv_page); | 1284 | __free_page(rq->special_vec.bv_page); |
| 1282 | 1285 | ||
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index 96855df9f49d..8aa54779aac1 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c | |||
| @@ -294,6 +294,9 @@ int sd_zbc_write_lock_zone(struct scsi_cmnd *cmd) | |||
| 294 | test_and_set_bit(zno, sdkp->zones_wlock)) | 294 | test_and_set_bit(zno, sdkp->zones_wlock)) |
| 295 | return BLKPREP_DEFER; | 295 | return BLKPREP_DEFER; |
| 296 | 296 | ||
| 297 | WARN_ON_ONCE(cmd->flags & SCMD_ZONE_WRITE_LOCK); | ||
| 298 | cmd->flags |= SCMD_ZONE_WRITE_LOCK; | ||
| 299 | |||
| 297 | return BLKPREP_OK; | 300 | return BLKPREP_OK; |
| 298 | } | 301 | } |
| 299 | 302 | ||
| @@ -302,9 +305,10 @@ void sd_zbc_write_unlock_zone(struct scsi_cmnd *cmd) | |||
| 302 | struct request *rq = cmd->request; | 305 | struct request *rq = cmd->request; |
| 303 | struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); | 306 | struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); |
| 304 | 307 | ||
| 305 | if (sdkp->zones_wlock) { | 308 | if (sdkp->zones_wlock && cmd->flags & SCMD_ZONE_WRITE_LOCK) { |
| 306 | unsigned int zno = sd_zbc_zone_no(sdkp, blk_rq_pos(rq)); | 309 | unsigned int zno = sd_zbc_zone_no(sdkp, blk_rq_pos(rq)); |
| 307 | WARN_ON_ONCE(!test_bit(zno, sdkp->zones_wlock)); | 310 | WARN_ON_ONCE(!test_bit(zno, sdkp->zones_wlock)); |
| 311 | cmd->flags &= ~SCMD_ZONE_WRITE_LOCK; | ||
| 308 | clear_bit_unlock(zno, sdkp->zones_wlock); | 312 | clear_bit_unlock(zno, sdkp->zones_wlock); |
| 309 | smp_mb__after_atomic(); | 313 | smp_mb__after_atomic(); |
| 310 | } | 314 | } |
| @@ -335,9 +339,6 @@ void sd_zbc_complete(struct scsi_cmnd *cmd, | |||
| 335 | case REQ_OP_WRITE_ZEROES: | 339 | case REQ_OP_WRITE_ZEROES: |
| 336 | case REQ_OP_WRITE_SAME: | 340 | case REQ_OP_WRITE_SAME: |
| 337 | 341 | ||
| 338 | /* Unlock the zone */ | ||
| 339 | sd_zbc_write_unlock_zone(cmd); | ||
| 340 | |||
| 341 | if (result && | 342 | if (result && |
| 342 | sshdr->sense_key == ILLEGAL_REQUEST && | 343 | sshdr->sense_key == ILLEGAL_REQUEST && |
| 343 | sshdr->asc == 0x21) | 344 | sshdr->asc == 0x21) |
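The sd/sd_zbc hunks move zone unlocking from the completion path into sd_uninit_command() and key it on a new per-command flag: sd_zbc_write_lock_zone() sets SCMD_ZONE_WRITE_LOCK only when it actually takes the zone write lock, and the unlock side clears the zone bit only when that flag is present. The unlock therefore happens exactly once per command, on the uninit path that runs even when a command is requeued rather than completed, and a command that never took the lock cannot clear a bit owned by another one. A toy sketch of the flag-guarded pairing, with the SCSI structures reduced to a minimal struct:

```c
#include <linux/bitops.h>
#include <linux/errno.h>

#define TOY_ZONE_WRITE_LOCK	(1U << 0)

struct toy_cmd {
	unsigned int flags;
	unsigned int zno;			/* zone number this command writes */
};

static int toy_lock_zone(struct toy_cmd *cmd, unsigned long *zones_wlock)
{
	if (test_and_set_bit(cmd->zno, zones_wlock))
		return -EBUSY;			/* zone already write-locked: defer */
	cmd->flags |= TOY_ZONE_WRITE_LOCK;	/* remember that *we* took the lock */
	return 0;
}

static void toy_unlock_zone(struct toy_cmd *cmd, unsigned long *zones_wlock)
{
	if (!(cmd->flags & TOY_ZONE_WRITE_LOCK))
		return;				/* this command never locked the zone */
	cmd->flags &= ~TOY_ZONE_WRITE_LOCK;
	clear_bit_unlock(cmd->zno, zones_wlock);
}
```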
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index f1cdf32d7514..8927f9f54ad9 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c | |||
| @@ -99,7 +99,7 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code, | |||
| 99 | 99 | ||
| 100 | ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen, | 100 | ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen, |
| 101 | NULL, SES_TIMEOUT, SES_RETRIES, NULL); | 101 | NULL, SES_TIMEOUT, SES_RETRIES, NULL); |
| 102 | if (unlikely(!ret)) | 102 | if (unlikely(ret)) |
| 103 | return ret; | 103 | return ret; |
| 104 | 104 | ||
| 105 | recv_page_code = ((unsigned char *)buf)[0]; | 105 | recv_page_code = ((unsigned char *)buf)[0]; |
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 8e5013d9cad4..94e402ed30f6 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c | |||
| @@ -4299,11 +4299,11 @@ static int st_probe(struct device *dev) | |||
| 4299 | kref_init(&tpnt->kref); | 4299 | kref_init(&tpnt->kref); |
| 4300 | tpnt->disk = disk; | 4300 | tpnt->disk = disk; |
| 4301 | disk->private_data = &tpnt->driver; | 4301 | disk->private_data = &tpnt->driver; |
| 4302 | disk->queue = SDp->request_queue; | ||
| 4303 | /* SCSI tape doesn't register this gendisk via add_disk(). Manually | 4302 | /* SCSI tape doesn't register this gendisk via add_disk(). Manually |
| 4304 | * take queue reference that release_disk() expects. */ | 4303 | * take queue reference that release_disk() expects. */ |
| 4305 | if (!blk_get_queue(disk->queue)) | 4304 | if (!blk_get_queue(SDp->request_queue)) |
| 4306 | goto out_put_disk; | 4305 | goto out_put_disk; |
| 4306 | disk->queue = SDp->request_queue; | ||
| 4307 | tpnt->driver = &st_template; | 4307 | tpnt->driver = &st_template; |
| 4308 | 4308 | ||
| 4309 | tpnt->device = SDp; | 4309 | tpnt->device = SDp; |
diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c index 3039072911a5..afc7ecc3c187 100644 --- a/drivers/soc/imx/gpcv2.c +++ b/drivers/soc/imx/gpcv2.c | |||
| @@ -200,16 +200,11 @@ static int imx7_pgc_domain_probe(struct platform_device *pdev) | |||
| 200 | 200 | ||
| 201 | domain->dev = &pdev->dev; | 201 | domain->dev = &pdev->dev; |
| 202 | 202 | ||
| 203 | ret = pm_genpd_init(&domain->genpd, NULL, true); | ||
| 204 | if (ret) { | ||
| 205 | dev_err(domain->dev, "Failed to init power domain\n"); | ||
| 206 | return ret; | ||
| 207 | } | ||
| 208 | |||
| 209 | domain->regulator = devm_regulator_get_optional(domain->dev, "power"); | 203 | domain->regulator = devm_regulator_get_optional(domain->dev, "power"); |
| 210 | if (IS_ERR(domain->regulator)) { | 204 | if (IS_ERR(domain->regulator)) { |
| 211 | if (PTR_ERR(domain->regulator) != -ENODEV) { | 205 | if (PTR_ERR(domain->regulator) != -ENODEV) { |
| 212 | dev_err(domain->dev, "Failed to get domain's regulator\n"); | 206 | if (PTR_ERR(domain->regulator) != -EPROBE_DEFER) |
| 207 | dev_err(domain->dev, "Failed to get domain's regulator\n"); | ||
| 213 | return PTR_ERR(domain->regulator); | 208 | return PTR_ERR(domain->regulator); |
| 214 | } | 209 | } |
| 215 | } else { | 210 | } else { |
| @@ -217,6 +212,12 @@ static int imx7_pgc_domain_probe(struct platform_device *pdev) | |||
| 217 | domain->voltage, domain->voltage); | 212 | domain->voltage, domain->voltage); |
| 218 | } | 213 | } |
| 219 | 214 | ||
| 215 | ret = pm_genpd_init(&domain->genpd, NULL, true); | ||
| 216 | if (ret) { | ||
| 217 | dev_err(domain->dev, "Failed to init power domain\n"); | ||
| 218 | return ret; | ||
| 219 | } | ||
| 220 | |||
| 220 | ret = of_genpd_add_provider_simple(domain->dev->of_node, | 221 | ret = of_genpd_add_provider_simple(domain->dev->of_node, |
| 221 | &domain->genpd); | 222 | &domain->genpd); |
| 222 | if (ret) { | 223 | if (ret) { |
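The gpcv2 hunk does two related things: it only prints the regulator error when the cause is not -EPROBE_DEFER (a deferred probe is retried, so it is not worth an error message), and it moves pm_genpd_init() after the regulator lookup, presumably because the genpd registration has no easy undo path and should not happen before every resource that can still defer has been acquired. A condensed sketch of that probe-ordering rule, with toy names for everything that is not a real kernel API:

```c
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

int toy_register_domain(struct platform_device *pdev);	/* hypothetical, hard to undo */

static int toy_domain_probe(struct platform_device *pdev)
{
	struct regulator *reg;
	int ret;

	/* Acquire anything that may return -EPROBE_DEFER first. */
	reg = devm_regulator_get_optional(&pdev->dev, "power");
	if (IS_ERR(reg)) {
		ret = PTR_ERR(reg);
		if (ret != -ENODEV) {
			if (ret != -EPROBE_DEFER)
				dev_err(&pdev->dev, "failed to get regulator: %d\n", ret);
			return ret;	/* probe is simply retried on -EPROBE_DEFER */
		}
		reg = NULL;		/* the regulator is genuinely optional */
	}

	/* Only now commit to state that cannot be cheaply torn down. */
	return toy_register_domain(pdev);
}
```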
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c index 279e7c5551dd..39225de9d7f1 100644 --- a/drivers/soc/ti/knav_qmss_queue.c +++ b/drivers/soc/ti/knav_qmss_queue.c | |||
| @@ -745,6 +745,9 @@ void *knav_pool_create(const char *name, | |||
| 745 | bool slot_found; | 745 | bool slot_found; |
| 746 | int ret; | 746 | int ret; |
| 747 | 747 | ||
| 748 | if (!kdev) | ||
| 749 | return ERR_PTR(-EPROBE_DEFER); | ||
| 750 | |||
| 748 | if (!kdev->dev) | 751 | if (!kdev->dev) |
| 749 | return ERR_PTR(-ENODEV); | 752 | return ERR_PTR(-ENODEV); |
| 750 | 753 | ||
diff --git a/drivers/soc/ti/ti_sci_pm_domains.c b/drivers/soc/ti/ti_sci_pm_domains.c index b0b283810e72..de31b9389e2e 100644 --- a/drivers/soc/ti/ti_sci_pm_domains.c +++ b/drivers/soc/ti/ti_sci_pm_domains.c | |||
| @@ -176,6 +176,8 @@ static int ti_sci_pm_domain_probe(struct platform_device *pdev) | |||
| 176 | 176 | ||
| 177 | ti_sci_pd->dev = dev; | 177 | ti_sci_pd->dev = dev; |
| 178 | 178 | ||
| 179 | ti_sci_pd->pd.name = "ti_sci_pd"; | ||
| 180 | |||
| 179 | ti_sci_pd->pd.attach_dev = ti_sci_pd_attach_dev; | 181 | ti_sci_pd->pd.attach_dev = ti_sci_pd_attach_dev; |
| 180 | ti_sci_pd->pd.detach_dev = ti_sci_pd_detach_dev; | 182 | ti_sci_pd->pd.detach_dev = ti_sci_pd_detach_dev; |
| 181 | 183 | ||
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c index ca11be21f64b..34ca7823255d 100644 --- a/drivers/staging/comedi/comedi_fops.c +++ b/drivers/staging/comedi/comedi_fops.c | |||
| @@ -2396,6 +2396,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf, | |||
| 2396 | continue; | 2396 | continue; |
| 2397 | } | 2397 | } |
| 2398 | 2398 | ||
| 2399 | set_current_state(TASK_RUNNING); | ||
| 2399 | wp = async->buf_write_ptr; | 2400 | wp = async->buf_write_ptr; |
| 2400 | n1 = min(n, async->prealloc_bufsz - wp); | 2401 | n1 = min(n, async->prealloc_bufsz - wp); |
| 2401 | n2 = n - n1; | 2402 | n2 = n - n1; |
| @@ -2528,6 +2529,8 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes, | |||
| 2528 | } | 2529 | } |
| 2529 | continue; | 2530 | continue; |
| 2530 | } | 2531 | } |
| 2532 | |||
| 2533 | set_current_state(TASK_RUNNING); | ||
| 2531 | rp = async->buf_read_ptr; | 2534 | rp = async->buf_read_ptr; |
| 2532 | n1 = min(n, async->prealloc_bufsz - rp); | 2535 | n1 = min(n, async->prealloc_bufsz - rp); |
| 2533 | n2 = n - n1; | 2536 | n2 = n - n1; |
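Both comedi hunks add set_current_state(TASK_RUNNING) before the copy to or from the user buffer. The read/write loops set TASK_INTERRUPTIBLE while deciding whether to sleep; if they then go on to copy data while still marked as sleeping, copy_to_user()/copy_from_user() may fault and block, which can trigger the kernel's "do not call blocking ops when !TASK_RUNNING" warning. A condensed sketch of the safe shape of such a loop (toy helpers, not comedi's actual code):

```c
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/wait.h>

struct toy_dev { wait_queue_head_t wq; };

size_t toy_bytes_ready(struct toy_dev *dev);	/* hypothetical */
void *toy_read_ptr(struct toy_dev *dev);	/* hypothetical */

static ssize_t toy_read(struct toy_dev *dev, char __user *buf, size_t count)
{
	DECLARE_WAITQUEUE(wait, current);
	size_t n = 0;
	ssize_t ret = 0;

	add_wait_queue(&dev->wq, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		n = min(toy_bytes_ready(dev), count);
		if (n)
			break;
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		schedule();			/* still TASK_INTERRUPTIBLE: ok to sleep */
	}
	set_current_state(TASK_RUNNING);	/* the fix: leave the sleeping state */
	remove_wait_queue(&dev->wq, &wait);

	if (!ret && copy_to_user(buf, toy_read_ptr(dev), n))
		ret = -EFAULT;			/* the copy may fault and sleep */
	return ret ? ret : n;
}
```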
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c index a6a8393d6664..3e00df74b18c 100644 --- a/drivers/staging/iio/resolver/ad2s1210.c +++ b/drivers/staging/iio/resolver/ad2s1210.c | |||
| @@ -472,7 +472,7 @@ static int ad2s1210_read_raw(struct iio_dev *indio_dev, | |||
| 472 | long m) | 472 | long m) |
| 473 | { | 473 | { |
| 474 | struct ad2s1210_state *st = iio_priv(indio_dev); | 474 | struct ad2s1210_state *st = iio_priv(indio_dev); |
| 475 | bool negative; | 475 | u16 negative; |
| 476 | int ret = 0; | 476 | int ret = 0; |
| 477 | u16 pos; | 477 | u16 pos; |
| 478 | s16 vel; | 478 | s16 vel; |
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c index e583dd8a418b..d4fa41be80f9 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c | |||
| @@ -1510,11 +1510,13 @@ cxgbit_pass_open_rpl(struct cxgbit_device *cdev, struct sk_buff *skb) | |||
| 1510 | 1510 | ||
| 1511 | if (!cnp) { | 1511 | if (!cnp) { |
| 1512 | pr_info("%s stid %d lookup failure\n", __func__, stid); | 1512 | pr_info("%s stid %d lookup failure\n", __func__, stid); |
| 1513 | return; | 1513 | goto rel_skb; |
| 1514 | } | 1514 | } |
| 1515 | 1515 | ||
| 1516 | cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status); | 1516 | cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status); |
| 1517 | cxgbit_put_cnp(cnp); | 1517 | cxgbit_put_cnp(cnp); |
| 1518 | rel_skb: | ||
| 1519 | __kfree_skb(skb); | ||
| 1518 | } | 1520 | } |
| 1519 | 1521 | ||
| 1520 | static void | 1522 | static void |
| @@ -1530,11 +1532,13 @@ cxgbit_close_listsrv_rpl(struct cxgbit_device *cdev, struct sk_buff *skb) | |||
| 1530 | 1532 | ||
| 1531 | if (!cnp) { | 1533 | if (!cnp) { |
| 1532 | pr_info("%s stid %d lookup failure\n", __func__, stid); | 1534 | pr_info("%s stid %d lookup failure\n", __func__, stid); |
| 1533 | return; | 1535 | goto rel_skb; |
| 1534 | } | 1536 | } |
| 1535 | 1537 | ||
| 1536 | cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status); | 1538 | cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status); |
| 1537 | cxgbit_put_cnp(cnp); | 1539 | cxgbit_put_cnp(cnp); |
| 1540 | rel_skb: | ||
| 1541 | __kfree_skb(skb); | ||
| 1538 | } | 1542 | } |
| 1539 | 1543 | ||
| 1540 | static void | 1544 | static void |
| @@ -1819,12 +1823,16 @@ static void cxgbit_set_tcb_rpl(struct cxgbit_device *cdev, struct sk_buff *skb) | |||
| 1819 | struct tid_info *t = lldi->tids; | 1823 | struct tid_info *t = lldi->tids; |
| 1820 | 1824 | ||
| 1821 | csk = lookup_tid(t, tid); | 1825 | csk = lookup_tid(t, tid); |
| 1822 | if (unlikely(!csk)) | 1826 | if (unlikely(!csk)) { |
| 1823 | pr_err("can't find connection for tid %u.\n", tid); | 1827 | pr_err("can't find connection for tid %u.\n", tid); |
| 1824 | else | 1828 | goto rel_skb; |
| 1829 | } else { | ||
| 1825 | cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status); | 1830 | cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status); |
| 1831 | } | ||
| 1826 | 1832 | ||
| 1827 | cxgbit_put_csk(csk); | 1833 | cxgbit_put_csk(csk); |
| 1834 | rel_skb: | ||
| 1835 | __kfree_skb(skb); | ||
| 1828 | } | 1836 | } |
| 1829 | 1837 | ||
| 1830 | static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb) | 1838 | static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb) |
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c index dda13f1af38e..514986b57c2d 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_target.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c | |||
| @@ -827,7 +827,7 @@ cxgbit_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, | |||
| 827 | 827 | ||
| 828 | static void | 828 | static void |
| 829 | cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg, | 829 | cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg, |
| 830 | unsigned int nents) | 830 | unsigned int nents, u32 skip) |
| 831 | { | 831 | { |
| 832 | struct skb_seq_state st; | 832 | struct skb_seq_state st; |
| 833 | const u8 *buf; | 833 | const u8 *buf; |
| @@ -846,7 +846,7 @@ cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg, | |||
| 846 | } | 846 | } |
| 847 | 847 | ||
| 848 | consumed += sg_pcopy_from_buffer(sg, nents, (void *)buf, | 848 | consumed += sg_pcopy_from_buffer(sg, nents, (void *)buf, |
| 849 | buf_len, consumed); | 849 | buf_len, skip + consumed); |
| 850 | } | 850 | } |
| 851 | } | 851 | } |
| 852 | 852 | ||
| @@ -912,7 +912,7 @@ cxgbit_handle_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr, | |||
| 912 | struct scatterlist *sg = &cmd->se_cmd.t_data_sg[0]; | 912 | struct scatterlist *sg = &cmd->se_cmd.t_data_sg[0]; |
| 913 | u32 sg_nents = max(1UL, DIV_ROUND_UP(pdu_cb->dlen, PAGE_SIZE)); | 913 | u32 sg_nents = max(1UL, DIV_ROUND_UP(pdu_cb->dlen, PAGE_SIZE)); |
| 914 | 914 | ||
| 915 | cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents); | 915 | cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents, 0); |
| 916 | } | 916 | } |
| 917 | 917 | ||
| 918 | cmd->write_data_done += pdu_cb->dlen; | 918 | cmd->write_data_done += pdu_cb->dlen; |
| @@ -1069,11 +1069,13 @@ static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk) | |||
| 1069 | cmd->se_cmd.data_length); | 1069 | cmd->se_cmd.data_length); |
| 1070 | 1070 | ||
| 1071 | if (!(pdu_cb->flags & PDUCBF_RX_DATA_DDPD)) { | 1071 | if (!(pdu_cb->flags & PDUCBF_RX_DATA_DDPD)) { |
| 1072 | u32 skip = data_offset % PAGE_SIZE; | ||
| 1073 | |||
| 1072 | sg_off = data_offset / PAGE_SIZE; | 1074 | sg_off = data_offset / PAGE_SIZE; |
| 1073 | sg_start = &cmd->se_cmd.t_data_sg[sg_off]; | 1075 | sg_start = &cmd->se_cmd.t_data_sg[sg_off]; |
| 1074 | sg_nents = max(1UL, DIV_ROUND_UP(data_len, PAGE_SIZE)); | 1076 | sg_nents = max(1UL, DIV_ROUND_UP(skip + data_len, PAGE_SIZE)); |
| 1075 | 1077 | ||
| 1076 | cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents); | 1078 | cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents, skip); |
| 1077 | } | 1079 | } |
| 1078 | 1080 | ||
| 1079 | check_payload: | 1081 | check_payload: |
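The cxgbit change threads a skip argument through cxgbit_skb_copy_to_sg() so that a data-out PDU whose buffer offset is not page aligned lands at the right byte inside the first scatterlist page: sg_off picks the starting page, skip is the remainder within that page, and sg_nents must cover skip + data_len rather than data_len alone. A small worked example of the arithmetic, runnable as plain C (PAGE_SIZE assumed to be 4096):

```c
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long data_offset = 5120;	/* not page aligned */
	unsigned long data_len = 8192;

	unsigned long sg_off = data_offset / PAGE_SIZE;		/* 1: start at page 1 */
	unsigned long skip = data_offset % PAGE_SIZE;		/* 1024 bytes into it */
	unsigned long sg_nents = DIV_ROUND_UP(skip + data_len, PAGE_SIZE);

	/* skip + data_len = 9216 bytes spans 3 pages, while
	 * DIV_ROUND_UP(data_len, PAGE_SIZE) alone would only cover 2. */
	printf("sg_off=%lu skip=%lu sg_nents=%lu\n", sg_off, skip, sg_nents);
	return 0;
}
```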
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 74e4975dd1b1..5001261f5d69 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c | |||
| @@ -418,6 +418,7 @@ int iscsit_reset_np_thread( | |||
| 418 | return 0; | 418 | return 0; |
| 419 | } | 419 | } |
| 420 | np->np_thread_state = ISCSI_NP_THREAD_RESET; | 420 | np->np_thread_state = ISCSI_NP_THREAD_RESET; |
| 421 | atomic_inc(&np->np_reset_count); | ||
| 421 | 422 | ||
| 422 | if (np->np_thread) { | 423 | if (np->np_thread) { |
| 423 | spin_unlock_bh(&np->np_thread_lock); | 424 | spin_unlock_bh(&np->np_thread_lock); |
| @@ -2167,6 +2168,7 @@ iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
| 2167 | cmd->cmd_sn = be32_to_cpu(hdr->cmdsn); | 2168 | cmd->cmd_sn = be32_to_cpu(hdr->cmdsn); |
| 2168 | cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn); | 2169 | cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn); |
| 2169 | cmd->data_direction = DMA_NONE; | 2170 | cmd->data_direction = DMA_NONE; |
| 2171 | kfree(cmd->text_in_ptr); | ||
| 2170 | cmd->text_in_ptr = NULL; | 2172 | cmd->text_in_ptr = NULL; |
| 2171 | 2173 | ||
| 2172 | return 0; | 2174 | return 0; |
| @@ -3487,9 +3489,9 @@ iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn, | |||
| 3487 | return text_length; | 3489 | return text_length; |
| 3488 | 3490 | ||
| 3489 | if (completed) { | 3491 | if (completed) { |
| 3490 | hdr->flags |= ISCSI_FLAG_CMD_FINAL; | 3492 | hdr->flags = ISCSI_FLAG_CMD_FINAL; |
| 3491 | } else { | 3493 | } else { |
| 3492 | hdr->flags |= ISCSI_FLAG_TEXT_CONTINUE; | 3494 | hdr->flags = ISCSI_FLAG_TEXT_CONTINUE; |
| 3493 | cmd->read_data_done += text_length; | 3495 | cmd->read_data_done += text_length; |
| 3494 | if (cmd->targ_xfer_tag == 0xFFFFFFFF) | 3496 | if (cmd->targ_xfer_tag == 0xFFFFFFFF) |
| 3495 | cmd->targ_xfer_tag = session_get_next_ttt(conn->sess); | 3497 | cmd->targ_xfer_tag = session_get_next_ttt(conn->sess); |
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index e9bdc8b86e7d..dc13afbd4c88 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c | |||
| @@ -1243,9 +1243,11 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) | |||
| 1243 | flush_signals(current); | 1243 | flush_signals(current); |
| 1244 | 1244 | ||
| 1245 | spin_lock_bh(&np->np_thread_lock); | 1245 | spin_lock_bh(&np->np_thread_lock); |
| 1246 | if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { | 1246 | if (atomic_dec_if_positive(&np->np_reset_count) >= 0) { |
| 1247 | np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; | 1247 | np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; |
| 1248 | spin_unlock_bh(&np->np_thread_lock); | ||
| 1248 | complete(&np->np_restart_comp); | 1249 | complete(&np->np_restart_comp); |
| 1250 | return 1; | ||
| 1249 | } else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) { | 1251 | } else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) { |
| 1250 | spin_unlock_bh(&np->np_thread_lock); | 1252 | spin_unlock_bh(&np->np_thread_lock); |
| 1251 | goto exit; | 1253 | goto exit; |
| @@ -1278,7 +1280,8 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) | |||
| 1278 | goto exit; | 1280 | goto exit; |
| 1279 | } else if (rc < 0) { | 1281 | } else if (rc < 0) { |
| 1280 | spin_lock_bh(&np->np_thread_lock); | 1282 | spin_lock_bh(&np->np_thread_lock); |
| 1281 | if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { | 1283 | if (atomic_dec_if_positive(&np->np_reset_count) >= 0) { |
| 1284 | np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; | ||
| 1282 | spin_unlock_bh(&np->np_thread_lock); | 1285 | spin_unlock_bh(&np->np_thread_lock); |
| 1283 | complete(&np->np_restart_comp); | 1286 | complete(&np->np_restart_comp); |
| 1284 | iscsit_put_transport(conn->conn_transport); | 1287 | iscsit_put_transport(conn->conn_transport); |
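The iscsi_target change turns the reset handshake into a counter: iscsit_reset_np_thread() bumps np_reset_count, and the login thread consumes one pending reset with atomic_dec_if_positive(), so a reset requested while np_thread_state has already moved on is no longer lost. A tiny example of the consume-one-token semantics of atomic_dec_if_positive():

```c
#include <linux/atomic.h>
#include <linux/types.h>

/* Consume one pending reset request, if any; the counter never goes
 * below zero. Returns true when a request was consumed.
 */
static bool toy_consume_reset(atomic_t *reset_count)
{
	/* atomic_dec_if_positive() returns (old value - 1) and only stores
	 * the decrement when that result is >= 0, i.e. when old >= 1. */
	return atomic_dec_if_positive(reset_count) >= 0;
}
```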
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 36913734c6bc..02e8a5d86658 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c | |||
| @@ -364,7 +364,7 @@ void core_tpg_del_initiator_node_acl(struct se_node_acl *acl) | |||
| 364 | mutex_lock(&tpg->acl_node_mutex); | 364 | mutex_lock(&tpg->acl_node_mutex); |
| 365 | if (acl->dynamic_node_acl) | 365 | if (acl->dynamic_node_acl) |
| 366 | acl->dynamic_node_acl = 0; | 366 | acl->dynamic_node_acl = 0; |
| 367 | list_del(&acl->acl_list); | 367 | list_del_init(&acl->acl_list); |
| 368 | mutex_unlock(&tpg->acl_node_mutex); | 368 | mutex_unlock(&tpg->acl_node_mutex); |
| 369 | 369 | ||
| 370 | target_shutdown_sessions(acl); | 370 | target_shutdown_sessions(acl); |
| @@ -548,7 +548,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg) | |||
| 548 | * in transport_deregister_session(). | 548 | * in transport_deregister_session(). |
| 549 | */ | 549 | */ |
| 550 | list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) { | 550 | list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) { |
| 551 | list_del(&nacl->acl_list); | 551 | list_del_init(&nacl->acl_list); |
| 552 | 552 | ||
| 553 | core_tpg_wait_for_nacl_pr_ref(nacl); | 553 | core_tpg_wait_for_nacl_pr_ref(nacl); |
| 554 | core_free_device_list_for_node(nacl, se_tpg); | 554 | core_free_device_list_for_node(nacl, se_tpg); |
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 97fed9a298bd..836d552b0385 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
| @@ -466,7 +466,7 @@ static void target_complete_nacl(struct kref *kref) | |||
| 466 | } | 466 | } |
| 467 | 467 | ||
| 468 | mutex_lock(&se_tpg->acl_node_mutex); | 468 | mutex_lock(&se_tpg->acl_node_mutex); |
| 469 | list_del(&nacl->acl_list); | 469 | list_del_init(&nacl->acl_list); |
| 470 | mutex_unlock(&se_tpg->acl_node_mutex); | 470 | mutex_unlock(&se_tpg->acl_node_mutex); |
| 471 | 471 | ||
| 472 | core_tpg_wait_for_nacl_pr_ref(nacl); | 472 | core_tpg_wait_for_nacl_pr_ref(nacl); |
| @@ -538,7 +538,7 @@ void transport_free_session(struct se_session *se_sess) | |||
| 538 | spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); | 538 | spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); |
| 539 | 539 | ||
| 540 | if (se_nacl->dynamic_stop) | 540 | if (se_nacl->dynamic_stop) |
| 541 | list_del(&se_nacl->acl_list); | 541 | list_del_init(&se_nacl->acl_list); |
| 542 | } | 542 | } |
| 543 | mutex_unlock(&se_tpg->acl_node_mutex); | 543 | mutex_unlock(&se_tpg->acl_node_mutex); |
| 544 | 544 | ||
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 80ee130f8253..942d094269fb 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c | |||
| @@ -563,8 +563,6 @@ static int scatter_data_area(struct tcmu_dev *udev, | |||
| 563 | block_remaining); | 563 | block_remaining); |
| 564 | to_offset = get_block_offset_user(udev, dbi, | 564 | to_offset = get_block_offset_user(udev, dbi, |
| 565 | block_remaining); | 565 | block_remaining); |
| 566 | offset = DATA_BLOCK_SIZE - block_remaining; | ||
| 567 | to += offset; | ||
| 568 | 566 | ||
| 569 | if (*iov_cnt != 0 && | 567 | if (*iov_cnt != 0 && |
| 570 | to_offset == iov_tail(*iov)) { | 568 | to_offset == iov_tail(*iov)) { |
| @@ -575,8 +573,10 @@ static int scatter_data_area(struct tcmu_dev *udev, | |||
| 575 | (*iov)->iov_len = copy_bytes; | 573 | (*iov)->iov_len = copy_bytes; |
| 576 | } | 574 | } |
| 577 | if (copy_data) { | 575 | if (copy_data) { |
| 578 | memcpy(to, from + sg->length - sg_remaining, | 576 | offset = DATA_BLOCK_SIZE - block_remaining; |
| 579 | copy_bytes); | 577 | memcpy(to + offset, |
| 578 | from + sg->length - sg_remaining, | ||
| 579 | copy_bytes); | ||
| 580 | tcmu_flush_dcache_range(to, copy_bytes); | 580 | tcmu_flush_dcache_range(to, copy_bytes); |
| 581 | } | 581 | } |
| 582 | sg_remaining -= copy_bytes; | 582 | sg_remaining -= copy_bytes; |
| @@ -637,9 +637,8 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd, | |||
| 637 | copy_bytes = min_t(size_t, sg_remaining, | 637 | copy_bytes = min_t(size_t, sg_remaining, |
| 638 | block_remaining); | 638 | block_remaining); |
| 639 | offset = DATA_BLOCK_SIZE - block_remaining; | 639 | offset = DATA_BLOCK_SIZE - block_remaining; |
| 640 | from += offset; | ||
| 641 | tcmu_flush_dcache_range(from, copy_bytes); | 640 | tcmu_flush_dcache_range(from, copy_bytes); |
| 642 | memcpy(to + sg->length - sg_remaining, from, | 641 | memcpy(to + sg->length - sg_remaining, from + offset, |
| 643 | copy_bytes); | 642 | copy_bytes); |
| 644 | 643 | ||
| 645 | sg_remaining -= copy_bytes; | 644 | sg_remaining -= copy_bytes; |
| @@ -1433,6 +1432,8 @@ static int tcmu_update_uio_info(struct tcmu_dev *udev) | |||
| 1433 | if (udev->dev_config[0]) | 1432 | if (udev->dev_config[0]) |
| 1434 | snprintf(str + used, size - used, "/%s", udev->dev_config); | 1433 | snprintf(str + used, size - used, "/%s", udev->dev_config); |
| 1435 | 1434 | ||
| 1435 | /* If the old string exists, free it */ | ||
| 1436 | kfree(info->name); | ||
| 1436 | info->name = str; | 1437 | info->name = str; |
| 1437 | 1438 | ||
| 1438 | return 0; | 1439 | return 0; |
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c index 308b6e17c88a..fe2f00ceafc5 100644 --- a/drivers/thunderbolt/eeprom.c +++ b/drivers/thunderbolt/eeprom.c | |||
| @@ -333,6 +333,15 @@ static int tb_drom_parse_entry_port(struct tb_switch *sw, | |||
| 333 | int res; | 333 | int res; |
| 334 | enum tb_port_type type; | 334 | enum tb_port_type type; |
| 335 | 335 | ||
| 336 | /* | ||
| 337 | * Some DROMs list more ports than the controller actually has | ||
| 338 | * so we skip those but allow the parser to continue. | ||
| 339 | */ | ||
| 340 | if (header->index > sw->config.max_port_number) { | ||
| 341 | dev_info_once(&sw->dev, "ignoring unnecessary extra entries in DROM\n"); | ||
| 342 | return 0; | ||
| 343 | } | ||
| 344 | |||
| 336 | port = &sw->ports[header->index]; | 345 | port = &sw->ports[header->index]; |
| 337 | port->disabled = header->port_disabled; | 346 | port->disabled = header->port_disabled; |
| 338 | if (port->disabled) | 347 | if (port->disabled) |
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index 284749fb0f6b..a6d5164c33a9 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c | |||
| @@ -69,13 +69,8 @@ static void pty_close(struct tty_struct *tty, struct file *filp) | |||
| 69 | #ifdef CONFIG_UNIX98_PTYS | 69 | #ifdef CONFIG_UNIX98_PTYS |
| 70 | if (tty->driver == ptm_driver) { | 70 | if (tty->driver == ptm_driver) { |
| 71 | mutex_lock(&devpts_mutex); | 71 | mutex_lock(&devpts_mutex); |
| 72 | if (tty->link->driver_data) { | 72 | if (tty->link->driver_data) |
| 73 | struct path *path = tty->link->driver_data; | 73 | devpts_pty_kill(tty->link->driver_data); |
| 74 | |||
| 75 | devpts_pty_kill(path->dentry); | ||
| 76 | path_put(path); | ||
| 77 | kfree(path); | ||
| 78 | } | ||
| 79 | mutex_unlock(&devpts_mutex); | 74 | mutex_unlock(&devpts_mutex); |
| 80 | } | 75 | } |
| 81 | #endif | 76 | #endif |
| @@ -607,25 +602,24 @@ static inline void legacy_pty_init(void) { } | |||
| 607 | static struct cdev ptmx_cdev; | 602 | static struct cdev ptmx_cdev; |
| 608 | 603 | ||
| 609 | /** | 604 | /** |
| 610 | * pty_open_peer - open the peer of a pty | 605 | * ptm_open_peer - open the peer of a pty |
| 611 | * @tty: the peer of the pty being opened | 606 | * @master: the open struct file of the ptmx device node |
| 607 | * @tty: the master of the pty being opened | ||
| 608 | * @flags: the flags for open | ||
| 612 | * | 609 | * |
| 613 | * Open the cached dentry in tty->link, providing a safe way for userspace | 610 | * Provide a race free way for userspace to open the slave end of a pty |
| 614 | * to get the slave end of a pty (where they have the master fd and cannot | 611 | * (where they have the master fd and cannot access or trust the mount |
| 615 | * access or trust the mount namespace /dev/pts was mounted inside). | 612 | * namespace /dev/pts was mounted inside). |
| 616 | */ | 613 | */ |
| 617 | static struct file *pty_open_peer(struct tty_struct *tty, int flags) | 614 | int ptm_open_peer(struct file *master, struct tty_struct *tty, int flags) |
| 618 | { | ||
| 619 | if (tty->driver->subtype != PTY_TYPE_MASTER) | ||
| 620 | return ERR_PTR(-EIO); | ||
| 621 | return dentry_open(tty->link->driver_data, flags, current_cred()); | ||
| 622 | } | ||
| 623 | |||
| 624 | static int pty_get_peer(struct tty_struct *tty, int flags) | ||
| 625 | { | 615 | { |
| 626 | int fd = -1; | 616 | int fd = -1; |
| 627 | struct file *filp = NULL; | 617 | struct file *filp; |
| 628 | int retval = -EINVAL; | 618 | int retval = -EINVAL; |
| 619 | struct path path; | ||
| 620 | |||
| 621 | if (tty->driver != ptm_driver) | ||
| 622 | return -EIO; | ||
| 629 | 623 | ||
| 630 | fd = get_unused_fd_flags(0); | 624 | fd = get_unused_fd_flags(0); |
| 631 | if (fd < 0) { | 625 | if (fd < 0) { |
| @@ -633,7 +627,16 @@ static int pty_get_peer(struct tty_struct *tty, int flags) | |||
| 633 | goto err; | 627 | goto err; |
| 634 | } | 628 | } |
| 635 | 629 | ||
| 636 | filp = pty_open_peer(tty, flags); | 630 | /* Compute the slave's path */ |
| 631 | path.mnt = devpts_mntget(master, tty->driver_data); | ||
| 632 | if (IS_ERR(path.mnt)) { | ||
| 633 | retval = PTR_ERR(path.mnt); | ||
| 634 | goto err_put; | ||
| 635 | } | ||
| 636 | path.dentry = tty->link->driver_data; | ||
| 637 | |||
| 638 | filp = dentry_open(&path, flags, current_cred()); | ||
| 639 | mntput(path.mnt); | ||
| 637 | if (IS_ERR(filp)) { | 640 | if (IS_ERR(filp)) { |
| 638 | retval = PTR_ERR(filp); | 641 | retval = PTR_ERR(filp); |
| 639 | goto err_put; | 642 | goto err_put; |
| @@ -662,8 +665,6 @@ static int pty_unix98_ioctl(struct tty_struct *tty, | |||
| 662 | return pty_get_pktmode(tty, (int __user *)arg); | 665 | return pty_get_pktmode(tty, (int __user *)arg); |
| 663 | case TIOCGPTN: /* Get PT Number */ | 666 | case TIOCGPTN: /* Get PT Number */ |
| 664 | return put_user(tty->index, (unsigned int __user *)arg); | 667 | return put_user(tty->index, (unsigned int __user *)arg); |
| 665 | case TIOCGPTPEER: /* Open the other end */ | ||
| 666 | return pty_get_peer(tty, (int) arg); | ||
| 667 | case TIOCSIG: /* Send signal to other side of pty */ | 668 | case TIOCSIG: /* Send signal to other side of pty */ |
| 668 | return pty_signal(tty, (int) arg); | 669 | return pty_signal(tty, (int) arg); |
| 669 | } | 670 | } |
| @@ -791,7 +792,6 @@ static int ptmx_open(struct inode *inode, struct file *filp) | |||
| 791 | { | 792 | { |
| 792 | struct pts_fs_info *fsi; | 793 | struct pts_fs_info *fsi; |
| 793 | struct tty_struct *tty; | 794 | struct tty_struct *tty; |
| 794 | struct path *pts_path; | ||
| 795 | struct dentry *dentry; | 795 | struct dentry *dentry; |
| 796 | int retval; | 796 | int retval; |
| 797 | int index; | 797 | int index; |
| @@ -845,26 +845,16 @@ static int ptmx_open(struct inode *inode, struct file *filp) | |||
| 845 | retval = PTR_ERR(dentry); | 845 | retval = PTR_ERR(dentry); |
| 846 | goto err_release; | 846 | goto err_release; |
| 847 | } | 847 | } |
| 848 | /* We need to cache a fake path for TIOCGPTPEER. */ | 848 | tty->link->driver_data = dentry; |
| 849 | pts_path = kmalloc(sizeof(struct path), GFP_KERNEL); | ||
| 850 | if (!pts_path) | ||
| 851 | goto err_release; | ||
| 852 | pts_path->mnt = filp->f_path.mnt; | ||
| 853 | pts_path->dentry = dentry; | ||
| 854 | path_get(pts_path); | ||
| 855 | tty->link->driver_data = pts_path; | ||
| 856 | 849 | ||
| 857 | retval = ptm_driver->ops->open(tty, filp); | 850 | retval = ptm_driver->ops->open(tty, filp); |
| 858 | if (retval) | 851 | if (retval) |
| 859 | goto err_path_put; | 852 | goto err_release; |
| 860 | 853 | ||
| 861 | tty_debug_hangup(tty, "opening (count=%d)\n", tty->count); | 854 | tty_debug_hangup(tty, "opening (count=%d)\n", tty->count); |
| 862 | 855 | ||
| 863 | tty_unlock(tty); | 856 | tty_unlock(tty); |
| 864 | return 0; | 857 | return 0; |
| 865 | err_path_put: | ||
| 866 | path_put(pts_path); | ||
| 867 | kfree(pts_path); | ||
| 868 | err_release: | 858 | err_release: |
| 869 | tty_unlock(tty); | 859 | tty_unlock(tty); |
| 870 | // This will also put-ref the fsi | 860 | // This will also put-ref the fsi |
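The pty rework replaces the cached struct path (which pinned whatever vfsmount the master happened to be opened through) with ptm_open_peer(): the TIOCGPTPEER case in tty_io.c now passes the struct file of the ptmx node down, devpts_mntget() resolves the correct /dev/pts mount, and the slave dentry cached in tty->link->driver_data is opened against that mount. The userspace interface is unchanged; a minimal, hedged usage example of TIOCGPTPEER, which hands back a slave file descriptor without ever resolving its /dev/pts path by name (requires a v4.13+ kernel; error handling trimmed):

```c
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>

#ifndef TIOCGPTPEER
#include <asm/ioctls.h>		/* older libcs may not expose TIOCGPTPEER */
#endif

int main(void)
{
	int master = posix_openpt(O_RDWR | O_NOCTTY);
	int slave;

	if (master < 0 || grantpt(master) || unlockpt(master)) {
		perror("ptmx setup");
		return 1;
	}

	/* Ask the kernel for the peer directly; no path lookup involved. */
	slave = ioctl(master, TIOCGPTPEER, O_RDWR | O_NOCTTY);
	if (slave < 0) {
		perror("TIOCGPTPEER");
		return 1;
	}

	dprintf(slave, "hello from the slave side\n");	/* shows up on the master */
	close(slave);
	close(master);
	return 0;
}
```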
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index b5def356af63..1aab3010fbfa 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c | |||
| @@ -1043,13 +1043,24 @@ int serial8250_register_8250_port(struct uart_8250_port *up) | |||
| 1043 | if (up->dl_write) | 1043 | if (up->dl_write) |
| 1044 | uart->dl_write = up->dl_write; | 1044 | uart->dl_write = up->dl_write; |
| 1045 | 1045 | ||
| 1046 | if (serial8250_isa_config != NULL) | 1046 | if (uart->port.type != PORT_8250_CIR) { |
| 1047 | serial8250_isa_config(0, &uart->port, | 1047 | if (serial8250_isa_config != NULL) |
| 1048 | &uart->capabilities); | 1048 | serial8250_isa_config(0, &uart->port, |
| 1049 | &uart->capabilities); | ||
| 1050 | |||
| 1051 | ret = uart_add_one_port(&serial8250_reg, | ||
| 1052 | &uart->port); | ||
| 1053 | if (ret == 0) | ||
| 1054 | ret = uart->port.line; | ||
| 1055 | } else { | ||
| 1056 | dev_info(uart->port.dev, | ||
| 1057 | "skipping CIR port at 0x%lx / 0x%llx, IRQ %d\n", | ||
| 1058 | uart->port.iobase, | ||
| 1059 | (unsigned long long)uart->port.mapbase, | ||
| 1060 | uart->port.irq); | ||
| 1049 | 1061 | ||
| 1050 | ret = uart_add_one_port(&serial8250_reg, &uart->port); | 1062 | ret = 0; |
| 1051 | if (ret == 0) | 1063 | } |
| 1052 | ret = uart->port.line; | ||
| 1053 | } | 1064 | } |
| 1054 | mutex_unlock(&serial_mutex); | 1065 | mutex_unlock(&serial_mutex); |
| 1055 | 1066 | ||
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 8a857bb34fbb..1888d168a41c 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c | |||
| @@ -142,15 +142,7 @@ static struct vendor_data vendor_sbsa = { | |||
| 142 | .fixed_options = true, | 142 | .fixed_options = true, |
| 143 | }; | 143 | }; |
| 144 | 144 | ||
| 145 | /* | 145 | #ifdef CONFIG_ACPI_SPCR_TABLE |
| 146 | * Erratum 44 for QDF2432v1 and QDF2400v1 SoCs describes the BUSY bit as | ||
| 147 | * occasionally getting stuck as 1. To avoid the potential for a hang, check | ||
| 148 | * TXFE == 0 instead of BUSY == 1. This may not be suitable for all UART | ||
| 149 | * implementations, so only do so if an affected platform is detected in | ||
| 150 | * parse_spcr(). | ||
| 151 | */ | ||
| 152 | static bool qdf2400_e44_present = false; | ||
| 153 | |||
| 154 | static struct vendor_data vendor_qdt_qdf2400_e44 = { | 146 | static struct vendor_data vendor_qdt_qdf2400_e44 = { |
| 155 | .reg_offset = pl011_std_offsets, | 147 | .reg_offset = pl011_std_offsets, |
| 156 | .fr_busy = UART011_FR_TXFE, | 148 | .fr_busy = UART011_FR_TXFE, |
| @@ -165,6 +157,7 @@ static struct vendor_data vendor_qdt_qdf2400_e44 = { | |||
| 165 | .always_enabled = true, | 157 | .always_enabled = true, |
| 166 | .fixed_options = true, | 158 | .fixed_options = true, |
| 167 | }; | 159 | }; |
| 160 | #endif | ||
| 168 | 161 | ||
| 169 | static u16 pl011_st_offsets[REG_ARRAY_SIZE] = { | 162 | static u16 pl011_st_offsets[REG_ARRAY_SIZE] = { |
| 170 | [REG_DR] = UART01x_DR, | 163 | [REG_DR] = UART01x_DR, |
| @@ -2375,12 +2368,14 @@ static int __init pl011_console_match(struct console *co, char *name, int idx, | |||
| 2375 | resource_size_t addr; | 2368 | resource_size_t addr; |
| 2376 | int i; | 2369 | int i; |
| 2377 | 2370 | ||
| 2378 | if (strcmp(name, "qdf2400_e44") == 0) { | 2371 | /* |
| 2379 | pr_info_once("UART: Working around QDF2400 SoC erratum 44"); | 2372 | * Systems affected by the Qualcomm Technologies QDF2400 E44 erratum |
| 2380 | qdf2400_e44_present = true; | 2373 | * have a distinct console name, so make sure we check for that. |
| 2381 | } else if (strcmp(name, "pl011") != 0) { | 2374 | * The actual implementation of the erratum occurs in the probe |
| 2375 | * function. | ||
| 2376 | */ | ||
| 2377 | if ((strcmp(name, "qdf2400_e44") != 0) && (strcmp(name, "pl011") != 0)) | ||
| 2382 | return -ENODEV; | 2378 | return -ENODEV; |
| 2383 | } | ||
| 2384 | 2379 | ||
| 2385 | if (uart_parse_earlycon(options, &iotype, &addr, &options)) | 2380 | if (uart_parse_earlycon(options, &iotype, &addr, &options)) |
| 2386 | return -ENODEV; | 2381 | return -ENODEV; |
| @@ -2734,11 +2729,17 @@ static int sbsa_uart_probe(struct platform_device *pdev) | |||
| 2734 | } | 2729 | } |
| 2735 | uap->port.irq = ret; | 2730 | uap->port.irq = ret; |
| 2736 | 2731 | ||
| 2737 | uap->reg_offset = vendor_sbsa.reg_offset; | 2732 | #ifdef CONFIG_ACPI_SPCR_TABLE |
| 2738 | uap->vendor = qdf2400_e44_present ? | 2733 | if (qdf2400_e44_present) { |
| 2739 | &vendor_qdt_qdf2400_e44 : &vendor_sbsa; | 2734 | dev_info(&pdev->dev, "working around QDF2400 SoC erratum 44\n"); |
| 2735 | uap->vendor = &vendor_qdt_qdf2400_e44; | ||
| 2736 | } else | ||
| 2737 | #endif | ||
| 2738 | uap->vendor = &vendor_sbsa; | ||
| 2739 | |||
| 2740 | uap->reg_offset = uap->vendor->reg_offset; | ||
| 2740 | uap->fifosize = 32; | 2741 | uap->fifosize = 32; |
| 2741 | uap->port.iotype = vendor_sbsa.access_32b ? UPIO_MEM32 : UPIO_MEM; | 2742 | uap->port.iotype = uap->vendor->access_32b ? UPIO_MEM32 : UPIO_MEM; |
| 2742 | uap->port.ops = &sbsa_uart_pops; | 2743 | uap->port.ops = &sbsa_uart_pops; |
| 2743 | uap->fixed_baud = baudrate; | 2744 | uap->fixed_baud = baudrate; |
| 2744 | 2745 | ||
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 974b13d24401..10c4038c0e8d 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c | |||
| @@ -2518,6 +2518,9 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
| 2518 | case TIOCSSERIAL: | 2518 | case TIOCSSERIAL: |
| 2519 | tty_warn_deprecated_flags(p); | 2519 | tty_warn_deprecated_flags(p); |
| 2520 | break; | 2520 | break; |
| 2521 | case TIOCGPTPEER: | ||
| 2522 | /* Special because the struct file is needed */ | ||
| 2523 | return ptm_open_peer(file, tty, (int)arg); | ||
| 2521 | default: | 2524 | default: |
| 2522 | retval = tty_jobctrl_ioctl(tty, real_tty, file, cmd, arg); | 2525 | retval = tty_jobctrl_ioctl(tty, real_tty, file, cmd, arg); |
| 2523 | if (retval != -ENOIOCTLCMD) | 2526 | if (retval != -ENOIOCTLCMD) |
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index ab1bb3b538ac..7f277b092b5b 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c | |||
| @@ -1888,7 +1888,7 @@ void usb_hcd_flush_endpoint(struct usb_device *udev, | |||
| 1888 | /* No more submits can occur */ | 1888 | /* No more submits can occur */ |
| 1889 | spin_lock_irq(&hcd_urb_list_lock); | 1889 | spin_lock_irq(&hcd_urb_list_lock); |
| 1890 | rescan: | 1890 | rescan: |
| 1891 | list_for_each_entry (urb, &ep->urb_list, urb_list) { | 1891 | list_for_each_entry_reverse(urb, &ep->urb_list, urb_list) { |
| 1892 | int is_in; | 1892 | int is_in; |
| 1893 | 1893 | ||
| 1894 | if (urb->unlinked) | 1894 | if (urb->unlinked) |
| @@ -2485,6 +2485,8 @@ void usb_hc_died (struct usb_hcd *hcd) | |||
| 2485 | } | 2485 | } |
| 2486 | if (usb_hcd_is_primary_hcd(hcd) && hcd->shared_hcd) { | 2486 | if (usb_hcd_is_primary_hcd(hcd) && hcd->shared_hcd) { |
| 2487 | hcd = hcd->shared_hcd; | 2487 | hcd = hcd->shared_hcd; |
| 2488 | clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); | ||
| 2489 | set_bit(HCD_FLAG_DEAD, &hcd->flags); | ||
| 2488 | if (hcd->rh_registered) { | 2490 | if (hcd->rh_registered) { |
| 2489 | clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); | 2491 | clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); |
| 2490 | 2492 | ||
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 6e6797d145dd..822f8c50e423 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c | |||
| @@ -4725,7 +4725,8 @@ hub_power_remaining(struct usb_hub *hub) | |||
| 4725 | static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, | 4725 | static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, |
| 4726 | u16 portchange) | 4726 | u16 portchange) |
| 4727 | { | 4727 | { |
| 4728 | int status, i; | 4728 | int status = -ENODEV; |
| 4729 | int i; | ||
| 4729 | unsigned unit_load; | 4730 | unsigned unit_load; |
| 4730 | struct usb_device *hdev = hub->hdev; | 4731 | struct usb_device *hdev = hub->hdev; |
| 4731 | struct usb_hcd *hcd = bus_to_hcd(hdev->bus); | 4732 | struct usb_hcd *hcd = bus_to_hcd(hdev->bus); |
| @@ -4929,9 +4930,10 @@ loop: | |||
| 4929 | 4930 | ||
| 4930 | done: | 4931 | done: |
| 4931 | hub_port_disable(hub, port1, 1); | 4932 | hub_port_disable(hub, port1, 1); |
| 4932 | if (hcd->driver->relinquish_port && !hub->hdev->parent) | 4933 | if (hcd->driver->relinquish_port && !hub->hdev->parent) { |
| 4933 | hcd->driver->relinquish_port(hcd, port1); | 4934 | if (status != -ENOTCONN && status != -ENODEV) |
| 4934 | 4935 | hcd->driver->relinquish_port(hcd, port1); | |
| 4936 | } | ||
| 4935 | } | 4937 | } |
| 4936 | 4938 | ||
| 4937 | /* Handle physical or logical connection change events. | 4939 | /* Handle physical or logical connection change events. |
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 3116edfcdc18..574da2b4529c 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c | |||
| @@ -150,6 +150,9 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
| 150 | /* appletouch */ | 150 | /* appletouch */ |
| 151 | { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME }, | 151 | { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME }, |
| 152 | 152 | ||
| 153 | /* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */ | ||
| 154 | { USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM }, | ||
| 155 | |||
| 153 | /* Avision AV600U */ | 156 | /* Avision AV600U */ |
| 154 | { USB_DEVICE(0x0638, 0x0a13), .driver_info = | 157 | { USB_DEVICE(0x0638, 0x0a13), .driver_info = |
| 155 | USB_QUIRK_STRING_FETCH_255 }, | 158 | USB_QUIRK_STRING_FETCH_255 }, |
| @@ -249,6 +252,7 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = { | |||
| 249 | { USB_DEVICE(0x093a, 0x2500), .driver_info = USB_QUIRK_RESET_RESUME }, | 252 | { USB_DEVICE(0x093a, 0x2500), .driver_info = USB_QUIRK_RESET_RESUME }, |
| 250 | { USB_DEVICE(0x093a, 0x2510), .driver_info = USB_QUIRK_RESET_RESUME }, | 253 | { USB_DEVICE(0x093a, 0x2510), .driver_info = USB_QUIRK_RESET_RESUME }, |
| 251 | { USB_DEVICE(0x093a, 0x2521), .driver_info = USB_QUIRK_RESET_RESUME }, | 254 | { USB_DEVICE(0x093a, 0x2521), .driver_info = USB_QUIRK_RESET_RESUME }, |
| 255 | { USB_DEVICE(0x03f0, 0x2b4a), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
| 252 | 256 | ||
| 253 | /* Logitech Optical Mouse M90/M100 */ | 257 | /* Logitech Optical Mouse M90/M100 */ |
| 254 | { USB_DEVICE(0x046d, 0xc05a), .driver_info = USB_QUIRK_RESET_RESUME }, | 258 | { USB_DEVICE(0x046d, 0xc05a), .driver_info = USB_QUIRK_RESET_RESUME }, |
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 6b299c7b7656..f064f1549333 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c | |||
| @@ -896,9 +896,40 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb, | |||
| 896 | if (!node) { | 896 | if (!node) { |
| 897 | trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST; | 897 | trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST; |
| 898 | 898 | ||
| 899 | /* | ||
| 900 | * USB Specification 2.0 Section 5.9.2 states that: "If | ||
| 901 | * there is only a single transaction in the microframe, | ||
| 902 | * only a DATA0 data packet PID is used. If there are | ||
| 903 | * two transactions per microframe, DATA1 is used for | ||
| 904 | * the first transaction data packet and DATA0 is used | ||
| 905 | * for the second transaction data packet. If there are | ||
| 906 | * three transactions per microframe, DATA2 is used for | ||
| 907 | * the first transaction data packet, DATA1 is used for | ||
| 908 | * the second, and DATA0 is used for the third." | ||
| 909 | * | ||
| 910 | * IOW, we should satisfy the following cases: | ||
| 911 | * | ||
| 912 | * 1) length <= maxpacket | ||
| 913 | * - DATA0 | ||
| 914 | * | ||
| 915 | * 2) maxpacket < length <= (2 * maxpacket) | ||
| 916 | * - DATA1, DATA0 | ||
| 917 | * | ||
| 918 | * 3) (2 * maxpacket) < length <= (3 * maxpacket) | ||
| 919 | * - DATA2, DATA1, DATA0 | ||
| 920 | */ | ||
| 899 | if (speed == USB_SPEED_HIGH) { | 921 | if (speed == USB_SPEED_HIGH) { |
| 900 | struct usb_ep *ep = &dep->endpoint; | 922 | struct usb_ep *ep = &dep->endpoint; |
| 901 | trb->size |= DWC3_TRB_SIZE_PCM1(ep->mult - 1); | 923 | unsigned int mult = ep->mult - 1; |
| 924 | unsigned int maxp = usb_endpoint_maxp(ep->desc); | ||
| 925 | |||
| 926 | if (length <= (2 * maxp)) | ||
| 927 | mult--; | ||
| 928 | |||
| 929 | if (length <= maxp) | ||
| 930 | mult--; | ||
| 931 | |||
| 932 | trb->size |= DWC3_TRB_SIZE_PCM1(mult); | ||
| 902 | } | 933 | } |
| 903 | } else { | 934 | } else { |
| 904 | trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS; | 935 | trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS; |
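The comment added in this hunk spells out how the request length maps to the number of transactions per microframe, and hence to the starting DATA PID, and the code derives the TRB's PCM1 field from that. As a sanity check, the same mapping can be written as plain round-up arithmetic; the snippet below is an illustrative, standalone C program (not dwc3 driver code) and agrees with the patch's decrement chain for the three cases the comment lists.

```c
#include <assert.h>

/*
 * Packets needed in one microframe for a high-speed isochronous
 * transfer of "length" bytes with wMaxPacketSize "maxp".  The TRB's
 * PCM1 field is one less than this, which also selects the starting
 * DATA PID:
 *   1 packet  -> PCM1 = 0 -> DATA0
 *   2 packets -> PCM1 = 1 -> DATA1, DATA0
 *   3 packets -> PCM1 = 2 -> DATA2, DATA1, DATA0
 */
static unsigned int isoc_packets_per_uframe(unsigned int length,
					    unsigned int maxp)
{
	unsigned int packets = (length + maxp - 1) / maxp;	/* round up */

	if (packets == 0)
		packets = 1;	/* a zero-length transfer still uses DATA0 */
	if (packets > 3)
		packets = 3;	/* USB 2.0 allows at most 3 per microframe */
	return packets;
}

int main(void)
{
	unsigned int maxp = 1024;	/* typical high-bandwidth maxpacket */

	assert(isoc_packets_per_uframe(512,  maxp) - 1 == 0);	/* case 1 */
	assert(isoc_packets_per_uframe(1500, maxp) - 1 == 1);	/* case 2 */
	assert(isoc_packets_per_uframe(3000, maxp) - 1 == 2);	/* case 3 */
	return 0;
}
```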
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index 62dc9c7798e7..e1de8fe599a3 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c | |||
| @@ -838,21 +838,32 @@ static struct renesas_usb3_request *usb3_get_request(struct renesas_usb3_ep | |||
| 838 | return usb3_req; | 838 | return usb3_req; |
| 839 | } | 839 | } |
| 840 | 840 | ||
| 841 | static void usb3_request_done(struct renesas_usb3_ep *usb3_ep, | 841 | static void __usb3_request_done(struct renesas_usb3_ep *usb3_ep, |
| 842 | struct renesas_usb3_request *usb3_req, int status) | 842 | struct renesas_usb3_request *usb3_req, |
| 843 | int status) | ||
| 843 | { | 844 | { |
| 844 | struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep); | 845 | struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep); |
| 845 | unsigned long flags; | ||
| 846 | 846 | ||
| 847 | dev_dbg(usb3_to_dev(usb3), "giveback: ep%2d, %u, %u, %d\n", | 847 | dev_dbg(usb3_to_dev(usb3), "giveback: ep%2d, %u, %u, %d\n", |
| 848 | usb3_ep->num, usb3_req->req.length, usb3_req->req.actual, | 848 | usb3_ep->num, usb3_req->req.length, usb3_req->req.actual, |
| 849 | status); | 849 | status); |
| 850 | usb3_req->req.status = status; | 850 | usb3_req->req.status = status; |
| 851 | spin_lock_irqsave(&usb3->lock, flags); | ||
| 852 | usb3_ep->started = false; | 851 | usb3_ep->started = false; |
| 853 | list_del_init(&usb3_req->queue); | 852 | list_del_init(&usb3_req->queue); |
| 854 | spin_unlock_irqrestore(&usb3->lock, flags); | 853 | spin_unlock(&usb3->lock); |
| 855 | usb_gadget_giveback_request(&usb3_ep->ep, &usb3_req->req); | 854 | usb_gadget_giveback_request(&usb3_ep->ep, &usb3_req->req); |
| 855 | spin_lock(&usb3->lock); | ||
| 856 | } | ||
| 857 | |||
| 858 | static void usb3_request_done(struct renesas_usb3_ep *usb3_ep, | ||
| 859 | struct renesas_usb3_request *usb3_req, int status) | ||
| 860 | { | ||
| 861 | struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep); | ||
| 862 | unsigned long flags; | ||
| 863 | |||
| 864 | spin_lock_irqsave(&usb3->lock, flags); | ||
| 865 | __usb3_request_done(usb3_ep, usb3_req, status); | ||
| 866 | spin_unlock_irqrestore(&usb3->lock, flags); | ||
| 856 | } | 867 | } |
| 857 | 868 | ||
| 858 | static void usb3_irq_epc_pipe0_status_end(struct renesas_usb3 *usb3) | 869 | static void usb3_irq_epc_pipe0_status_end(struct renesas_usb3 *usb3) |
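The split above follows a common gadget-controller pattern: a double-underscore helper that expects the controller lock to be held and drops it only around usb_gadget_giveback_request(), so the class driver's completion callback may re-enter the driver, plus a public wrapper that owns lock acquisition and release. A minimal standalone sketch of that shape, with a pthread mutex standing in for the driver's spinlock (illustrative names, not Renesas driver code):

```c
#include <pthread.h>
#include <stdio.h>

struct controller {
	pthread_mutex_t lock;		/* stands in for usb3->lock */
	int started;
	void (*complete)(int status);	/* class-driver completion callback */
};

/* Caller must hold ctrl->lock, mirroring __usb3_request_done(). */
static void __request_done(struct controller *ctrl, int status)
{
	ctrl->started = 0;		/* driver state updated under the lock */

	pthread_mutex_unlock(&ctrl->lock);
	ctrl->complete(status);		/* callback may call back into the driver */
	pthread_mutex_lock(&ctrl->lock);
}

/* Public entry point: takes and releases the lock around the helper. */
static void request_done(struct controller *ctrl, int status)
{
	pthread_mutex_lock(&ctrl->lock);
	__request_done(ctrl, status);
	pthread_mutex_unlock(&ctrl->lock);
}

static void on_complete(int status)
{
	printf("request completed, status=%d\n", status);
}

int main(void)
{
	struct controller ctrl = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.started = 1,
		.complete = on_complete,
	};

	request_done(&ctrl, 0);
	return 0;
}
```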
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index 858fefd67ebe..c8f38649f749 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c | |||
| @@ -98,6 +98,7 @@ enum amd_chipset_gen { | |||
| 98 | AMD_CHIPSET_HUDSON2, | 98 | AMD_CHIPSET_HUDSON2, |
| 99 | AMD_CHIPSET_BOLTON, | 99 | AMD_CHIPSET_BOLTON, |
| 100 | AMD_CHIPSET_YANGTZE, | 100 | AMD_CHIPSET_YANGTZE, |
| 101 | AMD_CHIPSET_TAISHAN, | ||
| 101 | AMD_CHIPSET_UNKNOWN, | 102 | AMD_CHIPSET_UNKNOWN, |
| 102 | }; | 103 | }; |
| 103 | 104 | ||
| @@ -141,6 +142,11 @@ static int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo) | |||
| 141 | pinfo->sb_type.gen = AMD_CHIPSET_SB700; | 142 | pinfo->sb_type.gen = AMD_CHIPSET_SB700; |
| 142 | else if (rev >= 0x40 && rev <= 0x4f) | 143 | else if (rev >= 0x40 && rev <= 0x4f) |
| 143 | pinfo->sb_type.gen = AMD_CHIPSET_SB800; | 144 | pinfo->sb_type.gen = AMD_CHIPSET_SB800; |
| 145 | } | ||
| 146 | pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, | ||
| 147 | 0x145c, NULL); | ||
| 148 | if (pinfo->smbus_dev) { | ||
| 149 | pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN; | ||
| 144 | } else { | 150 | } else { |
| 145 | pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, | 151 | pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, |
| 146 | PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL); | 152 | PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL); |
| @@ -260,11 +266,12 @@ int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev) | |||
| 260 | { | 266 | { |
| 261 | /* Make sure amd chipset type has already been initialized */ | 267 | /* Make sure amd chipset type has already been initialized */ |
| 262 | usb_amd_find_chipset_info(); | 268 | usb_amd_find_chipset_info(); |
| 263 | if (amd_chipset.sb_type.gen != AMD_CHIPSET_YANGTZE) | 269 | if (amd_chipset.sb_type.gen == AMD_CHIPSET_YANGTZE || |
| 264 | return 0; | 270 | amd_chipset.sb_type.gen == AMD_CHIPSET_TAISHAN) { |
| 265 | 271 | dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n"); | |
| 266 | dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n"); | 272 | return 1; |
| 267 | return 1; | 273 | } |
| 274 | return 0; | ||
| 268 | } | 275 | } |
| 269 | EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk); | 276 | EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk); |
| 270 | 277 | ||
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 76decb8011eb..3344ffd5bb13 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c | |||
| @@ -139,6 +139,7 @@ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep) | |||
| 139 | "Could not flush host TX%d fifo: csr: %04x\n", | 139 | "Could not flush host TX%d fifo: csr: %04x\n", |
| 140 | ep->epnum, csr)) | 140 | ep->epnum, csr)) |
| 141 | return; | 141 | return; |
| 142 | mdelay(1); | ||
| 142 | } | 143 | } |
| 143 | } | 144 | } |
| 144 | 145 | ||
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c index 8fb86a5f458e..3d0dd2f97415 100644 --- a/drivers/usb/phy/phy-msm-usb.c +++ b/drivers/usb/phy/phy-msm-usb.c | |||
| @@ -197,6 +197,7 @@ struct msm_otg { | |||
| 197 | struct regulator *v3p3; | 197 | struct regulator *v3p3; |
| 198 | struct regulator *v1p8; | 198 | struct regulator *v1p8; |
| 199 | struct regulator *vddcx; | 199 | struct regulator *vddcx; |
| 200 | struct regulator_bulk_data supplies[3]; | ||
| 200 | 201 | ||
| 201 | struct reset_control *phy_rst; | 202 | struct reset_control *phy_rst; |
| 202 | struct reset_control *link_rst; | 203 | struct reset_control *link_rst; |
| @@ -1731,7 +1732,6 @@ static int msm_otg_reboot_notify(struct notifier_block *this, | |||
| 1731 | 1732 | ||
| 1732 | static int msm_otg_probe(struct platform_device *pdev) | 1733 | static int msm_otg_probe(struct platform_device *pdev) |
| 1733 | { | 1734 | { |
| 1734 | struct regulator_bulk_data regs[3]; | ||
| 1735 | int ret = 0; | 1735 | int ret = 0; |
| 1736 | struct device_node *np = pdev->dev.of_node; | 1736 | struct device_node *np = pdev->dev.of_node; |
| 1737 | struct msm_otg_platform_data *pdata; | 1737 | struct msm_otg_platform_data *pdata; |
| @@ -1817,17 +1817,18 @@ static int msm_otg_probe(struct platform_device *pdev) | |||
| 1817 | return motg->irq; | 1817 | return motg->irq; |
| 1818 | } | 1818 | } |
| 1819 | 1819 | ||
| 1820 | regs[0].supply = "vddcx"; | 1820 | motg->supplies[0].supply = "vddcx"; |
| 1821 | regs[1].supply = "v3p3"; | 1821 | motg->supplies[1].supply = "v3p3"; |
| 1822 | regs[2].supply = "v1p8"; | 1822 | motg->supplies[2].supply = "v1p8"; |
| 1823 | 1823 | ||
| 1824 | ret = devm_regulator_bulk_get(motg->phy.dev, ARRAY_SIZE(regs), regs); | 1824 | ret = devm_regulator_bulk_get(motg->phy.dev, ARRAY_SIZE(motg->supplies), |
| 1825 | motg->supplies); | ||
| 1825 | if (ret) | 1826 | if (ret) |
| 1826 | return ret; | 1827 | return ret; |
| 1827 | 1828 | ||
| 1828 | motg->vddcx = regs[0].consumer; | 1829 | motg->vddcx = motg->supplies[0].consumer; |
| 1829 | motg->v3p3 = regs[1].consumer; | 1830 | motg->v3p3 = motg->supplies[1].consumer; |
| 1830 | motg->v1p8 = regs[2].consumer; | 1831 | motg->v1p8 = motg->supplies[2].consumer; |
| 1831 | 1832 | ||
| 1832 | clk_set_rate(motg->clk, 60000000); | 1833 | clk_set_rate(motg->clk, 60000000); |
| 1833 | 1834 | ||
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c index 93fba9033b00..2c8161bcf5b5 100644 --- a/drivers/usb/renesas_usbhs/mod_gadget.c +++ b/drivers/usb/renesas_usbhs/mod_gadget.c | |||
| @@ -639,14 +639,11 @@ static int usbhsg_ep_disable(struct usb_ep *ep) | |||
| 639 | struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep); | 639 | struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep); |
| 640 | struct usbhs_pipe *pipe; | 640 | struct usbhs_pipe *pipe; |
| 641 | unsigned long flags; | 641 | unsigned long flags; |
| 642 | int ret = 0; | ||
| 643 | 642 | ||
| 644 | spin_lock_irqsave(&uep->lock, flags); | 643 | spin_lock_irqsave(&uep->lock, flags); |
| 645 | pipe = usbhsg_uep_to_pipe(uep); | 644 | pipe = usbhsg_uep_to_pipe(uep); |
| 646 | if (!pipe) { | 645 | if (!pipe) |
| 647 | ret = -EINVAL; | ||
| 648 | goto out; | 646 | goto out; |
| 649 | } | ||
| 650 | 647 | ||
| 651 | usbhsg_pipe_disable(uep); | 648 | usbhsg_pipe_disable(uep); |
| 652 | usbhs_pipe_free(pipe); | 649 | usbhs_pipe_free(pipe); |
diff --git a/drivers/usb/renesas_usbhs/rcar3.c b/drivers/usb/renesas_usbhs/rcar3.c index d544b331c9f2..02b67abfc2a1 100644 --- a/drivers/usb/renesas_usbhs/rcar3.c +++ b/drivers/usb/renesas_usbhs/rcar3.c | |||
| @@ -20,9 +20,13 @@ | |||
| 20 | /* Low Power Status register (LPSTS) */ | 20 | /* Low Power Status register (LPSTS) */ |
| 21 | #define LPSTS_SUSPM 0x4000 | 21 | #define LPSTS_SUSPM 0x4000 |
| 22 | 22 | ||
| 23 | /* USB General control register 2 (UGCTRL2), bit[31:6] should be 0 */ | 23 | /* |
| 24 | * USB General control register 2 (UGCTRL2) | ||
| 25 | * Remarks: bit[31:11] and bit[9:6] should be 0 | ||
| 26 | */ | ||
| 24 | #define UGCTRL2_RESERVED_3 0x00000001 /* bit[3:0] should be B'0001 */ | 27 | #define UGCTRL2_RESERVED_3 0x00000001 /* bit[3:0] should be B'0001 */ |
| 25 | #define UGCTRL2_USB0SEL_OTG 0x00000030 | 28 | #define UGCTRL2_USB0SEL_OTG 0x00000030 |
| 29 | #define UGCTRL2_VBUSSEL 0x00000400 | ||
| 26 | 30 | ||
| 27 | static void usbhs_write32(struct usbhs_priv *priv, u32 reg, u32 data) | 31 | static void usbhs_write32(struct usbhs_priv *priv, u32 reg, u32 data) |
| 28 | { | 32 | { |
| @@ -34,7 +38,8 @@ static int usbhs_rcar3_power_ctrl(struct platform_device *pdev, | |||
| 34 | { | 38 | { |
| 35 | struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev); | 39 | struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev); |
| 36 | 40 | ||
| 37 | usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG); | 41 | usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG | |
| 42 | UGCTRL2_VBUSSEL); | ||
| 38 | 43 | ||
| 39 | if (enable) { | 44 | if (enable) { |
| 40 | usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM); | 45 | usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM); |
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index f64e914a8985..2d945c9f975c 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c | |||
| @@ -142,6 +142,7 @@ static const struct usb_device_id id_table[] = { | |||
| 142 | { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */ | 142 | { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */ |
| 143 | { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */ | 143 | { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */ |
| 144 | { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */ | 144 | { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */ |
| 145 | { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */ | ||
| 145 | { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ | 146 | { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ |
| 146 | { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ | 147 | { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ |
| 147 | { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ | 148 | { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ |
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index ebe51f11105d..fe123153b1a5 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
| @@ -2025,6 +2025,8 @@ static const struct usb_device_id option_ids[] = { | |||
| 2025 | { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) }, /* D-Link DWM-158 */ | 2025 | { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) }, /* D-Link DWM-158 */ |
| 2026 | { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */ | 2026 | { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */ |
| 2027 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, | 2027 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, |
| 2028 | { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */ | ||
| 2029 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, | ||
| 2028 | { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ | 2030 | { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ |
| 2029 | { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ | 2031 | { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ |
| 2030 | { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */ | 2032 | { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */ |
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index c9ebefd8f35f..a585b477415d 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c | |||
| @@ -52,6 +52,8 @@ static const struct usb_device_id id_table[] = { | |||
| 52 | { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) }, | 52 | { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) }, |
| 53 | { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID), | 53 | { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID), |
| 54 | .driver_info = PL2303_QUIRK_ENDPOINT_HACK }, | 54 | .driver_info = PL2303_QUIRK_ENDPOINT_HACK }, |
| 55 | { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_UC485), | ||
| 56 | .driver_info = PL2303_QUIRK_ENDPOINT_HACK }, | ||
| 55 | { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) }, | 57 | { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) }, |
| 56 | { USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) }, | 58 | { USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) }, |
| 57 | { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) }, | 59 | { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) }, |
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index 09d9be88209e..3b5a15d1dc0d 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h | |||
| @@ -27,6 +27,7 @@ | |||
| 27 | #define ATEN_VENDOR_ID 0x0557 | 27 | #define ATEN_VENDOR_ID 0x0557 |
| 28 | #define ATEN_VENDOR_ID2 0x0547 | 28 | #define ATEN_VENDOR_ID2 0x0547 |
| 29 | #define ATEN_PRODUCT_ID 0x2008 | 29 | #define ATEN_PRODUCT_ID 0x2008 |
| 30 | #define ATEN_PRODUCT_UC485 0x2021 | ||
| 30 | #define ATEN_PRODUCT_ID2 0x2118 | 31 | #define ATEN_PRODUCT_ID2 0x2118 |
| 31 | 32 | ||
| 32 | #define IODATA_VENDOR_ID 0x04bb | 33 | #define IODATA_VENDOR_ID 0x04bb |
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index cbea9f329e71..cde115359793 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h | |||
| @@ -124,9 +124,9 @@ UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999, | |||
| 124 | /* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */ | 124 | /* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */ |
| 125 | UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999, | 125 | UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999, |
| 126 | "Initio Corporation", | 126 | "Initio Corporation", |
| 127 | "", | 127 | "INIC-3069", |
| 128 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | 128 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
| 129 | US_FL_NO_ATA_1X), | 129 | US_FL_NO_ATA_1X | US_FL_IGNORE_RESIDUE), |
| 130 | 130 | ||
| 131 | /* Reported-by: Tom Arild Naess <tanaess@gmail.com> */ | 131 | /* Reported-by: Tom Arild Naess <tanaess@gmail.com> */ |
| 132 | UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999, | 132 | UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999, |
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c index 06615934fed1..0dceb9fa3a06 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c | |||
| @@ -315,6 +315,7 @@ static int usb_stor_control_thread(void * __us) | |||
| 315 | { | 315 | { |
| 316 | struct us_data *us = (struct us_data *)__us; | 316 | struct us_data *us = (struct us_data *)__us; |
| 317 | struct Scsi_Host *host = us_to_host(us); | 317 | struct Scsi_Host *host = us_to_host(us); |
| 318 | struct scsi_cmnd *srb; | ||
| 318 | 319 | ||
| 319 | for (;;) { | 320 | for (;;) { |
| 320 | usb_stor_dbg(us, "*** thread sleeping\n"); | 321 | usb_stor_dbg(us, "*** thread sleeping\n"); |
| @@ -330,6 +331,7 @@ static int usb_stor_control_thread(void * __us) | |||
| 330 | scsi_lock(host); | 331 | scsi_lock(host); |
| 331 | 332 | ||
| 332 | /* When we are called with no command pending, we're done */ | 333 | /* When we are called with no command pending, we're done */ |
| 334 | srb = us->srb; | ||
| 333 | if (us->srb == NULL) { | 335 | if (us->srb == NULL) { |
| 334 | scsi_unlock(host); | 336 | scsi_unlock(host); |
| 335 | mutex_unlock(&us->dev_mutex); | 337 | mutex_unlock(&us->dev_mutex); |
| @@ -398,14 +400,11 @@ static int usb_stor_control_thread(void * __us) | |||
| 398 | /* lock access to the state */ | 400 | /* lock access to the state */ |
| 399 | scsi_lock(host); | 401 | scsi_lock(host); |
| 400 | 402 | ||
| 401 | /* indicate that the command is done */ | 403 | /* was the command aborted? */ |
| 402 | if (us->srb->result != DID_ABORT << 16) { | 404 | if (us->srb->result == DID_ABORT << 16) { |
| 403 | usb_stor_dbg(us, "scsi cmd done, result=0x%x\n", | ||
| 404 | us->srb->result); | ||
| 405 | us->srb->scsi_done(us->srb); | ||
| 406 | } else { | ||
| 407 | SkipForAbort: | 405 | SkipForAbort: |
| 408 | usb_stor_dbg(us, "scsi command aborted\n"); | 406 | usb_stor_dbg(us, "scsi command aborted\n"); |
| 407 | srb = NULL; /* Don't call srb->scsi_done() */ | ||
| 409 | } | 408 | } |
| 410 | 409 | ||
| 411 | /* | 410 | /* |
| @@ -429,6 +428,13 @@ SkipForAbort: | |||
| 429 | 428 | ||
| 430 | /* unlock the device pointers */ | 429 | /* unlock the device pointers */ |
| 431 | mutex_unlock(&us->dev_mutex); | 430 | mutex_unlock(&us->dev_mutex); |
| 431 | |||
| 432 | /* now that the locks are released, notify the SCSI core */ | ||
| 433 | if (srb) { | ||
| 434 | usb_stor_dbg(us, "scsi cmd done, result=0x%x\n", | ||
| 435 | srb->result); | ||
| 436 | srb->scsi_done(srb); | ||
| 437 | } | ||
| 432 | } /* for (;;) */ | 438 | } /* for (;;) */ |
| 433 | 439 | ||
| 434 | /* Wait until we are told to stop */ | 440 | /* Wait until we are told to stop */ |
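The rework above captures the command pointer while the host lock is held, nulls the local copy in the abort path so an aborted command is never completed, and calls ->scsi_done() only after both the host lock and the device mutex have been released. A hedged, standalone sketch of that control flow, with pthread mutexes standing in for scsi_lock() and us->dev_mutex (illustrative types, not the usb-storage ones):

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct cmd {
	int result;
	void (*done)(struct cmd *);
};

struct us {
	pthread_mutex_t host_lock;	/* stands in for scsi_lock(host) */
	pthread_mutex_t dev_mutex;	/* stands in for us->dev_mutex   */
	struct cmd *srb;
};

static void handle_one_command(struct us *us, bool aborted)
{
	struct cmd *srb;

	pthread_mutex_lock(&us->dev_mutex);
	pthread_mutex_lock(&us->host_lock);

	srb = us->srb;			/* capture under the lock */
	if (aborted)
		srb = NULL;		/* don't call ->done() for aborted cmds */
	us->srb = NULL;

	pthread_mutex_unlock(&us->host_lock);
	pthread_mutex_unlock(&us->dev_mutex);

	/* Completion runs only after every lock has been released. */
	if (srb)
		srb->done(srb);
}

static void cmd_done(struct cmd *c)
{
	printf("cmd done, result=0x%x\n", c->result);
}

int main(void)
{
	struct cmd c = { .result = 0, .done = cmd_done };
	struct us us = {
		.host_lock = PTHREAD_MUTEX_INITIALIZER,
		.dev_mutex = PTHREAD_MUTEX_INITIALIZER,
		.srb = &c,
	};

	handle_one_command(&us, false);	/* completes the command */
	us.srb = &c;
	handle_one_command(&us, true);	/* aborted: ->done() not called */
	return 0;
}
```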
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c index ff01bed7112f..1e784adb89b1 100644 --- a/drivers/video/fbdev/efifb.c +++ b/drivers/video/fbdev/efifb.c | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | #include <asm/efi.h> | 17 | #include <asm/efi.h> |
| 18 | 18 | ||
| 19 | static bool request_mem_succeeded = false; | 19 | static bool request_mem_succeeded = false; |
| 20 | static bool nowc = false; | ||
| 20 | 21 | ||
| 21 | static struct fb_var_screeninfo efifb_defined = { | 22 | static struct fb_var_screeninfo efifb_defined = { |
| 22 | .activate = FB_ACTIVATE_NOW, | 23 | .activate = FB_ACTIVATE_NOW, |
| @@ -99,6 +100,8 @@ static int efifb_setup(char *options) | |||
| 99 | screen_info.lfb_height = simple_strtoul(this_opt+7, NULL, 0); | 100 | screen_info.lfb_height = simple_strtoul(this_opt+7, NULL, 0); |
| 100 | else if (!strncmp(this_opt, "width:", 6)) | 101 | else if (!strncmp(this_opt, "width:", 6)) |
| 101 | screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0); | 102 | screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0); |
| 103 | else if (!strcmp(this_opt, "nowc")) | ||
| 104 | nowc = true; | ||
| 102 | } | 105 | } |
| 103 | } | 106 | } |
| 104 | 107 | ||
| @@ -255,7 +258,10 @@ static int efifb_probe(struct platform_device *dev) | |||
| 255 | info->apertures->ranges[0].base = efifb_fix.smem_start; | 258 | info->apertures->ranges[0].base = efifb_fix.smem_start; |
| 256 | info->apertures->ranges[0].size = size_remap; | 259 | info->apertures->ranges[0].size = size_remap; |
| 257 | 260 | ||
| 258 | info->screen_base = ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len); | 261 | if (nowc) |
| 262 | info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len); | ||
| 263 | else | ||
| 264 | info->screen_base = ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len); | ||
| 259 | if (!info->screen_base) { | 265 | if (!info->screen_base) { |
| 260 | pr_err("efifb: abort, cannot ioremap video memory 0x%x @ 0x%lx\n", | 266 | pr_err("efifb: abort, cannot ioremap video memory 0x%x @ 0x%lx\n", |
| 261 | efifb_fix.smem_len, efifb_fix.smem_start); | 267 | efifb_fix.smem_len, efifb_fix.smem_start); |
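The new nowc flag is parsed in efifb_setup() and makes the probe path use a plain uncached ioremap() instead of ioremap_wc() for the framebuffer. Assuming the option string reaches the driver through the usual fbdev video= handling, it would be selected on the kernel command line roughly as follows:

```
# illustrative boot-time fragment; other parameters omitted
video=efifb:nowc
```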
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c index c166e0725be5..ba82f97fb42b 100644 --- a/drivers/video/fbdev/imxfb.c +++ b/drivers/video/fbdev/imxfb.c | |||
| @@ -1073,20 +1073,16 @@ static int imxfb_remove(struct platform_device *pdev) | |||
| 1073 | imxfb_disable_controller(fbi); | 1073 | imxfb_disable_controller(fbi); |
| 1074 | 1074 | ||
| 1075 | unregister_framebuffer(info); | 1075 | unregister_framebuffer(info); |
| 1076 | 1076 | fb_dealloc_cmap(&info->cmap); | |
| 1077 | pdata = dev_get_platdata(&pdev->dev); | 1077 | pdata = dev_get_platdata(&pdev->dev); |
| 1078 | if (pdata && pdata->exit) | 1078 | if (pdata && pdata->exit) |
| 1079 | pdata->exit(fbi->pdev); | 1079 | pdata->exit(fbi->pdev); |
| 1080 | |||
| 1081 | fb_dealloc_cmap(&info->cmap); | ||
| 1082 | kfree(info->pseudo_palette); | ||
| 1083 | framebuffer_release(info); | ||
| 1084 | |||
| 1085 | dma_free_wc(&pdev->dev, fbi->map_size, info->screen_base, | 1080 | dma_free_wc(&pdev->dev, fbi->map_size, info->screen_base, |
| 1086 | fbi->map_dma); | 1081 | fbi->map_dma); |
| 1087 | |||
| 1088 | iounmap(fbi->regs); | 1082 | iounmap(fbi->regs); |
| 1089 | release_mem_region(res->start, resource_size(res)); | 1083 | release_mem_region(res->start, resource_size(res)); |
| 1084 | kfree(info->pseudo_palette); | ||
| 1085 | framebuffer_release(info); | ||
| 1090 | 1086 | ||
| 1091 | return 0; | 1087 | return 0; |
| 1092 | } | 1088 | } |
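The reordering above matters because the old sequence released the fb_info (framebuffer_release(info)) and only afterwards dereferenced info->screen_base and fbi->map_size for dma_free_wc(), which looks like a use-after-free; the new order frees the owning structure last. A trivial standalone illustration of that teardown rule (generic names, not fbdev code):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Anything reached through the owning structure is released first;
 * the structure itself is freed last, mirroring probe in reverse. */
struct fb_state {
	void *screen_base;	/* stands in for the DMA'd framebuffer */
	void *pseudo_palette;
};

static struct fb_state *fb_setup(void)
{
	struct fb_state *s = malloc(sizeof(*s));

	s->screen_base = malloc(4096);
	s->pseudo_palette = malloc(16 * sizeof(unsigned int));
	return s;
}

static void fb_teardown(struct fb_state *s)
{
	/* Use and free everything reachable through s first ... */
	memset(s->screen_base, 0, 4096);
	free(s->screen_base);
	free(s->pseudo_palette);

	/* ... and free the owning structure last. */
	free(s);
}

int main(void)
{
	fb_teardown(fb_setup());
	printf("teardown done\n");
	return 0;
}
```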
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/core.c b/drivers/video/fbdev/omap2/omapfb/dss/core.c index eecf695c16f4..09e5bb013d28 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/core.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/core.c | |||
| @@ -193,7 +193,6 @@ static struct notifier_block omap_dss_pm_notif_block = { | |||
| 193 | 193 | ||
| 194 | static int __init omap_dss_probe(struct platform_device *pdev) | 194 | static int __init omap_dss_probe(struct platform_device *pdev) |
| 195 | { | 195 | { |
| 196 | struct omap_dss_board_info *pdata = pdev->dev.platform_data; | ||
| 197 | int r; | 196 | int r; |
| 198 | 197 | ||
| 199 | core.pdev = pdev; | 198 | core.pdev = pdev; |
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index 8feab810aed9..7f188b8d0c67 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile | |||
| @@ -7,9 +7,6 @@ obj-y += xenbus/ | |||
| 7 | nostackp := $(call cc-option, -fno-stack-protector) | 7 | nostackp := $(call cc-option, -fno-stack-protector) |
| 8 | CFLAGS_features.o := $(nostackp) | 8 | CFLAGS_features.o := $(nostackp) |
| 9 | 9 | ||
| 10 | CFLAGS_efi.o += -fshort-wchar | ||
| 11 | LDFLAGS += $(call ld-option, --no-wchar-size-warning) | ||
| 12 | |||
| 13 | dom0-$(CONFIG_ARM64) += arm-device.o | 10 | dom0-$(CONFIG_ARM64) += arm-device.o |
| 14 | dom0-$(CONFIG_PCI) += pci.o | 11 | dom0-$(CONFIG_PCI) += pci.o |
| 15 | dom0-$(CONFIG_USB_SUPPORT) += dbgp.o | 12 | dom0-$(CONFIG_USB_SUPPORT) += dbgp.o |
diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c index 4da69dbf7dca..1bdd02a6d6ac 100644 --- a/drivers/xen/biomerge.c +++ b/drivers/xen/biomerge.c | |||
| @@ -10,8 +10,7 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1, | |||
| 10 | unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page)); | 10 | unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page)); |
| 11 | unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page)); | 11 | unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page)); |
| 12 | 12 | ||
| 13 | return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) && | 13 | return bfn1 + PFN_DOWN(vec1->bv_offset + vec1->bv_len) == bfn2; |
| 14 | ((bfn1 == bfn2) || ((bfn1+1) == bfn2)); | ||
| 15 | #else | 14 | #else |
| 16 | /* | 15 | /* |
| 17 | * XXX: Add support for merging bio_vec when using different page | 16 | * XXX: Add support for merging bio_vec when using different page |
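The simplified test asks a single question: is the bus frame immediately after vec1's last byte the same frame vec2 starts in, i.e. are the two segments contiguous in bus address space. A worked standalone example of that arithmetic (a 4 KiB page size is assumed purely for the illustration; the bfn values are made up):

```c
#include <assert.h>
#include <stdbool.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)

/*
 * Contiguity test from the hunk above: vec1 starts at bus frame bfn1
 * with byte offset off1 and length len1; vec2's first byte lives in
 * bus frame bfn2.  Only bus frame numbers are compared, as in the
 * patch.
 */
static bool bus_contiguous(unsigned long bfn1, unsigned long off1,
			   unsigned long len1, unsigned long bfn2)
{
	return bfn1 + PFN_DOWN(off1 + len1) == bfn2;
}

int main(void)
{
	/* A full page in frame 100 is followed by frame 101. */
	assert(bus_contiguous(100, 0, PAGE_SIZE, 101));

	/* Half a page still ends inside frame 100. */
	assert(bus_contiguous(100, 0, PAGE_SIZE / 2, 100));

	/* A gap of a whole frame is not mergeable. */
	assert(!bus_contiguous(100, 0, PAGE_SIZE, 102));
	return 0;
}
```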
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index bae1f5d36c26..2d43118077e4 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c | |||
| @@ -574,7 +574,7 @@ static void shutdown_pirq(struct irq_data *data) | |||
| 574 | 574 | ||
| 575 | static void enable_pirq(struct irq_data *data) | 575 | static void enable_pirq(struct irq_data *data) |
| 576 | { | 576 | { |
| 577 | startup_pirq(data); | 577 | enable_dynirq(data); |
| 578 | } | 578 | } |
| 579 | 579 | ||
| 580 | static void disable_pirq(struct irq_data *data) | 580 | static void disable_pirq(struct irq_data *data) |
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c index e46080214955..3e59590c7254 100644 --- a/drivers/xen/xenbus/xenbus_xs.c +++ b/drivers/xen/xenbus/xenbus_xs.c | |||
| @@ -857,6 +857,8 @@ static int xenwatch_thread(void *unused) | |||
| 857 | struct list_head *ent; | 857 | struct list_head *ent; |
| 858 | struct xs_watch_event *event; | 858 | struct xs_watch_event *event; |
| 859 | 859 | ||
| 860 | xenwatch_pid = current->pid; | ||
| 861 | |||
| 860 | for (;;) { | 862 | for (;;) { |
| 861 | wait_event_interruptible(watch_events_waitq, | 863 | wait_event_interruptible(watch_events_waitq, |
| 862 | !list_empty(&watch_events)); | 864 | !list_empty(&watch_events)); |
| @@ -925,7 +927,6 @@ int xs_init(void) | |||
| 925 | task = kthread_run(xenwatch_thread, NULL, "xenwatch"); | 927 | task = kthread_run(xenwatch_thread, NULL, "xenwatch"); |
| 926 | if (IS_ERR(task)) | 928 | if (IS_ERR(task)) |
| 927 | return PTR_ERR(task); | 929 | return PTR_ERR(task); |
| 928 | xenwatch_pid = task->pid; | ||
| 929 | 930 | ||
| 930 | /* shutdown watches for kexec boot */ | 931 | /* shutdown watches for kexec boot */ |
| 931 | xs_reset_watches(); | 932 | xs_reset_watches(); |
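Recording xenwatch_pid inside xenwatch_thread() itself, before the event loop starts, removes the window in which the thread is already running while the creator has not yet stored the pid returned by kthread_run(), so checks of the form "am I the xenwatch thread?" cannot observe an unset value. A hedged pthread sketch of the same publish-your-own-identity idea (illustrative only, not xenbus code):

```c
#include <pthread.h>
#include <stdio.h>

static pthread_t worker_tid;
static pthread_mutex_t tid_lock = PTHREAD_MUTEX_INITIALIZER;

static int is_worker_thread(void)
{
	int ret;

	pthread_mutex_lock(&tid_lock);
	ret = pthread_equal(pthread_self(), worker_tid);
	pthread_mutex_unlock(&tid_lock);
	return ret;
}

static void *worker(void *unused)
{
	(void)unused;

	/* Publish our identity first, as the patch does with the pid. */
	pthread_mutex_lock(&tid_lock);
	worker_tid = pthread_self();
	pthread_mutex_unlock(&tid_lock);

	printf("worker sees itself as worker: %d\n", is_worker_thread());
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	pthread_join(t, NULL);
	printf("main sees itself as worker: %d\n", is_worker_thread());
	return 0;
}
```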
