Diffstat (limited to 'drivers')
337 files changed, 3723 insertions(+), 2148 deletions(-)
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index a1d177d58254..21932d640a41 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -108,7 +108,9 @@ acpi_ex_add_table(u32 table_index,
 
 	/* Add the table to the namespace */
 
+	acpi_ex_exit_interpreter();
 	status = acpi_ns_load_table(table_index, parent_node);
+	acpi_ex_enter_interpreter();
 	if (ACPI_FAILURE(status)) {
 		acpi_ut_remove_reference(obj_desc);
 		*ddb_handle = NULL;
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index daceb80022b0..3b7fb99362b6 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -306,12 +306,6 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
 acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg)
 {
 	u64 address;
-	u8 access_width;
-	u32 bit_width;
-	u8 bit_offset;
-	u64 value64;
-	u32 new_value32, old_value32;
-	u8 index;
 	acpi_status status;
 
 	ACPI_FUNCTION_NAME(hw_write);
@@ -323,145 +317,23 @@ acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg)
 		return (status);
 	}
 
-	/* Convert access_width into number of bits based */
-
-	access_width = acpi_hw_get_access_bit_width(reg, 32);
-	bit_width = reg->bit_offset + reg->bit_width;
-	bit_offset = reg->bit_offset;
-
 	/*
 	 * Two address spaces supported: Memory or IO. PCI_Config is
 	 * not supported here because the GAS structure is insufficient
 	 */
-	index = 0;
-	while (bit_width) {
-		/*
-		 * Use offset style bit reads because "Index * AccessWidth" is
-		 * ensured to be less than 32-bits by acpi_hw_validate_register().
-		 */
-		new_value32 = ACPI_GET_BITS(&value, index * access_width,
-					    ACPI_MASK_BITS_ABOVE_32
-					    (access_width));
-
-		if (bit_offset >= access_width) {
-			bit_offset -= access_width;
-		} else {
-			/*
-			 * Use offset style bit masks because access_width is ensured
-			 * to be less than 32-bits by acpi_hw_validate_register() and
-			 * bit_offset/bit_width is less than access_width here.
-			 */
-			if (bit_offset) {
-				new_value32 &= ACPI_MASK_BITS_BELOW(bit_offset);
-			}
-			if (bit_width < access_width) {
-				new_value32 &= ACPI_MASK_BITS_ABOVE(bit_width);
-			}
-
-			if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
-				if (bit_offset || bit_width < access_width) {
-					/*
-					 * Read old values in order not to modify the bits that
-					 * are beyond the register bit_width/bit_offset setting.
-					 */
-					status =
-					    acpi_os_read_memory((acpi_physical_address)
-								address +
-								index *
-								ACPI_DIV_8
-								(access_width),
-								&value64,
-								access_width);
-					old_value32 = (u32)value64;
-
-					/*
-					 * Use offset style bit masks because access_width is
-					 * ensured to be less than 32-bits by
-					 * acpi_hw_validate_register() and bit_offset/bit_width is
-					 * less than access_width here.
-					 */
-					if (bit_offset) {
-						old_value32 &=
-						    ACPI_MASK_BITS_ABOVE
-						    (bit_offset);
-						bit_offset = 0;
-					}
-					if (bit_width < access_width) {
-						old_value32 &=
-						    ACPI_MASK_BITS_BELOW
-						    (bit_width);
-					}
-
-					new_value32 |= old_value32;
-				}
-
-				value64 = (u64)new_value32;
-				status =
-				    acpi_os_write_memory((acpi_physical_address)
-							 address +
-							 index *
-							 ACPI_DIV_8
-							 (access_width),
-							 value64, access_width);
-			} else {	/* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
-
-				if (bit_offset || bit_width < access_width) {
-					/*
-					 * Read old values in order not to modify the bits that
-					 * are beyond the register bit_width/bit_offset setting.
-					 */
-					status =
-					    acpi_hw_read_port((acpi_io_address)
-							      address +
-							      index *
-							      ACPI_DIV_8
-							      (access_width),
-							      &old_value32,
-							      access_width);
-
-					/*
-					 * Use offset style bit masks because access_width is
-					 * ensured to be less than 32-bits by
-					 * acpi_hw_validate_register() and bit_offset/bit_width is
-					 * less than access_width here.
-					 */
-					if (bit_offset) {
-						old_value32 &=
-						    ACPI_MASK_BITS_ABOVE
-						    (bit_offset);
-						bit_offset = 0;
-					}
-					if (bit_width < access_width) {
-						old_value32 &=
-						    ACPI_MASK_BITS_BELOW
-						    (bit_width);
-					}
-
-					new_value32 |= old_value32;
-				}
-
-				status = acpi_hw_write_port((acpi_io_address)
-							    address +
-							    index *
-							    ACPI_DIV_8
-							    (access_width),
-							    new_value32,
-							    access_width);
-			}
-		}
-
-		/*
-		 * Index * access_width is ensured to be less than 32-bits by
-		 * acpi_hw_validate_register().
-		 */
-		bit_width -=
-		    bit_width > access_width ? access_width : bit_width;
-		index++;
+	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+		status = acpi_os_write_memory((acpi_physical_address)
+					      address, (u64)value,
+					      reg->bit_width);
+	} else {	/* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
+
+		status = acpi_hw_write_port((acpi_io_address)
+					    address, value, reg->bit_width);
 	}
 
 	ACPI_DEBUG_PRINT((ACPI_DB_IO,
 			  "Wrote: %8.8X width %2d to %8.8X%8.8X (%s)\n",
-			  value, access_width, ACPI_FORMAT_UINT64(address),
+			  value, reg->bit_width, ACPI_FORMAT_UINT64(address),
 			  acpi_ut_get_region_name(reg->space_id)));
 
 	return (status);
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index f631a47724f0..1783cd7e1446 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -47,6 +47,7 @@
 #include "acparser.h"
 #include "acdispat.h"
 #include "actables.h"
+#include "acinterp.h"
 
 #define _COMPONENT ACPI_NAMESPACE
 ACPI_MODULE_NAME("nsparse")
@@ -170,6 +171,8 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
 
 	ACPI_FUNCTION_TRACE(ns_parse_table);
 
+	acpi_ex_enter_interpreter();
+
 	/*
 	 * AML Parse, pass 1
 	 *
@@ -185,7 +188,7 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
 	status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS1,
 					    table_index, start_node);
 	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
+		goto error_exit;
 	}
 
 	/*
@@ -201,8 +204,10 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
 	status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS2,
 					    table_index, start_node);
 	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
+		goto error_exit;
 	}
 
+error_exit:
+	acpi_ex_exit_interpreter();
 	return_ACPI_STATUS(status);
 }
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 31e8da648fff..262ca31b86d9 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1051,7 +1051,7 @@ static int __init acpi_bus_init(void)
 	 * Maybe EC region is required at bus_scan/acpi_get_devices. So it
 	 * is necessary to enable it as early as possible.
 	 */
-	acpi_boot_ec_enable();
+	acpi_ec_dsdt_probe();
 
 	printk(KERN_INFO PREFIX "Interpreter enabled\n");
 
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 0e70181f150c..73c76d646064 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1446,10 +1446,30 @@ ec_parse_io_ports(struct acpi_resource *resource, void *context)
 	return AE_OK;
 }
 
-int __init acpi_boot_ec_enable(void)
+static const struct acpi_device_id ec_device_ids[] = {
+	{"PNP0C09", 0},
+	{"", 0},
+};
+
+int __init acpi_ec_dsdt_probe(void)
 {
-	if (!boot_ec)
+	acpi_status status;
+
+	if (boot_ec)
 		return 0;
+
+	/*
+	 * Finding EC from DSDT if there is no ECDT EC available. When this
+	 * function is invoked, ACPI tables have been fully loaded, we can
+	 * walk namespace now.
+	 */
+	boot_ec = make_acpi_ec();
+	if (!boot_ec)
+		return -ENOMEM;
+	status = acpi_get_devices(ec_device_ids[0].id,
+				  ec_parse_device, boot_ec, NULL);
+	if (ACPI_FAILURE(status) || !boot_ec->handle)
+		return -ENODEV;
 	if (!ec_install_handlers(boot_ec)) {
 		first_ec = boot_ec;
 		return 0;
@@ -1457,11 +1477,6 @@ int __init acpi_boot_ec_enable(void)
 	return -EFAULT;
 }
 
-static const struct acpi_device_id ec_device_ids[] = {
-	{"PNP0C09", 0},
-	{"", 0},
-};
-
 #if 0
 /*
  * Some EC firmware variations refuses to respond QR_EC when SCI_EVT is not
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 9bb0773d39bf..27cc7feabfe4 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -181,7 +181,7 @@ typedef int (*acpi_ec_query_func) (void *data);
 
 int acpi_ec_init(void);
 int acpi_ec_ecdt_probe(void);
-int acpi_boot_ec_enable(void);
+int acpi_ec_dsdt_probe(void);
 void acpi_ec_block_transactions(void);
 void acpi_ec_unblock_transactions(void);
 void acpi_ec_unblock_transactions_early(void);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 61dc7a99e89a..c6f017458958 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -606,7 +606,7 @@ void ata_scsi_error(struct Scsi_Host *host)
 		ata_scsi_port_error_handler(host, ap);
 
 	/* finish or retry handled scmd's and clean up */
-	WARN_ON(host->host_failed || !list_empty(&eh_work_q));
+	WARN_ON(!list_empty(&eh_work_q));
 
 	DPRINTK("EXIT\n");
 }
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 6b2a84e7f2be..2609ba20b396 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_DMA_CMA) += dma-contiguous.o
 obj-y += power/
 obj-$(CONFIG_HAS_DMA) += dma-mapping.o
 obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
-obj-$(CONFIG_ISA) += isa.o
+obj-$(CONFIG_ISA_BUS_API) += isa.o
 obj-$(CONFIG_FW_LOADER) += firmware_class.o
 obj-$(CONFIG_NUMA) += node.o
 obj-$(CONFIG_MEMORY_HOTPLUG_SPARSE) += memory.o
diff --git a/drivers/base/isa.c b/drivers/base/isa.c
index 91dba65d7264..cd6ccdcf9df0 100644
--- a/drivers/base/isa.c
+++ b/drivers/base/isa.c
@@ -180,4 +180,4 @@ static int __init isa_bus_init(void)
 	return error;
 }
 
-device_initcall(isa_bus_init);
+postcore_initcall(isa_bus_init);
diff --git a/drivers/base/module.c b/drivers/base/module.c
index db930d3ee312..2a215780eda2 100644
--- a/drivers/base/module.c
+++ b/drivers/base/module.c
@@ -24,10 +24,12 @@ static char *make_driver_name(struct device_driver *drv)
 
 static void module_create_drivers_dir(struct module_kobject *mk)
 {
-	if (!mk || mk->drivers_dir)
-		return;
+	static DEFINE_MUTEX(drivers_dir_mutex);
 
-	mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
+	mutex_lock(&drivers_dir_mutex);
+	if (mk && !mk->drivers_dir)
+		mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
+	mutex_unlock(&drivers_dir_mutex);
 }
 
 void module_add_driver(struct module *mod, struct device_driver *drv)
diff --git a/drivers/base/power/opp/cpu.c b/drivers/base/power/opp/cpu.c
index 83d6e7ba1a34..8c3434bdb26d 100644
--- a/drivers/base/power/opp/cpu.c
+++ b/drivers/base/power/opp/cpu.c
@@ -211,7 +211,7 @@ int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev,
 		}
 
 		/* Mark opp-table as multiple CPUs are sharing it now */
-		opp_table->shared_opp = true;
+		opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
 	}
 unlock:
 	mutex_unlock(&opp_table_lock);
@@ -227,7 +227,8 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus);
 *
 * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
 *
- * Returns -ENODEV if OPP table isn't already present.
+ * Returns -ENODEV if OPP table isn't already present and -EINVAL if the OPP
+ * table's status is access-unknown.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
@@ -249,9 +250,14 @@ int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
 		goto unlock;
 	}
 
+	if (opp_table->shared_opp == OPP_TABLE_ACCESS_UNKNOWN) {
+		ret = -EINVAL;
+		goto unlock;
+	}
+
 	cpumask_clear(cpumask);
 
-	if (opp_table->shared_opp) {
+	if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) {
 		list_for_each_entry(opp_dev, &opp_table->dev_list, node)
 			cpumask_set_cpu(opp_dev->dev->id, cpumask);
 	} else {
diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c
index 94d2010558e3..1dfd3dd92624 100644
--- a/drivers/base/power/opp/of.c
+++ b/drivers/base/power/opp/of.c
@@ -34,7 +34,10 @@ static struct opp_table *_managed_opp(const struct device_node *np)
 			 * But the OPPs will be considered as shared only if the
 			 * OPP table contains a "opp-shared" property.
 			 */
-			return opp_table->shared_opp ? opp_table : NULL;
+			if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED)
+				return opp_table;
+
+			return NULL;
 		}
 	}
 
@@ -353,7 +356,10 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
 	}
 
 	opp_table->np = opp_np;
-	opp_table->shared_opp = of_property_read_bool(opp_np, "opp-shared");
+	if (of_property_read_bool(opp_np, "opp-shared"))
+		opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
+	else
+		opp_table->shared_opp = OPP_TABLE_ACCESS_EXCLUSIVE;
 
 	mutex_unlock(&opp_table_lock);
 
diff --git a/drivers/base/power/opp/opp.h b/drivers/base/power/opp/opp.h
index 20f3be22e060..fabd5ca1a083 100644
--- a/drivers/base/power/opp/opp.h
+++ b/drivers/base/power/opp/opp.h
@@ -119,6 +119,12 @@ struct opp_device {
 #endif
 };
 
+enum opp_table_access {
+	OPP_TABLE_ACCESS_UNKNOWN = 0,
+	OPP_TABLE_ACCESS_EXCLUSIVE = 1,
+	OPP_TABLE_ACCESS_SHARED = 2,
+};
+
 /**
  * struct opp_table - Device opp structure
  * @node: table node - contains the devices with OPPs that
@@ -166,7 +172,7 @@ struct opp_table {
 	/* For backward compatibility with v1 bindings */
 	unsigned int voltage_tolerance_v1;
 
-	bool shared_opp;
+	enum opp_table_access shared_opp;
 	struct dev_pm_opp *suspend_opp;
 
 	unsigned int *supported_hw;
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index d597e432e195..ab19adb07a12 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -1750,7 +1750,7 @@ aoecmd_init(void)
 	int ret;
 
 	/* get_zeroed_page returns page with ref count 1 */
-	p = (void *) get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
+	p = (void *) get_zeroed_page(GFP_KERNEL);
 	if (!p)
 		return -ENOMEM;
 	empty_page = virt_to_page(p);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 31e73a7a40f2..6a48ed41963f 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -941,7 +941,7 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd)
 	debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
 	debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout);
 	debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize);
-	debugfs_create_file("flags", 0444, dir, &nbd, &nbd_dbg_flags_ops);
+	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);
 
 	return 0;
 }
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index ca13df854639..2e6d1e9c3345 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -874,8 +874,12 @@ static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
 			  const struct blk_mq_queue_data *qd)
 {
 	unsigned long flags;
-	struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)hctx->driver_data;
+	int qid = hctx->queue_num;
+	struct blkfront_info *info = hctx->queue->queuedata;
+	struct blkfront_ring_info *rinfo = NULL;
 
+	BUG_ON(info->nr_rings <= qid);
+	rinfo = &info->rinfo[qid];
 	blk_mq_start_request(qd->rq);
 	spin_lock_irqsave(&rinfo->ring_lock, flags);
 	if (RING_FULL(&rinfo->ring))
@@ -901,20 +905,9 @@ out_busy:
 	return BLK_MQ_RQ_QUEUE_BUSY;
 }
 
-static int blk_mq_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
-			    unsigned int index)
-{
-	struct blkfront_info *info = (struct blkfront_info *)data;
-
-	BUG_ON(info->nr_rings <= index);
-	hctx->driver_data = &info->rinfo[index];
-	return 0;
-}
-
 static struct blk_mq_ops blkfront_mq_ops = {
 	.queue_rq = blkif_queue_rq,
 	.map_queue = blk_mq_map_queue,
-	.init_hctx = blk_mq_init_hctx,
 };
 
 static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
@@ -950,6 +943,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
 		return PTR_ERR(rq);
 	}
 
+	rq->queuedata = info;
 	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
 
 	if (info->feature_discard) {
@@ -2149,6 +2143,8 @@ static int blkfront_resume(struct xenbus_device *dev)
 		return err;
 
 	err = talk_to_blkback(dev, info);
+	if (!err)
+		blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);
 
 	/*
 	 * We have to wait for the backend to switch to
@@ -2485,10 +2481,23 @@ static void blkback_changed(struct xenbus_device *dev,
 		break;
 
 	case XenbusStateConnected:
-		if (dev->state != XenbusStateInitialised) {
+		/*
+		 * talk_to_blkback sets state to XenbusStateInitialised
+		 * and blkfront_connect sets it to XenbusStateConnected
+		 * (if connection went OK).
+		 *
+		 * If the backend (or toolstack) decides to poke at backend
+		 * state (and re-trigger the watch by setting the state repeatedly
+		 * to XenbusStateConnected (4)) we need to deal with this.
+		 * This is allowed as this is used to communicate to the guest
+		 * that the size of disk has changed!
+		 */
+		if ((dev->state != XenbusStateInitialised) &&
+		    (dev->state != XenbusStateConnected)) {
 			if (talk_to_blkback(dev, info))
 				break;
 		}
+
 		blkfront_connect(info);
 		break;
 
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 94fb407d8561..44b1bd6baa38 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -3820,6 +3820,7 @@ static void handle_new_recv_msgs(ipmi_smi_t intf)
 	while (!list_empty(&intf->waiting_rcv_msgs)) {
 		smi_msg = list_entry(intf->waiting_rcv_msgs.next,
 				     struct ipmi_smi_msg, link);
+		list_del(&smi_msg->link);
 		if (!run_to_completion)
 			spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
 					       flags);
@@ -3829,11 +3830,14 @@ static void handle_new_recv_msgs(ipmi_smi_t intf)
 		if (rv > 0) {
 			/*
 			 * To preserve message order, quit if we
-			 * can't handle a message.
+			 * can't handle a message.  Add the message
+			 * back at the head, this is safe because this
+			 * tasklet is the only thing that pulls the
+			 * messages.
 			 */
+			list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
 			break;
 		} else {
-			list_del(&smi_msg->link);
 			if (rv == 0)
 				/* Message handled */
 				ipmi_free_smi_msg(smi_msg);
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 53ddba26578c..98efbfcdb503 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -175,6 +175,7 @@ config COMMON_CLK_KEYSTONE
 config COMMON_CLK_NXP
 	def_bool COMMON_CLK && (ARCH_LPC18XX || ARCH_LPC32XX)
 	select REGMAP_MMIO if ARCH_LPC32XX
+	select MFD_SYSCON if ARCH_LPC18XX
 	---help---
 	  Support for clock providers on NXP platforms.
 
diff --git a/drivers/clk/microchip/clk-pic32mzda.c b/drivers/clk/microchip/clk-pic32mzda.c
index 020a29acc5b0..51f54380474b 100644
--- a/drivers/clk/microchip/clk-pic32mzda.c
+++ b/drivers/clk/microchip/clk-pic32mzda.c
@@ -180,15 +180,15 @@ static int pic32mzda_clk_probe(struct platform_device *pdev)
 
 	/* register fixed rate clocks */
 	clks[POSCCLK] = clk_register_fixed_rate(&pdev->dev, "posc_clk", NULL,
-						CLK_IS_ROOT, 24000000);
+						0, 24000000);
 	clks[FRCCLK] = clk_register_fixed_rate(&pdev->dev, "frc_clk", NULL,
-						CLK_IS_ROOT, 8000000);
+						0, 8000000);
 	clks[BFRCCLK] = clk_register_fixed_rate(&pdev->dev, "bfrc_clk", NULL,
-						CLK_IS_ROOT, 8000000);
+						0, 8000000);
 	clks[LPRCCLK] = clk_register_fixed_rate(&pdev->dev, "lprc_clk", NULL,
-						CLK_IS_ROOT, 32000);
+						0, 32000);
 	clks[UPLLCLK] = clk_register_fixed_rate(&pdev->dev, "usbphy_clk", NULL,
-						CLK_IS_ROOT, 24000000);
+						0, 24000000);
 	/* fixed rate (optional) clock */
 	if (of_find_property(np, "microchip,pic32mzda-sosc", NULL)) {
 		pr_info("pic32-clk: dt requests SOSC.\n");
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 0d159b513469..fe9dc17ea873 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -372,26 +372,9 @@ static bool intel_pstate_get_ppc_enable_status(void)
 	return acpi_ppc;
 }
 
-/*
- * The max target pstate ratio is a 8 bit value in both PLATFORM_INFO MSR and
- * in TURBO_RATIO_LIMIT MSR, which pstate driver stores in max_pstate and
- * max_turbo_pstate fields. The PERF_CTL MSR contains 16 bit value for P state
- * ratio, out of it only high 8 bits are used. For example 0x1700 is setting
- * target ratio 0x17. The _PSS control value stores in a format which can be
- * directly written to PERF_CTL MSR. But in intel_pstate driver this shift
- * occurs during write to PERF_CTL (E.g. for cores core_set_pstate()).
- * This function converts the _PSS control value to intel pstate driver format
- * for comparison and assignment.
- */
-static int convert_to_native_pstate_format(struct cpudata *cpu, int index)
-{
-	return cpu->acpi_perf_data.states[index].control >> 8;
-}
-
 static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
 {
 	struct cpudata *cpu;
-	int turbo_pss_ctl;
 	int ret;
 	int i;
 
@@ -441,11 +424,10 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
 	 * max frequency, which will cause a reduced performance as
 	 * this driver uses real max turbo frequency as the max
 	 * frequency. So correct this frequency in _PSS table to
-	 * correct max turbo frequency based on the turbo ratio.
+	 * correct max turbo frequency based on the turbo state.
 	 * Also need to convert to MHz as _PSS freq is in MHz.
 	 */
-	turbo_pss_ctl = convert_to_native_pstate_format(cpu, 0);
-	if (turbo_pss_ctl > cpu->pstate.max_pstate)
+	if (!limits->turbo_disabled)
 		cpu->acpi_perf_data.states[0].core_frequency =
 			policy->cpuinfo.max_freq / 1000;
 	cpu->valid_pss_table = true;
@@ -1460,6 +1442,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 
 	intel_pstate_clear_update_util_hook(policy->cpu);
 
+	pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
+		 policy->cpuinfo.max_freq, policy->max);
+
 	cpu = all_cpu_data[0];
 	if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
 	    policy->max < policy->cpuinfo.max_freq &&
@@ -1495,13 +1480,13 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 				   limits->max_sysfs_pct);
 	limits->max_perf_pct = max(limits->min_policy_pct,
 				   limits->max_perf_pct);
-	limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
 
 	/* Make sure min_perf_pct <= max_perf_pct */
 	limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
 
 	limits->min_perf = div_fp(limits->min_perf_pct, 100);
 	limits->max_perf = div_fp(limits->max_perf_pct, 100);
+	limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
 
 out:
 	intel_pstate_set_update_util_hook(policy->cpu);
@@ -1558,8 +1543,11 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
 
 	/* cpuinfo and default policy values */
 	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
-	policy->cpuinfo.max_freq =
-		cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+	update_turbo_state();
+	policy->cpuinfo.max_freq = limits->turbo_disabled ?
+			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
+	policy->cpuinfo.max_freq *= cpu->pstate.scaling;
+
 	intel_pstate_init_acpi_perf_limits(policy);
 	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
 	cpumask_set_cpu(policy->cpu, policy->cpus);
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 808a320e9d5d..a7ecb9a84c15 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -487,7 +487,7 @@ static int __init pcc_cpufreq_probe(void)
 	doorbell.space_id = reg_resource->space_id;
 	doorbell.bit_width = reg_resource->bit_width;
 	doorbell.bit_offset = reg_resource->bit_offset;
-	doorbell.access_width = 64;
+	doorbell.access_width = 4;
 	doorbell.address = reg_resource->address;
 
 	pr_debug("probe: doorbell: space_id is %d, bit_width is %d, "
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 1d6c803804d5..e92418facc92 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -268,8 +268,11 @@ int update_devfreq(struct devfreq *devfreq)
 	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);
 
 	err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
-	if (err)
+	if (err) {
+		freqs.new = cur_freq;
+		devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
 		return err;
+	}
 
 	freqs.new = freq;
 	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
@@ -552,6 +555,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
 	devfreq->profile = profile;
 	strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
 	devfreq->previous_freq = profile->initial_freq;
+	devfreq->last_status.current_frequency = profile->initial_freq;
 	devfreq->data = data;
 	devfreq->nb.notifier_call = devfreq_notifier_call;
 
@@ -561,23 +565,22 @@ struct devfreq *devfreq_add_device(struct device *dev,
 		mutex_lock(&devfreq->lock);
 	}
 
-	devfreq->trans_table = devm_kzalloc(dev, sizeof(unsigned int) *
-						devfreq->profile->max_state *
-						devfreq->profile->max_state,
-						GFP_KERNEL);
-	devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned long) *
-						devfreq->profile->max_state,
-						GFP_KERNEL);
-	devfreq->last_stat_updated = jiffies;
-
 	dev_set_name(&devfreq->dev, "%s", dev_name(dev));
 	err = device_register(&devfreq->dev);
 	if (err) {
-		put_device(&devfreq->dev);
 		mutex_unlock(&devfreq->lock);
 		goto err_out;
 	}
 
+	devfreq->trans_table = devm_kzalloc(&devfreq->dev, sizeof(unsigned int) *
+						devfreq->profile->max_state *
+						devfreq->profile->max_state,
+						GFP_KERNEL);
+	devfreq->time_in_state = devm_kzalloc(&devfreq->dev, sizeof(unsigned long) *
+						devfreq->profile->max_state,
+						GFP_KERNEL);
+	devfreq->last_stat_updated = jiffies;
+
 	srcu_init_notifier_head(&devfreq->transition_notifier_list);
 
 	mutex_unlock(&devfreq->lock);
@@ -603,7 +606,6 @@ struct devfreq *devfreq_add_device(struct device *dev,
 err_init:
 	list_del(&devfreq->node);
 	device_unregister(&devfreq->dev);
-	kfree(devfreq);
 err_out:
 	return ERR_PTR(err);
 }
@@ -621,7 +623,6 @@ int devfreq_remove_device(struct devfreq *devfreq)
 		return -EINVAL;
 
 	device_unregister(&devfreq->dev);
-	put_device(&devfreq->dev);
 
 	return 0;
 }
diff --git a/drivers/devfreq/event/exynos-nocp.c b/drivers/devfreq/event/exynos-nocp.c
index 6b6a5f310486..a5841403bde8 100644
--- a/drivers/devfreq/event/exynos-nocp.c
+++ b/drivers/devfreq/event/exynos-nocp.c
@@ -220,9 +220,6 @@ static int exynos_nocp_parse_dt(struct platform_device *pdev,
 
 	/* Maps the memory mapped IO to control nocp register */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (IS_ERR(res))
-		return PTR_ERR(res);
-
 	base = devm_ioremap_resource(dev, res);
 	if (IS_ERR(base))
 		return PTR_ERR(base);
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 8e304b1befc5..75bd6621dc5d 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -242,7 +242,7 @@ struct at_xdmac_lld {
 	u32 mbr_dus;	/* Destination Microblock Stride Register */
 };
 
-
+/* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */
 struct at_xdmac_desc {
 	struct at_xdmac_lld lld;
 	enum dma_transfer_direction direction;
@@ -253,7 +253,7 @@ struct at_xdmac_desc {
 	unsigned int xfer_size;
 	struct list_head descs_list;
 	struct list_head xfer_node;
-};
+} __aligned(sizeof(u64));
 
 static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
 {
@@ -1400,6 +1400,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	u32 cur_nda, check_nda, cur_ubc, mask, value;
 	u8 dwidth = 0;
 	unsigned long flags;
+	bool initd;
 
 	ret = dma_cookie_status(chan, cookie, txstate);
 	if (ret == DMA_COMPLETE)
@@ -1424,7 +1425,16 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	residue = desc->xfer_size;
 	/*
 	 * Flush FIFO: only relevant when the transfer is source peripheral
-	 * synchronized.
+	 * synchronized. Flush is needed before reading CUBC because data in
+	 * the FIFO are not reported by CUBC. Reporting a residue of the
+	 * transfer length while we have data in FIFO can cause issue.
+	 * Usecase: atmel USART has a timeout which means I have received
+	 * characters but there is no more character received for a while. On
+	 * timeout, it requests the residue. If the data are in the DMA FIFO,
+	 * we will return a residue of the transfer length. It means no data
+	 * received. If an application is waiting for these data, it will hang
+	 * since we won't have another USART timeout without receiving new
+	 * data.
 	 */
 	mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
 	value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
@@ -1435,34 +1445,43 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	}
 
 	/*
-	 * When processing the residue, we need to read two registers but we
-	 * can't do it in an atomic way. AT_XDMAC_CNDA is used to find where
-	 * we stand in the descriptor list and AT_XDMAC_CUBC is used
-	 * to know how many data are remaining for the current descriptor.
-	 * Since the dma channel is not paused to not loose data, between the
-	 * AT_XDMAC_CNDA and AT_XDMAC_CUBC read, we may have change of
-	 * descriptor.
-	 * For that reason, after reading AT_XDMAC_CUBC, we check if we are
-	 * still using the same descriptor by reading a second time
-	 * AT_XDMAC_CNDA. If AT_XDMAC_CNDA has changed, it means we have to
-	 * read again AT_XDMAC_CUBC.
+	 * The easiest way to compute the residue should be to pause the DMA
+	 * but doing this can lead to miss some data as some devices don't
+	 * have FIFO.
+	 * We need to read several registers because:
+	 *  - DMA is running therefore a descriptor change is possible while
+	 *    reading these registers
+	 *  - When the block transfer is done, the value of the CUBC register
+	 *    is set to its initial value until the fetch of the next descriptor.
+	 *    This value will corrupt the residue calculation so we have to skip
+	 *    it.
+	 *
+	 *  INITD --------                    ------------
+	 *              |____________________|
+	 *       _______________________  _______________
+	 *  NDA       @desc2             \/   @desc3
+	 *       _______________________/\_______________
+	 *       __________  ___________  _______________
+	 *  CUBC       0    \/ MAX desc1  \/  MAX desc2
+	 *       __________/\___________/\_______________
+	 *
+	 * Since descriptors are aligned on 64 bits, we can assume that
+	 * the update of NDA and CUBC is atomic.
 	 * Memory barriers are used to ensure the read order of the registers.
-	 * A max number of retries is set because unlikely it can never ends if
-	 * we are transferring a lot of data with small buffers.
+	 * A max number of retries is set because unlikely it could never ends.
 	 */
-	cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
-	rmb();
-	cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
 	for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
-		rmb();
 		check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
-
-		if (likely(cur_nda == check_nda))
-			break;
-
-		cur_nda = check_nda;
+		rmb();
+		initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
 		rmb();
 		cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
+		rmb();
+		cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+		rmb();
+
+		if ((check_nda == cur_nda) && initd)
+			break;
 	}
 
 	if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
@@ -1471,6 +1490,19 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	}
 
 	/*
+	 * Flush FIFO: only relevant when the transfer is source peripheral
+	 * synchronized. Another flush is needed here because CUBC is updated
+	 * when the controller sends the data write command. It can lead to
+	 * report data that are not written in the memory or the device. The
+	 * FIFO flush ensures that data are really written.
+	 */
+	if ((desc->lld.mbr_cfg & mask) == value) {
+		at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
+		while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
+			cpu_relax();
+	}
+
+	/*
 	 * Remove size of all microblocks already transferred and the current
 	 * one. Then add the remaining size to transfer of the current
 	 * microblock.
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 25d1dadcddd1..d0446a75990a 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -703,8 +703,9 @@ static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
 		goto free_resources;
 	}
 
-	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
-			       PAGE_SIZE, DMA_TO_DEVICE);
+	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src),
+			       (size_t)src & ~PAGE_MASK, PAGE_SIZE,
+			       DMA_TO_DEVICE);
 	unmap->addr[0] = src_dma;
 
 	ret = dma_mapping_error(dma_chan->device->dev, src_dma);
@@ -714,8 +715,9 @@ static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
 	}
 	unmap->to_cnt = 1;
 
-	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
-				PAGE_SIZE, DMA_FROM_DEVICE);
+	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest),
+				(size_t)dest & ~PAGE_MASK, PAGE_SIZE,
+				DMA_FROM_DEVICE);
 	unmap->addr[1] = dest_dma;
 
 	ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 6aa256b0a1ed..c3ee3ad98a63 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -565,7 +565,8 @@ void edac_mc_reset_delay_period(unsigned long value)
 	list_for_each(item, &mc_devices) {
 		mci = list_entry(item, struct mem_ctl_info, link);
 
-		edac_mod_work(&mci->work, value);
+		if (mci->op_state == OP_RUNNING_POLL)
+			edac_mod_work(&mci->work, value);
 	}
 	mutex_unlock(&mem_ctls_mutex);
 }
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index b4d0bf6534cf..6744d88bdea8 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -239,8 +239,11 @@ static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
 	{ 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc },
 };
 
-#define RIR_RNK_TGT(reg) GET_BITFIELD(reg, 16, 19)
-#define RIR_OFFSET(reg) GET_BITFIELD(reg, 2, 14)
+#define RIR_RNK_TGT(type, reg) (((type) == BROADWELL) ? \
+	GET_BITFIELD(reg, 20, 23) : GET_BITFIELD(reg, 16, 19))
+
+#define RIR_OFFSET(type, reg) (((type) == HASWELL || (type) == BROADWELL) ? \
+	GET_BITFIELD(reg, 2, 15) : GET_BITFIELD(reg, 2, 14))
 
 /* Device 16, functions 2-7 */
 
@@ -326,6 +329,7 @@ struct pci_id_descr {
 struct pci_id_table {
 	const struct pci_id_descr *descr;
 	int n_devs;
+	enum type type;
 };
 
 struct sbridge_dev {
@@ -394,9 +398,14 @@ static const struct pci_id_descr pci_dev_descr_sbridge[] = {
 	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0) },
 };
 
-#define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
+#define PCI_ID_TABLE_ENTRY(A, T) {	\
+	.descr = A,			\
+	.n_devs = ARRAY_SIZE(A),	\
+	.type = T			\
+}
+
 static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
-	PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge),
+	PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, SANDY_BRIDGE),
 	{0,}	/* 0 terminated list. */
 };
 
@@ -463,7 +472,7 @@ static const struct pci_id_descr pci_dev_descr_ibridge[] = {
 };
 
 static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
-	PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge),
+	PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, IVY_BRIDGE),
 	{0,}	/* 0 terminated list. */
 };
 
@@ -536,7 +545,7 @@ static const struct pci_id_descr pci_dev_descr_haswell[] = {
 };
 
 static const struct pci_id_table pci_dev_descr_haswell_table[] = {
-	PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell),
+	PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, HASWELL),
	{0,}	/* 0 terminated list. */
 };
 
@@ -580,7 +589,7 @@ static const struct pci_id_descr pci_dev_descr_knl[] = {
 };
 
 static const struct pci_id_table pci_dev_descr_knl_table[] = {
-	PCI_ID_TABLE_ENTRY(pci_dev_descr_knl),
+	PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, KNIGHTS_LANDING),
 	{0,}
 };
 
@@ -648,7 +657,7 @@ static const struct pci_id_descr pci_dev_descr_broadwell[] = {
 };
 
 static const struct pci_id_table pci_dev_descr_broadwell_table[] = {
-	PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell),
+	PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, BROADWELL),
 	{0,}	/* 0 terminated list. */
 };
 
@@ -1894,14 +1903,14 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
 			pci_read_config_dword(pvt->pci_tad[i],
 					      rir_offset[j][k],
 					      &reg);
-			tmp_mb = RIR_OFFSET(reg) << 6;
+			tmp_mb = RIR_OFFSET(pvt->info.type, reg) << 6;
 
 			gb = div_u64_rem(tmp_mb, 1024, &mb);
 			edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
 				 i, j, k,
 				 gb, (mb*1000)/1024,
 				 ((u64)tmp_mb) << 20L,
-				 (u32)RIR_RNK_TGT(reg),
+				 (u32)RIR_RNK_TGT(pvt->info.type, reg),
 				 reg);
 		}
 	}
@@ -2234,7 +2243,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
 	pci_read_config_dword(pvt->pci_tad[ch_add + base_ch],
 			      rir_offset[n_rir][idx],
 			      &reg);
-	*rank = RIR_RNK_TGT(reg);
+	*rank = RIR_RNK_TGT(pvt->info.type, reg);
 
 	edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
 		 n_rir,
@@ -3357,12 +3366,12 @@ fail0:
 #define ICPU(model, table) \
 	{ X86_VENDOR_INTEL, 6, model, 0, (unsigned long)&table }
 
-/* Order here must match "enum type" */
 static const struct x86_cpu_id sbridge_cpuids[] = {
 	ICPU(0x2d, pci_dev_descr_sbridge_table),	/* SANDY_BRIDGE */
 	ICPU(0x3e, pci_dev_descr_ibridge_table),	/* IVY_BRIDGE */
 	ICPU(0x3f, pci_dev_descr_haswell_table),	/* HASWELL */
 	ICPU(0x4f, pci_dev_descr_broadwell_table),	/* BROADWELL */
+	ICPU(0x56, pci_dev_descr_broadwell_table),	/* BROADWELL-DE */
| 3366 | ICPU(0x57, pci_dev_descr_knl_table), /* KNIGHTS_LANDING */ | 3375 | ICPU(0x57, pci_dev_descr_knl_table), /* KNIGHTS_LANDING */ |
| 3367 | { } | 3376 | { } |
| 3368 | }; | 3377 | }; |
| @@ -3398,7 +3407,7 @@ static int sbridge_probe(const struct x86_cpu_id *id) | |||
| 3398 | mc, mc + 1, num_mc); | 3407 | mc, mc + 1, num_mc); |
| 3399 | 3408 | ||
| 3400 | sbridge_dev->mc = mc++; | 3409 | sbridge_dev->mc = mc++; |
| 3401 | rc = sbridge_register_mci(sbridge_dev, id - sbridge_cpuids); | 3410 | rc = sbridge_register_mci(sbridge_dev, ptable->type); |
| 3402 | if (unlikely(rc < 0)) | 3411 | if (unlikely(rc < 0)) |
| 3403 | goto fail1; | 3412 | goto fail1; |
| 3404 | } | 3413 | } |
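The remaining sb_edac changes carry the platform type inside each pci_id_table instead of deriving it from the position of the matching x86_cpu_id entry; the positional scheme stops working once model 0x56 (Broadwell-DE) reuses the Broadwell descriptor table. A simplified, stand-alone sketch of that design choice follows; the names mirror the diff, but the lookup logic is reduced to a loop and the descriptor arrays are replaced by strings.

```c
/* Sketch of the design change: store the platform type in the descriptor
 * table instead of deriving it from the CPU-id array index. */
#include <stdio.h>
#include <stddef.h>

enum type { SANDY_BRIDGE, IVY_BRIDGE, HASWELL, BROADWELL, KNIGHTS_LANDING };

struct pci_id_table {
	const char *descr;	/* stands in for the pci_id_descr array */
	enum type type;		/* NEW: the type travels with the table */
};

#define PCI_ID_TABLE_ENTRY(A, T) { .descr = #A, .type = T }

static const struct pci_id_table broadwell_table =
	PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, BROADWELL);

struct cpu_id { unsigned model; const struct pci_id_table *table; };

static const struct cpu_id cpuids[] = {
	{ 0x4f, &broadwell_table },	/* BROADWELL */
	{ 0x56, &broadwell_table },	/* BROADWELL-DE: same table, index 1 */
};

int main(void)
{
	/* With Broadwell-DE present, "type = index into cpuids[]" would give
	 * 1 for model 0x56; reading the type from the table does not. */
	for (size_t i = 0; i < sizeof(cpuids) / sizeof(cpuids[0]); i++)
		printf("model 0x%02x -> type %d (index would say %zu)\n",
		       cpuids[i].model, cpuids[i].table->type, i);
	return 0;
}
```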
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c index 8b3226dca1d9..caff46c0e214 100644 --- a/drivers/extcon/extcon-palmas.c +++ b/drivers/extcon/extcon-palmas.c | |||
| @@ -360,6 +360,8 @@ static int palmas_usb_probe(struct platform_device *pdev) | |||
| 360 | 360 | ||
| 361 | palmas_enable_irq(palmas_usb); | 361 | palmas_enable_irq(palmas_usb); |
| 362 | /* perform initial detection */ | 362 | /* perform initial detection */ |
| 363 | if (palmas_usb->enable_gpio_vbus_detection) | ||
| 364 | palmas_vbus_irq_handler(palmas_usb->gpio_vbus_irq, palmas_usb); | ||
| 363 | palmas_gpio_id_detect(&palmas_usb->wq_detectid.work); | 365 | palmas_gpio_id_detect(&palmas_usb->wq_detectid.work); |
| 364 | device_set_wakeup_capable(&pdev->dev, true); | 366 | device_set_wakeup_capable(&pdev->dev, true); |
| 365 | return 0; | 367 | return 0; |
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c index a850cbc48d8d..c49d50e68aee 100644 --- a/drivers/firmware/efi/arm-init.c +++ b/drivers/firmware/efi/arm-init.c | |||
| @@ -174,6 +174,7 @@ static __init void reserve_regions(void) | |||
| 174 | { | 174 | { |
| 175 | efi_memory_desc_t *md; | 175 | efi_memory_desc_t *md; |
| 176 | u64 paddr, npages, size; | 176 | u64 paddr, npages, size; |
| 177 | int resv; | ||
| 177 | 178 | ||
| 178 | if (efi_enabled(EFI_DBG)) | 179 | if (efi_enabled(EFI_DBG)) |
| 179 | pr_info("Processing EFI memory map:\n"); | 180 | pr_info("Processing EFI memory map:\n"); |
| @@ -190,12 +191,14 @@ static __init void reserve_regions(void) | |||
| 190 | paddr = md->phys_addr; | 191 | paddr = md->phys_addr; |
| 191 | npages = md->num_pages; | 192 | npages = md->num_pages; |
| 192 | 193 | ||
| 194 | resv = is_reserve_region(md); | ||
| 193 | if (efi_enabled(EFI_DBG)) { | 195 | if (efi_enabled(EFI_DBG)) { |
| 194 | char buf[64]; | 196 | char buf[64]; |
| 195 | 197 | ||
| 196 | pr_info(" 0x%012llx-0x%012llx %s", | 198 | pr_info(" 0x%012llx-0x%012llx %s%s\n", |
| 197 | paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1, | 199 | paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1, |
| 198 | efi_md_typeattr_format(buf, sizeof(buf), md)); | 200 | efi_md_typeattr_format(buf, sizeof(buf), md), |
| 201 | resv ? "*" : ""); | ||
| 199 | } | 202 | } |
| 200 | 203 | ||
| 201 | memrange_efi_to_native(&paddr, &npages); | 204 | memrange_efi_to_native(&paddr, &npages); |
| @@ -204,14 +207,9 @@ static __init void reserve_regions(void) | |||
| 204 | if (is_normal_ram(md)) | 207 | if (is_normal_ram(md)) |
| 205 | early_init_dt_add_memory_arch(paddr, size); | 208 | early_init_dt_add_memory_arch(paddr, size); |
| 206 | 209 | ||
| 207 | if (is_reserve_region(md)) { | 210 | if (resv) |
| 208 | memblock_mark_nomap(paddr, size); | 211 | memblock_mark_nomap(paddr, size); |
| 209 | if (efi_enabled(EFI_DBG)) | ||
| 210 | pr_cont("*"); | ||
| 211 | } | ||
| 212 | 212 | ||
| 213 | if (efi_enabled(EFI_DBG)) | ||
| 214 | pr_cont("\n"); | ||
| 215 | } | 213 | } |
| 216 | 214 | ||
| 217 | set_bit(EFI_MEMMAP, &efi.flags); | 215 | set_bit(EFI_MEMMAP, &efi.flags); |
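The arm-init change computes the reserve decision once and folds the "*" marker into a single pr_info() instead of trailing pr_cont() calls. A trivial user-space sketch of that formatting follows, with made-up region values.

```c
/* Sketch of the refactor: decide the "reserved" flag up front and emit the
 * whole line with one printf instead of a pr_cont() suffix. */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

struct region { uint64_t start, end; bool nomap; };

int main(void)
{
	struct region r = { 0x80000000ULL, 0x80ffffffULL, true };
	bool resv = r.nomap;	/* reused both for the marker and for memblock */

	printf("  0x%012llx-0x%012llx%s\n",
	       (unsigned long long)r.start, (unsigned long long)r.end,
	       resv ? "*" : "");
	return 0;
}
```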
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 48da857f4774..cebcb405812e 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig | |||
| @@ -33,6 +33,7 @@ config ARCH_REQUIRE_GPIOLIB | |||
| 33 | 33 | ||
| 34 | menuconfig GPIOLIB | 34 | menuconfig GPIOLIB |
| 35 | bool "GPIO Support" | 35 | bool "GPIO Support" |
| 36 | select ANON_INODES | ||
| 36 | help | 37 | help |
| 37 | This enables GPIO support through the generic GPIO library. | 38 | This enables GPIO support through the generic GPIO library. |
| 38 | You only need to enable this, if you also want to enable | 39 | You only need to enable this, if you also want to enable |
| @@ -530,7 +531,7 @@ menu "Port-mapped I/O GPIO drivers" | |||
| 530 | 531 | ||
| 531 | config GPIO_104_DIO_48E | 532 | config GPIO_104_DIO_48E |
| 532 | tristate "ACCES 104-DIO-48E GPIO support" | 533 | tristate "ACCES 104-DIO-48E GPIO support" |
| 533 | depends on ISA | 534 | depends on ISA_BUS_API |
| 534 | select GPIOLIB_IRQCHIP | 535 | select GPIOLIB_IRQCHIP |
| 535 | help | 536 | help |
| 536 | Enables GPIO support for the ACCES 104-DIO-48E series (104-DIO-48E, | 537 | Enables GPIO support for the ACCES 104-DIO-48E series (104-DIO-48E, |
| @@ -540,7 +541,7 @@ config GPIO_104_DIO_48E | |||
| 540 | 541 | ||
| 541 | config GPIO_104_IDIO_16 | 542 | config GPIO_104_IDIO_16 |
| 542 | tristate "ACCES 104-IDIO-16 GPIO support" | 543 | tristate "ACCES 104-IDIO-16 GPIO support" |
| 543 | depends on ISA | 544 | depends on ISA_BUS_API |
| 544 | select GPIOLIB_IRQCHIP | 545 | select GPIOLIB_IRQCHIP |
| 545 | help | 546 | help |
| 546 | Enables GPIO support for the ACCES 104-IDIO-16 family (104-IDIO-16, | 547 | Enables GPIO support for the ACCES 104-IDIO-16 family (104-IDIO-16, |
| @@ -551,7 +552,7 @@ config GPIO_104_IDIO_16 | |||
| 551 | 552 | ||
| 552 | config GPIO_104_IDI_48 | 553 | config GPIO_104_IDI_48 |
| 553 | tristate "ACCES 104-IDI-48 GPIO support" | 554 | tristate "ACCES 104-IDI-48 GPIO support" |
| 554 | depends on ISA | 555 | depends on ISA_BUS_API |
| 555 | select GPIOLIB_IRQCHIP | 556 | select GPIOLIB_IRQCHIP |
| 556 | help | 557 | help |
| 557 | Enables GPIO support for the ACCES 104-IDI-48 family (104-IDI-48A, | 558 | Enables GPIO support for the ACCES 104-IDI-48 family (104-IDI-48A, |
| @@ -627,7 +628,7 @@ config GPIO_TS5500 | |||
| 627 | 628 | ||
| 628 | config GPIO_WS16C48 | 629 | config GPIO_WS16C48 |
| 629 | tristate "WinSystems WS16C48 GPIO support" | 630 | tristate "WinSystems WS16C48 GPIO support" |
| 630 | depends on ISA | 631 | depends on ISA_BUS_API |
| 631 | select GPIOLIB_IRQCHIP | 632 | select GPIOLIB_IRQCHIP |
| 632 | help | 633 | help |
| 633 | Enables GPIO support for the WinSystems WS16C48. The base port | 634 | Enables GPIO support for the WinSystems WS16C48. The base port |
diff --git a/drivers/gpio/gpio-104-dio-48e.c b/drivers/gpio/gpio-104-dio-48e.c index 1a647c07be67..fcf776971ca9 100644 --- a/drivers/gpio/gpio-104-dio-48e.c +++ b/drivers/gpio/gpio-104-dio-48e.c | |||
| @@ -75,7 +75,7 @@ static int dio48e_gpio_direction_input(struct gpio_chip *chip, unsigned offset) | |||
| 75 | { | 75 | { |
| 76 | struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip); | 76 | struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip); |
| 77 | const unsigned io_port = offset / 8; | 77 | const unsigned io_port = offset / 8; |
| 78 | const unsigned control_port = io_port / 2; | 78 | const unsigned int control_port = io_port / 3; |
| 79 | const unsigned control_addr = dio48egpio->base + 3 + control_port*4; | 79 | const unsigned control_addr = dio48egpio->base + 3 + control_port*4; |
| 80 | unsigned long flags; | 80 | unsigned long flags; |
| 81 | unsigned control; | 81 | unsigned control; |
| @@ -115,7 +115,7 @@ static int dio48e_gpio_direction_output(struct gpio_chip *chip, unsigned offset, | |||
| 115 | { | 115 | { |
| 116 | struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip); | 116 | struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip); |
| 117 | const unsigned io_port = offset / 8; | 117 | const unsigned io_port = offset / 8; |
| 118 | const unsigned control_port = io_port / 2; | 118 | const unsigned int control_port = io_port / 3; |
| 119 | const unsigned mask = BIT(offset % 8); | 119 | const unsigned mask = BIT(offset % 8); |
| 120 | const unsigned control_addr = dio48egpio->base + 3 + control_port*4; | 120 | const unsigned control_addr = dio48egpio->base + 3 + control_port*4; |
| 121 | const unsigned out_port = (io_port > 2) ? io_port + 1 : io_port; | 121 | const unsigned out_port = (io_port > 2) ? io_port + 1 : io_port; |
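The corrected divisor reflects the board layout: each 82C55-style group on the 104-DIO-48E provides three 8-bit I/O ports plus one control register, so the control register for a given port is selected by io_port / 3, not io_port / 2. A stand-alone sketch of the resulting address mapping, using a hypothetical I/O base:

```c
/* Sketch of the corrected control-port mapping for the 48 GPIO lines. */
#include <stdio.h>

int main(void)
{
	const unsigned base = 0x260;	/* hypothetical I/O base address */

	for (unsigned offset = 0; offset < 48; offset += 8) {
		unsigned io_port = offset / 8;
		unsigned control_port = io_port / 3;	/* group index */
		unsigned control_addr = base + 3 + control_port * 4;

		printf("GPIO %2u..%2u: io_port %u -> control reg 0x%03x\n",
		       offset, offset + 7, io_port, control_addr);
	}
	return 0;
}
```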
diff --git a/drivers/gpio/gpio-104-idi-48.c b/drivers/gpio/gpio-104-idi-48.c index 6c75c83baf5a..2d2763ea1a68 100644 --- a/drivers/gpio/gpio-104-idi-48.c +++ b/drivers/gpio/gpio-104-idi-48.c | |||
| @@ -247,6 +247,7 @@ static int idi_48_probe(struct device *dev, unsigned int id) | |||
| 247 | idi48gpio->irq = irq[id]; | 247 | idi48gpio->irq = irq[id]; |
| 248 | 248 | ||
| 249 | spin_lock_init(&idi48gpio->lock); | 249 | spin_lock_init(&idi48gpio->lock); |
| 250 | spin_lock_init(&idi48gpio->ack_lock); | ||
| 250 | 251 | ||
| 251 | dev_set_drvdata(dev, idi48gpio); | 252 | dev_set_drvdata(dev, idi48gpio); |
| 252 | 253 | ||
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c index 9aabc48ff5de..953e4b829e32 100644 --- a/drivers/gpio/gpio-bcm-kona.c +++ b/drivers/gpio/gpio-bcm-kona.c | |||
| @@ -547,11 +547,11 @@ static void bcm_kona_gpio_reset(struct bcm_kona_gpio *kona_gpio) | |||
| 547 | /* disable interrupts and clear status */ | 547 | /* disable interrupts and clear status */ |
| 548 | for (i = 0; i < kona_gpio->num_bank; i++) { | 548 | for (i = 0; i < kona_gpio->num_bank; i++) { |
| 549 | /* Unlock the entire bank first */ | 549 | /* Unlock the entire bank first */ |
| 550 | bcm_kona_gpio_write_lock_regs(kona_gpio, i, UNLOCK_CODE); | 550 | bcm_kona_gpio_write_lock_regs(reg_base, i, UNLOCK_CODE); |
| 551 | writel(0xffffffff, reg_base + GPIO_INT_MASK(i)); | 551 | writel(0xffffffff, reg_base + GPIO_INT_MASK(i)); |
| 552 | writel(0xffffffff, reg_base + GPIO_INT_STATUS(i)); | 552 | writel(0xffffffff, reg_base + GPIO_INT_STATUS(i)); |
| 553 | /* Now re-lock the bank */ | 553 | /* Now re-lock the bank */ |
| 554 | bcm_kona_gpio_write_lock_regs(kona_gpio, i, LOCK_CODE); | 554 | bcm_kona_gpio_write_lock_regs(reg_base, i, LOCK_CODE); |
| 555 | } | 555 | } |
| 556 | } | 556 | } |
| 557 | 557 | ||
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c index 75c6355b018d..e72794e463aa 100644 --- a/drivers/gpio/gpio-zynq.c +++ b/drivers/gpio/gpio-zynq.c | |||
| @@ -709,7 +709,13 @@ static int zynq_gpio_probe(struct platform_device *pdev) | |||
| 709 | dev_err(&pdev->dev, "input clock not found.\n"); | 709 | dev_err(&pdev->dev, "input clock not found.\n"); |
| 710 | return PTR_ERR(gpio->clk); | 710 | return PTR_ERR(gpio->clk); |
| 711 | } | 711 | } |
| 712 | ret = clk_prepare_enable(gpio->clk); | ||
| 713 | if (ret) { | ||
| 714 | dev_err(&pdev->dev, "Unable to enable clock.\n"); | ||
| 715 | return ret; | ||
| 716 | } | ||
| 712 | 717 | ||
| 718 | pm_runtime_set_active(&pdev->dev); | ||
| 713 | pm_runtime_enable(&pdev->dev); | 719 | pm_runtime_enable(&pdev->dev); |
| 714 | ret = pm_runtime_get_sync(&pdev->dev); | 720 | ret = pm_runtime_get_sync(&pdev->dev); |
| 715 | if (ret < 0) | 721 | if (ret < 0) |
| @@ -747,6 +753,7 @@ err_pm_put: | |||
| 747 | pm_runtime_put(&pdev->dev); | 753 | pm_runtime_put(&pdev->dev); |
| 748 | err_pm_dis: | 754 | err_pm_dis: |
| 749 | pm_runtime_disable(&pdev->dev); | 755 | pm_runtime_disable(&pdev->dev); |
| 756 | clk_disable_unprepare(gpio->clk); | ||
| 750 | 757 | ||
| 751 | return ret; | 758 | return ret; |
| 752 | } | 759 | } |
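The zynq hunk enables the clock before the runtime-PM setup and releases it on every failure path. A minimal sketch of that prepare/enable-plus-unwind pattern follows; the stub functions stand in for the Linux clk and PM APIs, and the stubbed pm_runtime_get_sync() deliberately fails so the error path runs.

```c
/* Sketch of the error-path pattern added by the hunk, with stubbed APIs. */
#include <stdio.h>

static int clk_prepare_enable(void) { return 0; }    /* stub: succeeds */
static void clk_disable_unprepare(void) { }          /* stub */
static int pm_runtime_get_sync(void) { return -1; }  /* stub: forces failure */
static void pm_runtime_disable(void) { }             /* stub */

static int probe(void)
{
	int ret = clk_prepare_enable();
	if (ret) {
		fprintf(stderr, "Unable to enable clock.\n");
		return ret;
	}

	ret = pm_runtime_get_sync();
	if (ret < 0)
		goto err_pm_dis;

	return 0;

err_pm_dis:
	pm_runtime_disable();
	clk_disable_unprepare();	/* clock is released on failure too */
	return ret;
}

int main(void) { return probe() ? 1 : 0; }
```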
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index d22dcc38179d..4aabddb38b59 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #include <linux/errno.h> | 16 | #include <linux/errno.h> |
| 17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
| 18 | #include <linux/io.h> | 18 | #include <linux/io.h> |
| 19 | #include <linux/io-mapping.h> | ||
| 19 | #include <linux/gpio/consumer.h> | 20 | #include <linux/gpio/consumer.h> |
| 20 | #include <linux/of.h> | 21 | #include <linux/of.h> |
| 21 | #include <linux/of_address.h> | 22 | #include <linux/of_address.h> |
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 24f60d28f0c0..570771ed19e6 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c | |||
| @@ -449,7 +449,6 @@ static void gpiodevice_release(struct device *dev) | |||
| 449 | { | 449 | { |
| 450 | struct gpio_device *gdev = dev_get_drvdata(dev); | 450 | struct gpio_device *gdev = dev_get_drvdata(dev); |
| 451 | 451 | ||
| 452 | cdev_del(&gdev->chrdev); | ||
| 453 | list_del(&gdev->list); | 452 | list_del(&gdev->list); |
| 454 | ida_simple_remove(&gpio_ida, gdev->id); | 453 | ida_simple_remove(&gpio_ida, gdev->id); |
| 455 | kfree(gdev->label); | 454 | kfree(gdev->label); |
| @@ -482,7 +481,6 @@ static int gpiochip_setup_dev(struct gpio_device *gdev) | |||
| 482 | 481 | ||
| 483 | /* From this point, the .release() function cleans up gpio_device */ | 482 | /* From this point, the .release() function cleans up gpio_device */ |
| 484 | gdev->dev.release = gpiodevice_release; | 483 | gdev->dev.release = gpiodevice_release; |
| 485 | get_device(&gdev->dev); | ||
| 486 | pr_debug("%s: registered GPIOs %d to %d on device: %s (%s)\n", | 484 | pr_debug("%s: registered GPIOs %d to %d on device: %s (%s)\n", |
| 487 | __func__, gdev->base, gdev->base + gdev->ngpio - 1, | 485 | __func__, gdev->base, gdev->base + gdev->ngpio - 1, |
| 488 | dev_name(&gdev->dev), gdev->chip->label ? : "generic"); | 486 | dev_name(&gdev->dev), gdev->chip->label ? : "generic"); |
| @@ -770,6 +768,8 @@ void gpiochip_remove(struct gpio_chip *chip) | |||
| 770 | * be removed, else it will be dangling until the last user is | 768 | * be removed, else it will be dangling until the last user is |
| 771 | * gone. | 769 | * gone. |
| 772 | */ | 770 | */ |
| 771 | cdev_del(&gdev->chrdev); | ||
| 772 | device_del(&gdev->dev); | ||
| 773 | put_device(&gdev->dev); | 773 | put_device(&gdev->dev); |
| 774 | } | 774 | } |
| 775 | EXPORT_SYMBOL_GPL(gpiochip_remove); | 775 | EXPORT_SYMBOL_GPL(gpiochip_remove); |
| @@ -869,7 +869,7 @@ struct gpio_chip *gpiochip_find(void *data, | |||
| 869 | 869 | ||
| 870 | spin_lock_irqsave(&gpio_lock, flags); | 870 | spin_lock_irqsave(&gpio_lock, flags); |
| 871 | list_for_each_entry(gdev, &gpio_devices, list) | 871 | list_for_each_entry(gdev, &gpio_devices, list) |
| 872 | if (match(gdev->chip, data)) | 872 | if (gdev->chip && match(gdev->chip, data)) |
| 873 | break; | 873 | break; |
| 874 | 874 | ||
| 875 | /* No match? */ | 875 | /* No match? */ |
| @@ -1373,8 +1373,12 @@ done: | |||
| 1373 | #define VALIDATE_DESC(desc) do { \ | 1373 | #define VALIDATE_DESC(desc) do { \ |
| 1374 | if (!desc) \ | 1374 | if (!desc) \ |
| 1375 | return 0; \ | 1375 | return 0; \ |
| 1376 | if (IS_ERR(desc)) { \ | ||
| 1377 | pr_warn("%s: invalid GPIO (errorpointer)\n", __func__); \ | ||
| 1378 | return PTR_ERR(desc); \ | ||
| 1379 | } \ | ||
| 1376 | if (!desc->gdev) { \ | 1380 | if (!desc->gdev) { \ |
| 1377 | pr_warn("%s: invalid GPIO\n", __func__); \ | 1381 | pr_warn("%s: invalid GPIO (no device)\n", __func__); \ |
| 1378 | return -EINVAL; \ | 1382 | return -EINVAL; \ |
| 1379 | } \ | 1383 | } \ |
| 1380 | if ( !desc->gdev->chip ) { \ | 1384 | if ( !desc->gdev->chip ) { \ |
| @@ -1386,8 +1390,12 @@ done: | |||
| 1386 | #define VALIDATE_DESC_VOID(desc) do { \ | 1390 | #define VALIDATE_DESC_VOID(desc) do { \ |
| 1387 | if (!desc) \ | 1391 | if (!desc) \ |
| 1388 | return; \ | 1392 | return; \ |
| 1393 | if (IS_ERR(desc)) { \ | ||
| 1394 | pr_warn("%s: invalid GPIO (errorpointer)\n", __func__); \ | ||
| 1395 | return; \ | ||
| 1396 | } \ | ||
| 1389 | if (!desc->gdev) { \ | 1397 | if (!desc->gdev) { \ |
| 1390 | pr_warn("%s: invalid GPIO\n", __func__); \ | 1398 | pr_warn("%s: invalid GPIO (no device)\n", __func__); \ |
| 1391 | return; \ | 1399 | return; \ |
| 1392 | } \ | 1400 | } \ |
| 1393 | if (!desc->gdev->chip) { \ | 1401 | if (!desc->gdev->chip) { \ |
| @@ -2056,7 +2064,14 @@ int gpiod_to_irq(const struct gpio_desc *desc) | |||
| 2056 | struct gpio_chip *chip; | 2064 | struct gpio_chip *chip; |
| 2057 | int offset; | 2065 | int offset; |
| 2058 | 2066 | ||
| 2059 | VALIDATE_DESC(desc); | 2067 | /* |
| 2068 | * Cannot VALIDATE_DESC() here as gpiod_to_irq() consumer semantics | ||
| 2069 | * requires this function to not return zero on an invalid descriptor | ||
| 2070 | * but rather a negative error number. | ||
| 2071 | */ | ||
| 2072 | if (!desc || IS_ERR(desc) || !desc->gdev || !desc->gdev->chip) | ||
| 2073 | return -EINVAL; | ||
| 2074 | |||
| 2060 | chip = desc->gdev->chip; | 2075 | chip = desc->gdev->chip; |
| 2061 | offset = gpio_chip_hwgpio(desc); | 2076 | offset = gpio_chip_hwgpio(desc); |
| 2062 | if (chip->to_irq) { | 2077 | if (chip->to_irq) { |
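The gpiolib hunks guard against descriptors that are error pointers rather than NULL or real objects, since dereferencing an ERR_PTR value crashes before the existing checks run. A stand-alone sketch of the underlying convention follows; the encode/decode helpers are simplified models of the kernel's ERR_PTR()/PTR_ERR()/IS_ERR().

```c
/* Sketch of the error-pointer convention behind the new IS_ERR() checks:
 * a handle can be NULL (optional), an encoded negative errno, or a real
 * object, and each case needs distinct handling. */
#include <stdio.h>
#include <stdint.h>

#define MAX_ERRNO	4095
#define EINVAL		22

static void *ERR_PTR(long error) { return (void *)(intptr_t)error; }
static long PTR_ERR(const void *ptr) { return (long)(intptr_t)ptr; }
static int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

struct desc { int value; };

static int desc_get_value(const struct desc *d)
{
	if (!d)
		return 0;			/* optional descriptor: no-op */
	if (IS_ERR(d)) {
		fprintf(stderr, "invalid GPIO (errorpointer)\n");
		return (int)PTR_ERR(d);		/* propagate the errno */
	}
	return d->value;
}

int main(void)
{
	struct desc good = { .value = 42 };

	printf("%d\n", desc_get_value(&good));
	printf("%d\n", desc_get_value(NULL));
	printf("%d\n", desc_get_value(ERR_PTR(-EINVAL)));
	return 0;
}
```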
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 992f00b65be4..e055d5be1c3c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h | |||
| @@ -1820,6 +1820,8 @@ struct amdgpu_asic_funcs { | |||
| 1820 | /* MM block clocks */ | 1820 | /* MM block clocks */ |
| 1821 | int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk); | 1821 | int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk); |
| 1822 | int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk); | 1822 | int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk); |
| 1823 | /* query virtual capabilities */ | ||
| 1824 | u32 (*get_virtual_caps)(struct amdgpu_device *adev); | ||
| 1823 | }; | 1825 | }; |
| 1824 | 1826 | ||
| 1825 | /* | 1827 | /* |
| @@ -1914,8 +1916,12 @@ void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device); | |||
| 1914 | 1916 | ||
| 1915 | 1917 | ||
| 1916 | /* GPU virtualization */ | 1918 | /* GPU virtualization */ |
| 1919 | #define AMDGPU_VIRT_CAPS_SRIOV_EN (1 << 0) | ||
| 1920 | #define AMDGPU_VIRT_CAPS_IS_VF (1 << 1) | ||
| 1917 | struct amdgpu_virtualization { | 1921 | struct amdgpu_virtualization { |
| 1918 | bool supports_sr_iov; | 1922 | bool supports_sr_iov; |
| 1923 | bool is_virtual; | ||
| 1924 | u32 caps; | ||
| 1919 | }; | 1925 | }; |
| 1920 | 1926 | ||
| 1921 | /* | 1927 | /* |
| @@ -2204,6 +2210,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) | |||
| 2204 | #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev)) | 2210 | #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev)) |
| 2205 | #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d)) | 2211 | #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d)) |
| 2206 | #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec)) | 2212 | #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec)) |
| 2213 | #define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev))) | ||
| 2207 | #define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev)) | 2214 | #define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev)) |
| 2208 | #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev)) | 2215 | #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev)) |
| 2209 | #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l)) | 2216 | #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l)) |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 199f76baf22c..cf6f49fc1c75 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | |||
| @@ -696,6 +696,17 @@ static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type) | |||
| 696 | return result; | 696 | return result; |
| 697 | } | 697 | } |
| 698 | 698 | ||
| 699 | static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type) | ||
| 700 | { | ||
| 701 | CGS_FUNC_ADEV; | ||
| 702 | if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) { | ||
| 703 | release_firmware(adev->pm.fw); | ||
| 704 | return 0; | ||
| 705 | } | ||
| 706 | /* cannot release other firmware images because they are not created by cgs */ | ||
| 707 | return -EINVAL; | ||
| 708 | } | ||
| 709 | |||
| 699 | static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, | 710 | static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, |
| 700 | enum cgs_ucode_id type, | 711 | enum cgs_ucode_id type, |
| 701 | struct cgs_firmware_info *info) | 712 | struct cgs_firmware_info *info) |
| @@ -898,7 +909,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device, | |||
| 898 | struct cgs_acpi_method_argument *argument = NULL; | 909 | struct cgs_acpi_method_argument *argument = NULL; |
| 899 | uint32_t i, count; | 910 | uint32_t i, count; |
| 900 | acpi_status status; | 911 | acpi_status status; |
| 901 | int result; | 912 | int result = 0; |
| 902 | uint32_t func_no = 0xFFFFFFFF; | 913 | uint32_t func_no = 0xFFFFFFFF; |
| 903 | 914 | ||
| 904 | handle = ACPI_HANDLE(&adev->pdev->dev); | 915 | handle = ACPI_HANDLE(&adev->pdev->dev); |
| @@ -1125,6 +1136,7 @@ static const struct cgs_ops amdgpu_cgs_ops = { | |||
| 1125 | amdgpu_cgs_pm_query_clock_limits, | 1136 | amdgpu_cgs_pm_query_clock_limits, |
| 1126 | amdgpu_cgs_set_camera_voltages, | 1137 | amdgpu_cgs_set_camera_voltages, |
| 1127 | amdgpu_cgs_get_firmware_info, | 1138 | amdgpu_cgs_get_firmware_info, |
| 1139 | amdgpu_cgs_rel_firmware, | ||
| 1128 | amdgpu_cgs_set_powergating_state, | 1140 | amdgpu_cgs_set_powergating_state, |
| 1129 | amdgpu_cgs_set_clockgating_state, | 1141 | amdgpu_cgs_set_clockgating_state, |
| 1130 | amdgpu_cgs_get_active_displays_info, | 1142 | amdgpu_cgs_get_active_displays_info, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index bb8b149786d7..6e920086af46 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | |||
| @@ -827,8 +827,10 @@ static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg) | |||
| 827 | */ | 827 | */ |
| 828 | static void amdgpu_atombios_fini(struct amdgpu_device *adev) | 828 | static void amdgpu_atombios_fini(struct amdgpu_device *adev) |
| 829 | { | 829 | { |
| 830 | if (adev->mode_info.atom_context) | 830 | if (adev->mode_info.atom_context) { |
| 831 | kfree(adev->mode_info.atom_context->scratch); | 831 | kfree(adev->mode_info.atom_context->scratch); |
| 832 | kfree(adev->mode_info.atom_context->iio); | ||
| 833 | } | ||
| 832 | kfree(adev->mode_info.atom_context); | 834 | kfree(adev->mode_info.atom_context); |
| 833 | adev->mode_info.atom_context = NULL; | 835 | adev->mode_info.atom_context = NULL; |
| 834 | kfree(adev->mode_info.atom_card_info); | 836 | kfree(adev->mode_info.atom_card_info); |
| @@ -1325,6 +1327,11 @@ static int amdgpu_fini(struct amdgpu_device *adev) | |||
| 1325 | adev->ip_block_status[i].valid = false; | 1327 | adev->ip_block_status[i].valid = false; |
| 1326 | } | 1328 | } |
| 1327 | 1329 | ||
| 1330 | for (i = adev->num_ip_blocks - 1; i >= 0; i--) { | ||
| 1331 | if (adev->ip_blocks[i].funcs->late_fini) | ||
| 1332 | adev->ip_blocks[i].funcs->late_fini((void *)adev); | ||
| 1333 | } | ||
| 1334 | |||
| 1328 | return 0; | 1335 | return 0; |
| 1329 | } | 1336 | } |
| 1330 | 1337 | ||
| @@ -1378,6 +1385,15 @@ static int amdgpu_resume(struct amdgpu_device *adev) | |||
| 1378 | return 0; | 1385 | return 0; |
| 1379 | } | 1386 | } |
| 1380 | 1387 | ||
| 1388 | static bool amdgpu_device_is_virtual(void) | ||
| 1389 | { | ||
| 1390 | #ifdef CONFIG_X86 | ||
| 1391 | return boot_cpu_has(X86_FEATURE_HYPERVISOR); | ||
| 1392 | #else | ||
| 1393 | return false; | ||
| 1394 | #endif | ||
| 1395 | } | ||
| 1396 | |||
| 1381 | /** | 1397 | /** |
| 1382 | * amdgpu_device_init - initialize the driver | 1398 | * amdgpu_device_init - initialize the driver |
| 1383 | * | 1399 | * |
| @@ -1512,9 +1528,14 @@ int amdgpu_device_init(struct amdgpu_device *adev, | |||
| 1512 | adev->virtualization.supports_sr_iov = | 1528 | adev->virtualization.supports_sr_iov = |
| 1513 | amdgpu_atombios_has_gpu_virtualization_table(adev); | 1529 | amdgpu_atombios_has_gpu_virtualization_table(adev); |
| 1514 | 1530 | ||
| 1531 | /* Check if we are executing in a virtualized environment */ | ||
| 1532 | adev->virtualization.is_virtual = amdgpu_device_is_virtual(); | ||
| 1533 | adev->virtualization.caps = amdgpu_asic_get_virtual_caps(adev); | ||
| 1534 | |||
| 1515 | /* Post card if necessary */ | 1535 | /* Post card if necessary */ |
| 1516 | if (!amdgpu_card_posted(adev) || | 1536 | if (!amdgpu_card_posted(adev) || |
| 1517 | adev->virtualization.supports_sr_iov) { | 1537 | (adev->virtualization.is_virtual && |
| 1538 | !(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) { | ||
| 1518 | if (!adev->bios) { | 1539 | if (!adev->bios) { |
| 1519 | dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n"); | 1540 | dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n"); |
| 1520 | return -EINVAL; | 1541 | return -EINVAL; |
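The new amdgpu_device_is_virtual() reduces to checking the CPU's hypervisor feature bit. A user-space illustration of that check via CPUID leaf 1, ECX bit 31, follows; it is x86-only, mirroring the #ifdef in the hunk, and uses the compiler-provided __get_cpuid() helper rather than the kernel's boot_cpu_has().

```c
/* Sketch of what X86_FEATURE_HYPERVISOR boils down to in user space. */
#include <stdio.h>
#include <stdbool.h>
#if defined(__x86_64__) || defined(__i386__)
#include <cpuid.h>
#endif

static bool cpu_is_virtual(void)
{
#if defined(__x86_64__) || defined(__i386__)
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return false;
	return ecx & (1u << 31);	/* "hypervisor present" feature bit */
#else
	return false;			/* non-x86: assume bare metal */
#endif
}

int main(void)
{
	printf("running under a hypervisor: %s\n",
	       cpu_is_virtual() ? "yes" : "no");
	return 0;
}
```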
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 40a23704a981..d851ea15059f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | |||
| @@ -447,7 +447,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file | |||
| 447 | dev_info.max_memory_clock = adev->pm.default_mclk * 10; | 447 | dev_info.max_memory_clock = adev->pm.default_mclk * 10; |
| 448 | } | 448 | } |
| 449 | dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask; | 449 | dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask; |
| 450 | dev_info.num_rb_pipes = adev->gfx.config.num_rbs; | 450 | dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se * |
| 451 | adev->gfx.config.max_shader_engines; | ||
| 451 | dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts; | 452 | dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts; |
| 452 | dev_info._pad = 0; | 453 | dev_info._pad = 0; |
| 453 | dev_info.ids_flags = 0; | 454 | dev_info.ids_flags = 0; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 589b36e8c5cf..0e13d80d2a95 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | |||
| @@ -270,30 +270,28 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev, | |||
| 270 | struct drm_device *ddev = dev_get_drvdata(dev); | 270 | struct drm_device *ddev = dev_get_drvdata(dev); |
| 271 | struct amdgpu_device *adev = ddev->dev_private; | 271 | struct amdgpu_device *adev = ddev->dev_private; |
| 272 | enum amd_pm_state_type state = 0; | 272 | enum amd_pm_state_type state = 0; |
| 273 | long idx; | 273 | unsigned long idx; |
| 274 | int ret; | 274 | int ret; |
| 275 | 275 | ||
| 276 | if (strlen(buf) == 1) | 276 | if (strlen(buf) == 1) |
| 277 | adev->pp_force_state_enabled = false; | 277 | adev->pp_force_state_enabled = false; |
| 278 | else { | 278 | else if (adev->pp_enabled) { |
| 279 | ret = kstrtol(buf, 0, &idx); | 279 | struct pp_states_info data; |
| 280 | 280 | ||
| 281 | if (ret) { | 281 | ret = kstrtoul(buf, 0, &idx); |
| 282 | if (ret || idx >= ARRAY_SIZE(data.states)) { | ||
| 282 | count = -EINVAL; | 283 | count = -EINVAL; |
| 283 | goto fail; | 284 | goto fail; |
| 284 | } | 285 | } |
| 285 | 286 | ||
| 286 | if (adev->pp_enabled) { | 287 | amdgpu_dpm_get_pp_num_states(adev, &data); |
| 287 | struct pp_states_info data; | 288 | state = data.states[idx]; |
| 288 | amdgpu_dpm_get_pp_num_states(adev, &data); | 289 | /* only set user selected power states */ |
| 289 | state = data.states[idx]; | 290 | if (state != POWER_STATE_TYPE_INTERNAL_BOOT && |
| 290 | /* only set user selected power states */ | 291 | state != POWER_STATE_TYPE_DEFAULT) { |
| 291 | if (state != POWER_STATE_TYPE_INTERNAL_BOOT && | 292 | amdgpu_dpm_dispatch_task(adev, |
| 292 | state != POWER_STATE_TYPE_DEFAULT) { | 293 | AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL); |
| 293 | amdgpu_dpm_dispatch_task(adev, | 294 | adev->pp_force_state_enabled = true; |
| 294 | AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL); | ||
| 295 | adev->pp_force_state_enabled = true; | ||
| 296 | } | ||
| 297 | } | 295 | } |
| 298 | } | 296 | } |
| 299 | fail: | 297 | fail: |
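The reworked store handler parses the index as an unsigned value and rejects anything outside data.states[] before dispatching it. A small sketch of that parse-and-bound pattern follows; strtoul stands in for kstrtoul and the state table is invented.

```c
/* Sketch of the hardened sysfs-style parse added by the hunk. */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *const states[] = { "boot", "default", "performance", "battery" };

static int set_force_state(const char *buf)
{
	char *end;
	unsigned long idx;

	errno = 0;
	idx = strtoul(buf, &end, 0);
	if (errno || end == buf || idx >= ARRAY_SIZE(states))
		return -EINVAL;		/* bad number or index out of range */

	printf("forcing state %lu (%s)\n", idx, states[idx]);
	return 0;
}

int main(void)
{
	printf("%d\n", set_force_state("2"));	/* accepted */
	printf("%d\n", set_force_state("9"));	/* rejected: out of range */
	return 0;
}
```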
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c index 6bd961fb43dc..82256558e0f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | |||
| @@ -183,13 +183,6 @@ static int amdgpu_pp_sw_fini(void *handle) | |||
| 183 | if (ret) | 183 | if (ret) |
| 184 | return ret; | 184 | return ret; |
| 185 | 185 | ||
| 186 | #ifdef CONFIG_DRM_AMD_POWERPLAY | ||
| 187 | if (adev->pp_enabled) { | ||
| 188 | amdgpu_pm_sysfs_fini(adev); | ||
| 189 | amd_powerplay_fini(adev->powerplay.pp_handle); | ||
| 190 | } | ||
| 191 | #endif | ||
| 192 | |||
| 193 | return ret; | 186 | return ret; |
| 194 | } | 187 | } |
| 195 | 188 | ||
| @@ -223,6 +216,22 @@ static int amdgpu_pp_hw_fini(void *handle) | |||
| 223 | return ret; | 216 | return ret; |
| 224 | } | 217 | } |
| 225 | 218 | ||
| 219 | static void amdgpu_pp_late_fini(void *handle) | ||
| 220 | { | ||
| 221 | #ifdef CONFIG_DRM_AMD_POWERPLAY | ||
| 222 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
| 223 | |||
| 224 | if (adev->pp_enabled) { | ||
| 225 | amdgpu_pm_sysfs_fini(adev); | ||
| 226 | amd_powerplay_fini(adev->powerplay.pp_handle); | ||
| 227 | } | ||
| 228 | |||
| 229 | if (adev->powerplay.ip_funcs->late_fini) | ||
| 230 | adev->powerplay.ip_funcs->late_fini( | ||
| 231 | adev->powerplay.pp_handle); | ||
| 232 | #endif | ||
| 233 | } | ||
| 234 | |||
| 226 | static int amdgpu_pp_suspend(void *handle) | 235 | static int amdgpu_pp_suspend(void *handle) |
| 227 | { | 236 | { |
| 228 | int ret = 0; | 237 | int ret = 0; |
| @@ -311,6 +320,7 @@ const struct amd_ip_funcs amdgpu_pp_ip_funcs = { | |||
| 311 | .sw_fini = amdgpu_pp_sw_fini, | 320 | .sw_fini = amdgpu_pp_sw_fini, |
| 312 | .hw_init = amdgpu_pp_hw_init, | 321 | .hw_init = amdgpu_pp_hw_init, |
| 313 | .hw_fini = amdgpu_pp_hw_fini, | 322 | .hw_fini = amdgpu_pp_hw_fini, |
| 323 | .late_fini = amdgpu_pp_late_fini, | ||
| 314 | .suspend = amdgpu_pp_suspend, | 324 | .suspend = amdgpu_pp_suspend, |
| 315 | .resume = amdgpu_pp_resume, | 325 | .resume = amdgpu_pp_resume, |
| 316 | .is_idle = amdgpu_pp_is_idle, | 326 | .is_idle = amdgpu_pp_is_idle, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 3b02272db678..870f9494252c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | |||
| @@ -343,6 +343,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) | |||
| 343 | ring->ring = NULL; | 343 | ring->ring = NULL; |
| 344 | ring->ring_obj = NULL; | 344 | ring->ring_obj = NULL; |
| 345 | 345 | ||
| 346 | amdgpu_wb_free(ring->adev, ring->cond_exe_offs); | ||
| 346 | amdgpu_wb_free(ring->adev, ring->fence_offs); | 347 | amdgpu_wb_free(ring->adev, ring->fence_offs); |
| 347 | amdgpu_wb_free(ring->adev, ring->rptr_offs); | 348 | amdgpu_wb_free(ring->adev, ring->rptr_offs); |
| 348 | amdgpu_wb_free(ring->adev, ring->wptr_offs); | 349 | amdgpu_wb_free(ring->adev, ring->wptr_offs); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index 8bf84efafb04..48618ee324eb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | |||
| @@ -115,6 +115,7 @@ int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev, | |||
| 115 | return r; | 115 | return r; |
| 116 | } | 116 | } |
| 117 | r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr); | 117 | r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr); |
| 118 | memset(sa_manager->cpu_ptr, 0, sa_manager->size); | ||
| 118 | amdgpu_bo_unreserve(sa_manager->bo); | 119 | amdgpu_bo_unreserve(sa_manager->bo); |
| 119 | return r; | 120 | return r; |
| 120 | } | 121 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 01abfc21b4a2..e19520c4b4b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | |||
| @@ -253,19 +253,20 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) | |||
| 253 | { | 253 | { |
| 254 | int r; | 254 | int r; |
| 255 | 255 | ||
| 256 | if (adev->uvd.vcpu_bo == NULL) | 256 | kfree(adev->uvd.saved_bo); |
| 257 | return 0; | ||
| 258 | 257 | ||
| 259 | amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity); | 258 | amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity); |
| 260 | 259 | ||
| 261 | r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false); | 260 | if (adev->uvd.vcpu_bo) { |
| 262 | if (!r) { | 261 | r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false); |
| 263 | amdgpu_bo_kunmap(adev->uvd.vcpu_bo); | 262 | if (!r) { |
| 264 | amdgpu_bo_unpin(adev->uvd.vcpu_bo); | 263 | amdgpu_bo_kunmap(adev->uvd.vcpu_bo); |
| 265 | amdgpu_bo_unreserve(adev->uvd.vcpu_bo); | 264 | amdgpu_bo_unpin(adev->uvd.vcpu_bo); |
| 266 | } | 265 | amdgpu_bo_unreserve(adev->uvd.vcpu_bo); |
| 266 | } | ||
| 267 | 267 | ||
| 268 | amdgpu_bo_unref(&adev->uvd.vcpu_bo); | 268 | amdgpu_bo_unref(&adev->uvd.vcpu_bo); |
| 269 | } | ||
| 269 | 270 | ||
| 270 | amdgpu_ring_fini(&adev->uvd.ring); | 271 | amdgpu_ring_fini(&adev->uvd.ring); |
| 271 | 272 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index ea407db1fbcf..5ec1f1e9c983 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c | |||
| @@ -6221,6 +6221,9 @@ static int ci_dpm_sw_fini(void *handle) | |||
| 6221 | ci_dpm_fini(adev); | 6221 | ci_dpm_fini(adev); |
| 6222 | mutex_unlock(&adev->pm.mutex); | 6222 | mutex_unlock(&adev->pm.mutex); |
| 6223 | 6223 | ||
| 6224 | release_firmware(adev->pm.fw); | ||
| 6225 | adev->pm.fw = NULL; | ||
| 6226 | |||
| 6224 | return 0; | 6227 | return 0; |
| 6225 | } | 6228 | } |
| 6226 | 6229 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 07bc795a4ca9..910431808542 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c | |||
| @@ -962,6 +962,12 @@ static bool cik_read_bios_from_rom(struct amdgpu_device *adev, | |||
| 962 | return true; | 962 | return true; |
| 963 | } | 963 | } |
| 964 | 964 | ||
| 965 | static u32 cik_get_virtual_caps(struct amdgpu_device *adev) | ||
| 966 | { | ||
| 967 | /* CIK does not support SR-IOV */ | ||
| 968 | return 0; | ||
| 969 | } | ||
| 970 | |||
| 965 | static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = { | 971 | static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = { |
| 966 | {mmGRBM_STATUS, false}, | 972 | {mmGRBM_STATUS, false}, |
| 967 | {mmGB_ADDR_CONFIG, false}, | 973 | {mmGB_ADDR_CONFIG, false}, |
| @@ -2007,6 +2013,7 @@ static const struct amdgpu_asic_funcs cik_asic_funcs = | |||
| 2007 | .get_xclk = &cik_get_xclk, | 2013 | .get_xclk = &cik_get_xclk, |
| 2008 | .set_uvd_clocks = &cik_set_uvd_clocks, | 2014 | .set_uvd_clocks = &cik_set_uvd_clocks, |
| 2009 | .set_vce_clocks = &cik_set_vce_clocks, | 2015 | .set_vce_clocks = &cik_set_vce_clocks, |
| 2016 | .get_virtual_caps = &cik_get_virtual_caps, | ||
| 2010 | /* these should be moved to their own ip modules */ | 2017 | /* these should be moved to their own ip modules */ |
| 2011 | .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter, | 2018 | .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter, |
| 2012 | .wait_for_mc_idle = &gmc_v7_0_mc_wait_for_idle, | 2019 | .wait_for_mc_idle = &gmc_v7_0_mc_wait_for_idle, |
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index 518dca43b133..9dc4e24e31e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c | |||
| @@ -66,6 +66,16 @@ MODULE_FIRMWARE("radeon/mullins_sdma1.bin"); | |||
| 66 | 66 | ||
| 67 | u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev); | 67 | u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev); |
| 68 | 68 | ||
| 69 | |||
| 70 | static void cik_sdma_free_microcode(struct amdgpu_device *adev) | ||
| 71 | { | ||
| 72 | int i; | ||
| 73 | for (i = 0; i < adev->sdma.num_instances; i++) { | ||
| 74 | release_firmware(adev->sdma.instance[i].fw); | ||
| 75 | adev->sdma.instance[i].fw = NULL; | ||
| 76 | } | ||
| 77 | } | ||
| 78 | |||
| 69 | /* | 79 | /* |
| 70 | * sDMA - System DMA | 80 | * sDMA - System DMA |
| 71 | * Starting with CIK, the GPU has new asynchronous | 81 | * Starting with CIK, the GPU has new asynchronous |
| @@ -419,6 +429,8 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev) | |||
| 419 | /* Initialize the ring buffer's read and write pointers */ | 429 | /* Initialize the ring buffer's read and write pointers */ |
| 420 | WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); | 430 | WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); |
| 421 | WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); | 431 | WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); |
| 432 | WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0); | ||
| 433 | WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0); | ||
| 422 | 434 | ||
| 423 | /* set the wb address whether it's enabled or not */ | 435 | /* set the wb address whether it's enabled or not */ |
| 424 | WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], | 436 | WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], |
| @@ -446,7 +458,12 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev) | |||
| 446 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); | 458 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); |
| 447 | 459 | ||
| 448 | ring->ready = true; | 460 | ring->ready = true; |
| 461 | } | ||
| 462 | |||
| 463 | cik_sdma_enable(adev, true); | ||
| 449 | 464 | ||
| 465 | for (i = 0; i < adev->sdma.num_instances; i++) { | ||
| 466 | ring = &adev->sdma.instance[i].ring; | ||
| 450 | r = amdgpu_ring_test_ring(ring); | 467 | r = amdgpu_ring_test_ring(ring); |
| 451 | if (r) { | 468 | if (r) { |
| 452 | ring->ready = false; | 469 | ring->ready = false; |
| @@ -529,8 +546,8 @@ static int cik_sdma_start(struct amdgpu_device *adev) | |||
| 529 | if (r) | 546 | if (r) |
| 530 | return r; | 547 | return r; |
| 531 | 548 | ||
| 532 | /* unhalt the MEs */ | 549 | /* halt the engine before programming */ |
| 533 | cik_sdma_enable(adev, true); | 550 | cik_sdma_enable(adev, false); |
| 534 | 551 | ||
| 535 | /* start the gfx rings and rlc compute queues */ | 552 | /* start the gfx rings and rlc compute queues */ |
| 536 | r = cik_sdma_gfx_resume(adev); | 553 | r = cik_sdma_gfx_resume(adev); |
| @@ -998,6 +1015,7 @@ static int cik_sdma_sw_fini(void *handle) | |||
| 998 | for (i = 0; i < adev->sdma.num_instances; i++) | 1015 | for (i = 0; i < adev->sdma.num_instances; i++) |
| 999 | amdgpu_ring_fini(&adev->sdma.instance[i].ring); | 1016 | amdgpu_ring_fini(&adev->sdma.instance[i].ring); |
| 1000 | 1017 | ||
| 1018 | cik_sdma_free_microcode(adev); | ||
| 1001 | return 0; | 1019 | return 0; |
| 1002 | } | 1020 | } |
| 1003 | 1021 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c index 245cabf06575..ed03b75175d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c | |||
| @@ -72,6 +72,11 @@ static int fiji_dpm_sw_init(void *handle) | |||
| 72 | 72 | ||
| 73 | static int fiji_dpm_sw_fini(void *handle) | 73 | static int fiji_dpm_sw_fini(void *handle) |
| 74 | { | 74 | { |
| 75 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
| 76 | |||
| 77 | release_firmware(adev->pm.fw); | ||
| 78 | adev->pm.fw = NULL; | ||
| 79 | |||
| 75 | return 0; | 80 | return 0; |
| 76 | } | 81 | } |
| 77 | 82 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 7f18a53ab53a..fc8ff4d3ccf8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | |||
| @@ -991,6 +991,22 @@ out: | |||
| 991 | return err; | 991 | return err; |
| 992 | } | 992 | } |
| 993 | 993 | ||
| 994 | static void gfx_v7_0_free_microcode(struct amdgpu_device *adev) | ||
| 995 | { | ||
| 996 | release_firmware(adev->gfx.pfp_fw); | ||
| 997 | adev->gfx.pfp_fw = NULL; | ||
| 998 | release_firmware(adev->gfx.me_fw); | ||
| 999 | adev->gfx.me_fw = NULL; | ||
| 1000 | release_firmware(adev->gfx.ce_fw); | ||
| 1001 | adev->gfx.ce_fw = NULL; | ||
| 1002 | release_firmware(adev->gfx.mec_fw); | ||
| 1003 | adev->gfx.mec_fw = NULL; | ||
| 1004 | release_firmware(adev->gfx.mec2_fw); | ||
| 1005 | adev->gfx.mec2_fw = NULL; | ||
| 1006 | release_firmware(adev->gfx.rlc_fw); | ||
| 1007 | adev->gfx.rlc_fw = NULL; | ||
| 1008 | } | ||
| 1009 | |||
| 994 | /** | 1010 | /** |
| 995 | * gfx_v7_0_tiling_mode_table_init - init the hw tiling table | 1011 | * gfx_v7_0_tiling_mode_table_init - init the hw tiling table |
| 996 | * | 1012 | * |
| @@ -4489,6 +4505,7 @@ static int gfx_v7_0_sw_fini(void *handle) | |||
| 4489 | gfx_v7_0_cp_compute_fini(adev); | 4505 | gfx_v7_0_cp_compute_fini(adev); |
| 4490 | gfx_v7_0_rlc_fini(adev); | 4506 | gfx_v7_0_rlc_fini(adev); |
| 4491 | gfx_v7_0_mec_fini(adev); | 4507 | gfx_v7_0_mec_fini(adev); |
| 4508 | gfx_v7_0_free_microcode(adev); | ||
| 4492 | 4509 | ||
| 4493 | return 0; | 4510 | return 0; |
| 4494 | } | 4511 | } |
| @@ -4816,7 +4833,7 @@ static int gfx_v7_0_eop_irq(struct amdgpu_device *adev, | |||
| 4816 | case 2: | 4833 | case 2: |
| 4817 | for (i = 0; i < adev->gfx.num_compute_rings; i++) { | 4834 | for (i = 0; i < adev->gfx.num_compute_rings; i++) { |
| 4818 | ring = &adev->gfx.compute_ring[i]; | 4835 | ring = &adev->gfx.compute_ring[i]; |
| 4819 | if ((ring->me == me_id) & (ring->pipe == pipe_id)) | 4836 | if ((ring->me == me_id) && (ring->pipe == pipe_id)) |
| 4820 | amdgpu_fence_process(ring); | 4837 | amdgpu_fence_process(ring); |
| 4821 | } | 4838 | } |
| 4822 | break; | 4839 | break; |
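Besides releasing the microcode, the gfx_v7 hunk replaces a bitwise '&' between two comparisons with '&&'. The comparisons already yield 0/1, so the result is unchanged there; the swap is about intent and short-circuit evaluation. A tiny sketch of where '&' actually misbehaves:

```c
/* Sketch: bitwise AND on "truthy" flags that are not normalized to 0/1. */
#include <stdio.h>

int main(void)
{
	int flag_a = 2;		/* non-zero, but not 1 */
	int flag_b = 4;		/* non-zero, but not 1 */

	printf("bitwise : %d\n", flag_a & flag_b);	/* 0: bits do not overlap */
	printf("logical : %d\n", flag_a && flag_b);	/* 1: both are non-zero */
	return 0;
}
```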
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index f19bab68fd83..1a5cbaff1e34 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | |||
| @@ -297,7 +297,8 @@ static const u32 polaris11_golden_common_all[] = | |||
| 297 | static const u32 golden_settings_polaris10_a11[] = | 297 | static const u32 golden_settings_polaris10_a11[] = |
| 298 | { | 298 | { |
| 299 | mmATC_MISC_CG, 0x000c0fc0, 0x000c0200, | 299 | mmATC_MISC_CG, 0x000c0fc0, 0x000c0200, |
| 300 | mmCB_HW_CONTROL, 0xfffdf3cf, 0x00006208, | 300 | mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208, |
| 301 | mmCB_HW_CONTROL_2, 0, 0x0f000000, | ||
| 301 | mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040, | 302 | mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040, |
| 302 | mmDB_DEBUG2, 0xf00fffff, 0x00000400, | 303 | mmDB_DEBUG2, 0xf00fffff, 0x00000400, |
| 303 | mmPA_SC_ENHANCE, 0xffffffff, 0x20000001, | 304 | mmPA_SC_ENHANCE, 0xffffffff, 0x20000001, |
| @@ -836,6 +837,26 @@ err1: | |||
| 836 | return r; | 837 | return r; |
| 837 | } | 838 | } |
| 838 | 839 | ||
| 840 | |||
| 841 | static void gfx_v8_0_free_microcode(struct amdgpu_device *adev) { | ||
| 842 | release_firmware(adev->gfx.pfp_fw); | ||
| 843 | adev->gfx.pfp_fw = NULL; | ||
| 844 | release_firmware(adev->gfx.me_fw); | ||
| 845 | adev->gfx.me_fw = NULL; | ||
| 846 | release_firmware(adev->gfx.ce_fw); | ||
| 847 | adev->gfx.ce_fw = NULL; | ||
| 848 | release_firmware(adev->gfx.rlc_fw); | ||
| 849 | adev->gfx.rlc_fw = NULL; | ||
| 850 | release_firmware(adev->gfx.mec_fw); | ||
| 851 | adev->gfx.mec_fw = NULL; | ||
| 852 | if ((adev->asic_type != CHIP_STONEY) && | ||
| 853 | (adev->asic_type != CHIP_TOPAZ)) | ||
| 854 | release_firmware(adev->gfx.mec2_fw); | ||
| 855 | adev->gfx.mec2_fw = NULL; | ||
| 856 | |||
| 857 | kfree(adev->gfx.rlc.register_list_format); | ||
| 858 | } | ||
| 859 | |||
| 839 | static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) | 860 | static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) |
| 840 | { | 861 | { |
| 841 | const char *chip_name; | 862 | const char *chip_name; |
| @@ -1983,7 +2004,7 @@ static int gfx_v8_0_sw_fini(void *handle) | |||
| 1983 | 2004 | ||
| 1984 | gfx_v8_0_rlc_fini(adev); | 2005 | gfx_v8_0_rlc_fini(adev); |
| 1985 | 2006 | ||
| 1986 | kfree(adev->gfx.rlc.register_list_format); | 2007 | gfx_v8_0_free_microcode(adev); |
| 1987 | 2008 | ||
| 1988 | return 0; | 2009 | return 0; |
| 1989 | } | 2010 | } |
| @@ -3974,11 +3995,15 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev) | |||
| 3974 | amdgpu_ring_write(ring, 0x3a00161a); | 3995 | amdgpu_ring_write(ring, 0x3a00161a); |
| 3975 | amdgpu_ring_write(ring, 0x0000002e); | 3996 | amdgpu_ring_write(ring, 0x0000002e); |
| 3976 | break; | 3997 | break; |
| 3977 | case CHIP_TOPAZ: | ||
| 3978 | case CHIP_CARRIZO: | 3998 | case CHIP_CARRIZO: |
| 3979 | amdgpu_ring_write(ring, 0x00000002); | 3999 | amdgpu_ring_write(ring, 0x00000002); |
| 3980 | amdgpu_ring_write(ring, 0x00000000); | 4000 | amdgpu_ring_write(ring, 0x00000000); |
| 3981 | break; | 4001 | break; |
| 4002 | case CHIP_TOPAZ: | ||
| 4003 | amdgpu_ring_write(ring, adev->gfx.config.num_rbs == 1 ? | ||
| 4004 | 0x00000000 : 0x00000002); | ||
| 4005 | amdgpu_ring_write(ring, 0x00000000); | ||
| 4006 | break; | ||
| 3982 | case CHIP_STONEY: | 4007 | case CHIP_STONEY: |
| 3983 | amdgpu_ring_write(ring, 0x00000000); | 4008 | amdgpu_ring_write(ring, 0x00000000); |
| 3984 | amdgpu_ring_write(ring, 0x00000000); | 4009 | amdgpu_ring_write(ring, 0x00000000); |
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c index 460bc8ad37e6..825ccd63f2dc 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c | |||
| @@ -72,6 +72,11 @@ static int iceland_dpm_sw_init(void *handle) | |||
| 72 | 72 | ||
| 73 | static int iceland_dpm_sw_fini(void *handle) | 73 | static int iceland_dpm_sw_fini(void *handle) |
| 74 | { | 74 | { |
| 75 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
| 76 | |||
| 77 | release_firmware(adev->pm.fw); | ||
| 78 | adev->pm.fw = NULL; | ||
| 79 | |||
| 75 | return 0; | 80 | return 0; |
| 76 | } | 81 | } |
| 77 | 82 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index f4c3130d3fdb..b556bd0a8797 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | |||
| @@ -105,6 +105,15 @@ static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev) | |||
| 105 | } | 105 | } |
| 106 | } | 106 | } |
| 107 | 107 | ||
| 108 | static void sdma_v2_4_free_microcode(struct amdgpu_device *adev) | ||
| 109 | { | ||
| 110 | int i; | ||
| 111 | for (i = 0; i < adev->sdma.num_instances; i++) { | ||
| 112 | release_firmware(adev->sdma.instance[i].fw); | ||
| 113 | adev->sdma.instance[i].fw = NULL; | ||
| 114 | } | ||
| 115 | } | ||
| 116 | |||
| 108 | /** | 117 | /** |
| 109 | * sdma_v2_4_init_microcode - load ucode images from disk | 118 | * sdma_v2_4_init_microcode - load ucode images from disk |
| 110 | * | 119 | * |
| @@ -461,6 +470,8 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev) | |||
| 461 | /* Initialize the ring buffer's read and write pointers */ | 470 | /* Initialize the ring buffer's read and write pointers */ |
| 462 | WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); | 471 | WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); |
| 463 | WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); | 472 | WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); |
| 473 | WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0); | ||
| 474 | WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0); | ||
| 464 | 475 | ||
| 465 | /* set the wb address whether it's enabled or not */ | 476 | /* set the wb address whether it's enabled or not */ |
| 466 | WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], | 477 | WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], |
| @@ -489,7 +500,11 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev) | |||
| 489 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); | 500 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); |
| 490 | 501 | ||
| 491 | ring->ready = true; | 502 | ring->ready = true; |
| 503 | } | ||
| 492 | 504 | ||
| 505 | sdma_v2_4_enable(adev, true); | ||
| 506 | for (i = 0; i < adev->sdma.num_instances; i++) { | ||
| 507 | ring = &adev->sdma.instance[i].ring; | ||
| 493 | r = amdgpu_ring_test_ring(ring); | 508 | r = amdgpu_ring_test_ring(ring); |
| 494 | if (r) { | 509 | if (r) { |
| 495 | ring->ready = false; | 510 | ring->ready = false; |
| @@ -580,8 +595,8 @@ static int sdma_v2_4_start(struct amdgpu_device *adev) | |||
| 580 | return -EINVAL; | 595 | return -EINVAL; |
| 581 | } | 596 | } |
| 582 | 597 | ||
| 583 | /* unhalt the MEs */ | 598 | /* halt the engine before programming */ |
| 584 | sdma_v2_4_enable(adev, true); | 599 | sdma_v2_4_enable(adev, false); |
| 585 | 600 | ||
| 586 | /* start the gfx rings and rlc compute queues */ | 601 | /* start the gfx rings and rlc compute queues */ |
| 587 | r = sdma_v2_4_gfx_resume(adev); | 602 | r = sdma_v2_4_gfx_resume(adev); |
| @@ -1012,6 +1027,7 @@ static int sdma_v2_4_sw_fini(void *handle) | |||
| 1012 | for (i = 0; i < adev->sdma.num_instances; i++) | 1027 | for (i = 0; i < adev->sdma.num_instances; i++) |
| 1013 | amdgpu_ring_fini(&adev->sdma.instance[i].ring); | 1028 | amdgpu_ring_fini(&adev->sdma.instance[i].ring); |
| 1014 | 1029 | ||
| 1030 | sdma_v2_4_free_microcode(adev); | ||
| 1015 | return 0; | 1031 | return 0; |
| 1016 | } | 1032 | } |
| 1017 | 1033 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 31d99b0010f7..532ea88da66a 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | |||
| @@ -236,6 +236,15 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev) | |||
| 236 | } | 236 | } |
| 237 | } | 237 | } |
| 238 | 238 | ||
| 239 | static void sdma_v3_0_free_microcode(struct amdgpu_device *adev) | ||
| 240 | { | ||
| 241 | int i; | ||
| 242 | for (i = 0; i < adev->sdma.num_instances; i++) { | ||
| 243 | release_firmware(adev->sdma.instance[i].fw); | ||
| 244 | adev->sdma.instance[i].fw = NULL; | ||
| 245 | } | ||
| 246 | } | ||
| 247 | |||
| 239 | /** | 248 | /** |
| 240 | * sdma_v3_0_init_microcode - load ucode images from disk | 249 | * sdma_v3_0_init_microcode - load ucode images from disk |
| 241 | * | 250 | * |
| @@ -672,6 +681,8 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev) | |||
| 672 | /* Initialize the ring buffer's read and write pointers */ | 681 | /* Initialize the ring buffer's read and write pointers */ |
| 673 | WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); | 682 | WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); |
| 674 | WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); | 683 | WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); |
| 684 | WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0); | ||
| 685 | WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0); | ||
| 675 | 686 | ||
| 676 | /* set the wb address whether it's enabled or not */ | 687 | /* set the wb address whether it's enabled or not */ |
| 677 | WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], | 688 | WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], |
| @@ -711,7 +722,15 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev) | |||
| 711 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); | 722 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); |
| 712 | 723 | ||
| 713 | ring->ready = true; | 724 | ring->ready = true; |
| 725 | } | ||
| 726 | |||
| 727 | /* unhalt the MEs */ | ||
| 728 | sdma_v3_0_enable(adev, true); | ||
| 729 | /* enable sdma ring preemption */ | ||
| 730 | sdma_v3_0_ctx_switch_enable(adev, true); | ||
| 714 | 731 | ||
| 732 | for (i = 0; i < adev->sdma.num_instances; i++) { | ||
| 733 | ring = &adev->sdma.instance[i].ring; | ||
| 715 | r = amdgpu_ring_test_ring(ring); | 734 | r = amdgpu_ring_test_ring(ring); |
| 716 | if (r) { | 735 | if (r) { |
| 717 | ring->ready = false; | 736 | ring->ready = false; |
| @@ -804,10 +823,9 @@ static int sdma_v3_0_start(struct amdgpu_device *adev) | |||
| 804 | } | 823 | } |
| 805 | } | 824 | } |
| 806 | 825 | ||
| 807 | /* unhalt the MEs */ | 826 | /* disable sdma engine before programming it */ |
| 808 | sdma_v3_0_enable(adev, true); | 827 | sdma_v3_0_ctx_switch_enable(adev, false); |
| 809 | /* enable sdma ring preemption */ | 828 | sdma_v3_0_enable(adev, false); |
| 810 | sdma_v3_0_ctx_switch_enable(adev, true); | ||
| 811 | 829 | ||
| 812 | /* start the gfx rings and rlc compute queues */ | 830 | /* start the gfx rings and rlc compute queues */ |
| 813 | r = sdma_v3_0_gfx_resume(adev); | 831 | r = sdma_v3_0_gfx_resume(adev); |
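Taken together, the two hunks above reorder the SDMA bring-up: the engine is halted and context switching disabled before the ring registers are programmed, and only after every ring is configured are the MEs unhalted and preemption enabled, followed by a separate ring-test pass. A rough two-phase sketch of that sequence, with invented helpers (program_ring(), engine_enable(), ctx_switch_enable(), test_ring()) standing in for the register writes:

    #include <stdbool.h>
    #include <stdio.h>

    #define NUM_RINGS 2

    struct ring { bool ready; };

    static void engine_enable(bool on)     { printf("engine %s\n", on ? "on" : "off"); }
    static void ctx_switch_enable(bool on) { printf("preemption %s\n", on ? "on" : "off"); }
    static void program_ring(int i)        { printf("program ring %d\n", i); }
    static int  test_ring(int i)           { (void)i; return 0; }   /* 0 == pass */

    static int gfx_resume(struct ring *rings)
    {
        int i;

        /* phase 1: keep the engine halted while the rings are programmed */
        ctx_switch_enable(false);
        engine_enable(false);

        for (i = 0; i < NUM_RINGS; i++) {
            program_ring(i);                 /* RB/IB pointers, doorbells, ... */
            rings[i].ready = true;
        }

        /* phase 2: unhalt once everything is programmed, then test the rings */
        engine_enable(true);
        ctx_switch_enable(true);

        for (i = 0; i < NUM_RINGS; i++) {
            if (test_ring(i)) {
                rings[i].ready = false;
                return -1;
            }
        }
        return 0;
    }

    int main(void)
    {
        struct ring rings[NUM_RINGS] = { { false }, { false } };
        return gfx_resume(rings);
    }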
| @@ -1247,6 +1265,7 @@ static int sdma_v3_0_sw_fini(void *handle) | |||
| 1247 | for (i = 0; i < adev->sdma.num_instances; i++) | 1265 | for (i = 0; i < adev->sdma.num_instances; i++) |
| 1248 | amdgpu_ring_fini(&adev->sdma.instance[i].ring); | 1266 | amdgpu_ring_fini(&adev->sdma.instance[i].ring); |
| 1249 | 1267 | ||
| 1268 | sdma_v3_0_free_microcode(adev); | ||
| 1250 | return 0; | 1269 | return 0; |
| 1251 | } | 1270 | } |
| 1252 | 1271 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c index b7615cefcac4..f06f6f4dc3a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c | |||
| @@ -71,6 +71,11 @@ static int tonga_dpm_sw_init(void *handle) | |||
| 71 | 71 | ||
| 72 | static int tonga_dpm_sw_fini(void *handle) | 72 | static int tonga_dpm_sw_fini(void *handle) |
| 73 | { | 73 | { |
| 74 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
| 75 | |||
| 76 | release_firmware(adev->pm.fw); | ||
| 77 | adev->pm.fw = NULL; | ||
| 78 | |||
| 74 | return 0; | 79 | return 0; |
| 75 | } | 80 | } |
| 76 | 81 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 2c88d0b66cf3..a65c96029476 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c | |||
| @@ -421,6 +421,20 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev, | |||
| 421 | return true; | 421 | return true; |
| 422 | } | 422 | } |
| 423 | 423 | ||
| 424 | static u32 vi_get_virtual_caps(struct amdgpu_device *adev) | ||
| 425 | { | ||
| 426 | u32 caps = 0; | ||
| 427 | u32 reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER); | ||
| 428 | |||
| 429 | if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE)) | ||
| 430 | caps |= AMDGPU_VIRT_CAPS_SRIOV_EN; | ||
| 431 | |||
| 432 | if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER)) | ||
| 433 | caps |= AMDGPU_VIRT_CAPS_IS_VF; | ||
| 434 | |||
| 435 | return caps; | ||
| 436 | } | ||
| 437 | |||
| 424 | static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = { | 438 | static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = { |
| 425 | {mmGB_MACROTILE_MODE7, true}, | 439 | {mmGB_MACROTILE_MODE7, true}, |
| 426 | }; | 440 | }; |
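vi_get_virtual_caps() folds two fields of BIF_IOV_FUNC_IDENTIFIER into a small capability bitmask: one bit reports that SR-IOV is enabled, the other that this function is a VF rather than the PF. A self-contained sketch of the same decode; the bit positions below are made up for illustration (the real masks come from the VI register headers):

    #include <stdint.h>
    #include <stdio.h>

    /* capability bits reported to the rest of the driver */
    #define VIRT_CAPS_SRIOV_EN  (1u << 0)
    #define VIRT_CAPS_IS_VF     (1u << 1)

    /* illustrative field layout of the identifier register (not the real one) */
    #define IOV_ENABLE_MASK       0x00000001u
    #define FUNC_IDENTIFIER_MASK  0x80000000u

    static uint32_t get_virtual_caps(uint32_t reg)
    {
        uint32_t caps = 0;

        if (reg & IOV_ENABLE_MASK)
            caps |= VIRT_CAPS_SRIOV_EN;     /* SR-IOV is turned on */
        if (reg & FUNC_IDENTIFIER_MASK)
            caps |= VIRT_CAPS_IS_VF;        /* we are a virtual function */

        return caps;
    }

    int main(void)
    {
        uint32_t reg = IOV_ENABLE_MASK | FUNC_IDENTIFIER_MASK;  /* pretend readback */
        uint32_t caps = get_virtual_caps(reg);

        printf("sriov=%d vf=%d\n",
               !!(caps & VIRT_CAPS_SRIOV_EN), !!(caps & VIRT_CAPS_IS_VF));
        return 0;
    }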
| @@ -1118,6 +1132,7 @@ static const struct amdgpu_asic_funcs vi_asic_funcs = | |||
| 1118 | .get_xclk = &vi_get_xclk, | 1132 | .get_xclk = &vi_get_xclk, |
| 1119 | .set_uvd_clocks = &vi_set_uvd_clocks, | 1133 | .set_uvd_clocks = &vi_set_uvd_clocks, |
| 1120 | .set_vce_clocks = &vi_set_vce_clocks, | 1134 | .set_vce_clocks = &vi_set_vce_clocks, |
| 1135 | .get_virtual_caps = &vi_get_virtual_caps, | ||
| 1121 | /* these should be moved to their own ip modules */ | 1136 | /* these should be moved to their own ip modules */ |
| 1122 | .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter, | 1137 | .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter, |
| 1123 | .wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle, | 1138 | .wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle, |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index ac005796b71c..7708d90b9da9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c | |||
| @@ -242,13 +242,19 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn, | |||
| 242 | pqm_uninit(&p->pqm); | 242 | pqm_uninit(&p->pqm); |
| 243 | 243 | ||
| 244 | /* Iterate over all process device data structure and check | 244 | /* Iterate over all process device data structure and check |
| 245 | * if we should reset all wavefronts */ | 245 | * if we should delete debug managers and reset all wavefronts |
| 246 | list_for_each_entry(pdd, &p->per_device_data, per_device_list) | 246 | */ |
| 247 | list_for_each_entry(pdd, &p->per_device_data, per_device_list) { | ||
| 248 | if ((pdd->dev->dbgmgr) && | ||
| 249 | (pdd->dev->dbgmgr->pasid == p->pasid)) | ||
| 250 | kfd_dbgmgr_destroy(pdd->dev->dbgmgr); | ||
| 251 | |||
| 247 | if (pdd->reset_wavefronts) { | 252 | if (pdd->reset_wavefronts) { |
| 248 | pr_warn("amdkfd: Resetting all wave fronts\n"); | 253 | pr_warn("amdkfd: Resetting all wave fronts\n"); |
| 249 | dbgdev_wave_reset_wavefronts(pdd->dev, p); | 254 | dbgdev_wave_reset_wavefronts(pdd->dev, p); |
| 250 | pdd->reset_wavefronts = false; | 255 | pdd->reset_wavefronts = false; |
| 251 | } | 256 | } |
| 257 | } | ||
| 252 | 258 | ||
| 253 | mutex_unlock(&p->mutex); | 259 | mutex_unlock(&p->mutex); |
| 254 | 260 | ||
| @@ -404,42 +410,52 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid) | |||
| 404 | 410 | ||
| 405 | idx = srcu_read_lock(&kfd_processes_srcu); | 411 | idx = srcu_read_lock(&kfd_processes_srcu); |
| 406 | 412 | ||
| 413 | /* | ||
| 414 | * Look for the process that matches the pasid. If there is no such | ||
| 415 | * process, we either released it in amdkfd's own notifier, or there | ||
| 416 | * is a bug. Unfortunately, there is no way to tell... | ||
| 417 | */ | ||
| 407 | hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes) | 418 | hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes) |
| 408 | if (p->pasid == pasid) | 419 | if (p->pasid == pasid) { |
| 409 | break; | ||
| 410 | 420 | ||
| 411 | srcu_read_unlock(&kfd_processes_srcu, idx); | 421 | srcu_read_unlock(&kfd_processes_srcu, idx); |
| 412 | 422 | ||
| 413 | BUG_ON(p->pasid != pasid); | 423 | pr_debug("Unbinding process %d from IOMMU\n", pasid); |
| 414 | 424 | ||
| 415 | mutex_lock(&p->mutex); | 425 | mutex_lock(&p->mutex); |
| 416 | 426 | ||
| 417 | if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid)) | 427 | if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid)) |
| 418 | kfd_dbgmgr_destroy(dev->dbgmgr); | 428 | kfd_dbgmgr_destroy(dev->dbgmgr); |
| 419 | 429 | ||
| 420 | pqm_uninit(&p->pqm); | 430 | pqm_uninit(&p->pqm); |
| 421 | 431 | ||
| 422 | pdd = kfd_get_process_device_data(dev, p); | 432 | pdd = kfd_get_process_device_data(dev, p); |
| 423 | 433 | ||
| 424 | if (!pdd) { | 434 | if (!pdd) { |
| 425 | mutex_unlock(&p->mutex); | 435 | mutex_unlock(&p->mutex); |
| 426 | return; | 436 | return; |
| 427 | } | 437 | } |
| 428 | 438 | ||
| 429 | if (pdd->reset_wavefronts) { | 439 | if (pdd->reset_wavefronts) { |
| 430 | dbgdev_wave_reset_wavefronts(pdd->dev, p); | 440 | dbgdev_wave_reset_wavefronts(pdd->dev, p); |
| 431 | pdd->reset_wavefronts = false; | 441 | pdd->reset_wavefronts = false; |
| 432 | } | 442 | } |
| 433 | 443 | ||
| 434 | /* | 444 | /* |
| 435 | * Just mark pdd as unbound, because we still need it to call | 445 | * Just mark pdd as unbound, because we still need it |
| 436 | * amd_iommu_unbind_pasid() in when the process exits. | 446 | * to call amd_iommu_unbind_pasid() in when the |
| 437 | * We don't call amd_iommu_unbind_pasid() here | 447 | * process exits. |
| 438 | * because the IOMMU called us. | 448 | * We don't call amd_iommu_unbind_pasid() here |
| 439 | */ | 449 | * because the IOMMU called us. |
| 440 | pdd->bound = false; | 450 | */ |
| 451 | pdd->bound = false; | ||
| 441 | 452 | ||
| 442 | mutex_unlock(&p->mutex); | 453 | mutex_unlock(&p->mutex); |
| 454 | |||
| 455 | return; | ||
| 456 | } | ||
| 457 | |||
| 458 | srcu_read_unlock(&kfd_processes_srcu, idx); | ||
| 443 | } | 459 | } |
| 444 | 460 | ||
| 445 | struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p) | 461 | struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p) |
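The reworked kfd_unbind_process_from_device() only acts when the pasid lookup actually hits: all teardown now happens inside the hash-walk branch, and the SRCU read lock is dropped on both the found and not-found paths, replacing the old unconditional BUG_ON on a possibly stale pointer. A simplified sketch of that control flow, using a plain array instead of the RCU hash table and printf stubs for the locking:

    #include <stdbool.h>
    #include <stdio.h>

    struct process { unsigned int pasid; bool bound; };

    static struct process table[] = { { 10, true }, { 11, true } };
    #define TABLE_LEN (sizeof(table) / sizeof(table[0]))

    static void read_lock(void)   { printf("srcu read lock\n"); }
    static void read_unlock(void) { printf("srcu read unlock\n"); }

    static void unbind_process(unsigned int pasid)
    {
        read_lock();

        for (size_t i = 0; i < TABLE_LEN; i++) {
            if (table[i].pasid != pasid)
                continue;

            /* found: drop the read lock, then do the per-process teardown */
            read_unlock();
            printf("unbinding process %u\n", pasid);
            table[i].bound = false;
            return;
        }

        /* no match: the process was already released elsewhere */
        read_unlock();
    }

    int main(void)
    {
        unbind_process(11);   /* found path */
        unbind_process(42);   /* not-found path just unlocks and returns */
        return 0;
    }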
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 74909e72a009..884c96f50c3d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c | |||
| @@ -666,7 +666,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, | |||
| 666 | dev->node_props.simd_count); | 666 | dev->node_props.simd_count); |
| 667 | 667 | ||
| 668 | if (dev->mem_bank_count < dev->node_props.mem_banks_count) { | 668 | if (dev->mem_bank_count < dev->node_props.mem_banks_count) { |
| 669 | pr_warn("kfd: mem_banks_count truncated from %d to %d\n", | 669 | pr_info_once("kfd: mem_banks_count truncated from %d to %d\n", |
| 670 | dev->node_props.mem_banks_count, | 670 | dev->node_props.mem_banks_count, |
| 671 | dev->mem_bank_count); | 671 | dev->mem_bank_count); |
| 672 | sysfs_show_32bit_prop(buffer, "mem_banks_count", | 672 | sysfs_show_32bit_prop(buffer, "mem_banks_count", |
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index 6080951d539d..afce1edbe250 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h | |||
| @@ -157,6 +157,7 @@ struct amd_ip_funcs { | |||
| 157 | int (*hw_init)(void *handle); | 157 | int (*hw_init)(void *handle); |
| 158 | /* tears down the hw state */ | 158 | /* tears down the hw state */ |
| 159 | int (*hw_fini)(void *handle); | 159 | int (*hw_fini)(void *handle); |
| 160 | void (*late_fini)(void *handle); | ||
| 160 | /* handles IP specific hw/sw changes for suspend */ | 161 | /* handles IP specific hw/sw changes for suspend */ |
| 161 | int (*suspend)(void *handle); | 162 | int (*suspend)(void *handle); |
| 162 | /* handles IP specific hw/sw changes for resume */ | 163 | /* handles IP specific hw/sw changes for resume */ |
diff --git a/drivers/gpu/drm/amd/include/atombios.h b/drivers/gpu/drm/amd/include/atombios.h index 32f3e345de08..3493da5c8f0e 100644 --- a/drivers/gpu/drm/amd/include/atombios.h +++ b/drivers/gpu/drm/amd/include/atombios.h | |||
| @@ -5538,6 +5538,78 @@ typedef struct _ATOM_ASIC_PROFILING_INFO_V3_5 | |||
| 5538 | ULONG ulReserved[12]; | 5538 | ULONG ulReserved[12]; |
| 5539 | }ATOM_ASIC_PROFILING_INFO_V3_5; | 5539 | }ATOM_ASIC_PROFILING_INFO_V3_5; |
| 5540 | 5540 | ||
| 5541 | /* for Polaris10/11 AVFS parameters */ | ||
| 5542 | typedef struct _ATOM_ASIC_PROFILING_INFO_V3_6 | ||
| 5543 | { | ||
| 5544 | ATOM_COMMON_TABLE_HEADER asHeader; | ||
| 5545 | ULONG ulMaxVddc; | ||
| 5546 | ULONG ulMinVddc; | ||
| 5547 | USHORT usLkgEuseIndex; | ||
| 5548 | UCHAR ucLkgEfuseBitLSB; | ||
| 5549 | UCHAR ucLkgEfuseLength; | ||
| 5550 | ULONG ulLkgEncodeLn_MaxDivMin; | ||
| 5551 | ULONG ulLkgEncodeMax; | ||
| 5552 | ULONG ulLkgEncodeMin; | ||
| 5553 | EFUSE_LINEAR_FUNC_PARAM sRoFuse; | ||
| 5554 | ULONG ulEvvDefaultVddc; | ||
| 5555 | ULONG ulEvvNoCalcVddc; | ||
| 5556 | ULONG ulSpeed_Model; | ||
| 5557 | ULONG ulSM_A0; | ||
| 5558 | ULONG ulSM_A1; | ||
| 5559 | ULONG ulSM_A2; | ||
| 5560 | ULONG ulSM_A3; | ||
| 5561 | ULONG ulSM_A4; | ||
| 5562 | ULONG ulSM_A5; | ||
| 5563 | ULONG ulSM_A6; | ||
| 5564 | ULONG ulSM_A7; | ||
| 5565 | UCHAR ucSM_A0_sign; | ||
| 5566 | UCHAR ucSM_A1_sign; | ||
| 5567 | UCHAR ucSM_A2_sign; | ||
| 5568 | UCHAR ucSM_A3_sign; | ||
| 5569 | UCHAR ucSM_A4_sign; | ||
| 5570 | UCHAR ucSM_A5_sign; | ||
| 5571 | UCHAR ucSM_A6_sign; | ||
| 5572 | UCHAR ucSM_A7_sign; | ||
| 5573 | ULONG ulMargin_RO_a; | ||
| 5574 | ULONG ulMargin_RO_b; | ||
| 5575 | ULONG ulMargin_RO_c; | ||
| 5576 | ULONG ulMargin_fixed; | ||
| 5577 | ULONG ulMargin_Fmax_mean; | ||
| 5578 | ULONG ulMargin_plat_mean; | ||
| 5579 | ULONG ulMargin_Fmax_sigma; | ||
| 5580 | ULONG ulMargin_plat_sigma; | ||
| 5581 | ULONG ulMargin_DC_sigma; | ||
| 5582 | ULONG ulLoadLineSlop; | ||
| 5583 | ULONG ulaTDClimitPerDPM[8]; | ||
| 5584 | ULONG ulaNoCalcVddcPerDPM[8]; | ||
| 5585 | ULONG ulAVFS_meanNsigma_Acontant0; | ||
| 5586 | ULONG ulAVFS_meanNsigma_Acontant1; | ||
| 5587 | ULONG ulAVFS_meanNsigma_Acontant2; | ||
| 5588 | USHORT usAVFS_meanNsigma_DC_tol_sigma; | ||
| 5589 | USHORT usAVFS_meanNsigma_Platform_mean; | ||
| 5590 | USHORT usAVFS_meanNsigma_Platform_sigma; | ||
| 5591 | ULONG ulGB_VDROOP_TABLE_CKSOFF_a0; | ||
| 5592 | ULONG ulGB_VDROOP_TABLE_CKSOFF_a1; | ||
| 5593 | ULONG ulGB_VDROOP_TABLE_CKSOFF_a2; | ||
| 5594 | ULONG ulGB_VDROOP_TABLE_CKSON_a0; | ||
| 5595 | ULONG ulGB_VDROOP_TABLE_CKSON_a1; | ||
| 5596 | ULONG ulGB_VDROOP_TABLE_CKSON_a2; | ||
| 5597 | ULONG ulAVFSGB_FUSE_TABLE_CKSOFF_m1; | ||
| 5598 | USHORT usAVFSGB_FUSE_TABLE_CKSOFF_m2; | ||
| 5599 | ULONG ulAVFSGB_FUSE_TABLE_CKSOFF_b; | ||
| 5600 | ULONG ulAVFSGB_FUSE_TABLE_CKSON_m1; | ||
| 5601 | USHORT usAVFSGB_FUSE_TABLE_CKSON_m2; | ||
| 5602 | ULONG ulAVFSGB_FUSE_TABLE_CKSON_b; | ||
| 5603 | USHORT usMaxVoltage_0_25mv; | ||
| 5604 | UCHAR ucEnableGB_VDROOP_TABLE_CKSOFF; | ||
| 5605 | UCHAR ucEnableGB_VDROOP_TABLE_CKSON; | ||
| 5606 | UCHAR ucEnableGB_FUSE_TABLE_CKSOFF; | ||
| 5607 | UCHAR ucEnableGB_FUSE_TABLE_CKSON; | ||
| 5608 | USHORT usPSM_Age_ComFactor; | ||
| 5609 | UCHAR ucEnableApplyAVFS_CKS_OFF_Voltage; | ||
| 5610 | UCHAR ucReserved; | ||
| 5611 | }ATOM_ASIC_PROFILING_INFO_V3_6; | ||
| 5612 | |||
| 5541 | 5613 | ||
| 5542 | typedef struct _ATOM_SCLK_FCW_RANGE_ENTRY_V1{ | 5614 | typedef struct _ATOM_SCLK_FCW_RANGE_ENTRY_V1{ |
| 5543 | ULONG ulMaxSclkFreq; | 5615 | ULONG ulMaxSclkFreq; |
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h index a461e155a160..7464daf89ca1 100644 --- a/drivers/gpu/drm/amd/include/cgs_common.h +++ b/drivers/gpu/drm/amd/include/cgs_common.h | |||
| @@ -581,6 +581,9 @@ typedef int (*cgs_get_firmware_info)(struct cgs_device *cgs_device, | |||
| 581 | enum cgs_ucode_id type, | 581 | enum cgs_ucode_id type, |
| 582 | struct cgs_firmware_info *info); | 582 | struct cgs_firmware_info *info); |
| 583 | 583 | ||
| 584 | typedef int (*cgs_rel_firmware)(struct cgs_device *cgs_device, | ||
| 585 | enum cgs_ucode_id type); | ||
| 586 | |||
| 584 | typedef int(*cgs_set_powergating_state)(struct cgs_device *cgs_device, | 587 | typedef int(*cgs_set_powergating_state)(struct cgs_device *cgs_device, |
| 585 | enum amd_ip_block_type block_type, | 588 | enum amd_ip_block_type block_type, |
| 586 | enum amd_powergating_state state); | 589 | enum amd_powergating_state state); |
| @@ -645,6 +648,7 @@ struct cgs_ops { | |||
| 645 | cgs_set_camera_voltages_t set_camera_voltages; | 648 | cgs_set_camera_voltages_t set_camera_voltages; |
| 646 | /* Firmware Info */ | 649 | /* Firmware Info */ |
| 647 | cgs_get_firmware_info get_firmware_info; | 650 | cgs_get_firmware_info get_firmware_info; |
| 651 | cgs_rel_firmware rel_firmware; | ||
| 648 | /* cg pg interface*/ | 652 | /* cg pg interface*/ |
| 649 | cgs_set_powergating_state set_powergating_state; | 653 | cgs_set_powergating_state set_powergating_state; |
| 650 | cgs_set_clockgating_state set_clockgating_state; | 654 | cgs_set_clockgating_state set_clockgating_state; |
| @@ -738,6 +742,8 @@ struct cgs_device | |||
| 738 | CGS_CALL(set_camera_voltages,dev,mask,voltages) | 742 | CGS_CALL(set_camera_voltages,dev,mask,voltages) |
| 739 | #define cgs_get_firmware_info(dev, type, info) \ | 743 | #define cgs_get_firmware_info(dev, type, info) \ |
| 740 | CGS_CALL(get_firmware_info, dev, type, info) | 744 | CGS_CALL(get_firmware_info, dev, type, info) |
| 745 | #define cgs_rel_firmware(dev, type) \ | ||
| 746 | CGS_CALL(rel_firmware, dev, type) | ||
| 741 | #define cgs_set_powergating_state(dev, block_type, state) \ | 747 | #define cgs_set_powergating_state(dev, block_type, state) \ |
| 742 | CGS_CALL(set_powergating_state, dev, block_type, state) | 748 | CGS_CALL(set_powergating_state, dev, block_type, state) |
| 743 | #define cgs_set_clockgating_state(dev, block_type, state) \ | 749 | #define cgs_set_clockgating_state(dev, block_type, state) \ |
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index 8e345bfddb69..e629f8a9fe93 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c | |||
| @@ -73,11 +73,14 @@ static int pp_sw_init(void *handle) | |||
| 73 | 73 | ||
| 74 | ret = hwmgr->hwmgr_func->backend_init(hwmgr); | 74 | ret = hwmgr->hwmgr_func->backend_init(hwmgr); |
| 75 | if (ret) | 75 | if (ret) |
| 76 | goto err; | 76 | goto err1; |
| 77 | 77 | ||
| 78 | pr_info("amdgpu: powerplay initialized\n"); | 78 | pr_info("amdgpu: powerplay initialized\n"); |
| 79 | 79 | ||
| 80 | return 0; | 80 | return 0; |
| 81 | err1: | ||
| 82 | if (hwmgr->pptable_func->pptable_fini) | ||
| 83 | hwmgr->pptable_func->pptable_fini(hwmgr); | ||
| 81 | err: | 84 | err: |
| 82 | pr_err("amdgpu: powerplay initialization failed\n"); | 85 | pr_err("amdgpu: powerplay initialization failed\n"); |
| 83 | return ret; | 86 | return ret; |
| @@ -100,6 +103,9 @@ static int pp_sw_fini(void *handle) | |||
| 100 | if (hwmgr->hwmgr_func->backend_fini != NULL) | 103 | if (hwmgr->hwmgr_func->backend_fini != NULL) |
| 101 | ret = hwmgr->hwmgr_func->backend_fini(hwmgr); | 104 | ret = hwmgr->hwmgr_func->backend_fini(hwmgr); |
| 102 | 105 | ||
| 106 | if (hwmgr->pptable_func->pptable_fini) | ||
| 107 | hwmgr->pptable_func->pptable_fini(hwmgr); | ||
| 108 | |||
| 103 | return ret; | 109 | return ret; |
| 104 | } | 110 | } |
| 105 | 111 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c index 46410e3c7349..fb88e4e5d625 100644 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c | |||
| @@ -58,9 +58,6 @@ static void pem_fini(struct pp_eventmgr *eventmgr) | |||
| 58 | pem_unregister_interrupts(eventmgr); | 58 | pem_unregister_interrupts(eventmgr); |
| 59 | 59 | ||
| 60 | pem_handle_event(eventmgr, AMD_PP_EVENT_UNINITIALIZE, &event_data); | 60 | pem_handle_event(eventmgr, AMD_PP_EVENT_UNINITIALIZE, &event_data); |
| 61 | |||
| 62 | if (eventmgr != NULL) | ||
| 63 | kfree(eventmgr); | ||
| 64 | } | 61 | } |
| 65 | 62 | ||
| 66 | int eventmgr_init(struct pp_instance *handle) | 63 | int eventmgr_init(struct pp_instance *handle) |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c index 24a16e49b571..92912ab20944 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c | |||
| @@ -633,6 +633,8 @@ static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr) | |||
| 633 | data->vddci_control = FIJI_VOLTAGE_CONTROL_NONE; | 633 | data->vddci_control = FIJI_VOLTAGE_CONTROL_NONE; |
| 634 | data->mvdd_control = FIJI_VOLTAGE_CONTROL_NONE; | 634 | data->mvdd_control = FIJI_VOLTAGE_CONTROL_NONE; |
| 635 | 635 | ||
| 636 | data->force_pcie_gen = PP_PCIEGenInvalid; | ||
| 637 | |||
| 636 | if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, | 638 | if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, |
| 637 | VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) | 639 | VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) |
| 638 | data->voltage_control = FIJI_VOLTAGE_CONTROL_BY_SVID2; | 640 | data->voltage_control = FIJI_VOLTAGE_CONTROL_BY_SVID2; |
| @@ -1830,7 +1832,7 @@ static uint16_t fiji_find_closest_vddci(struct pp_hwmgr *hwmgr, uint16_t vddci) | |||
| 1830 | 1832 | ||
| 1831 | PP_ASSERT_WITH_CODE(false, | 1833 | PP_ASSERT_WITH_CODE(false, |
| 1832 | "VDDCI is larger than max VDDCI in VDDCI Voltage Table!", | 1834 | "VDDCI is larger than max VDDCI in VDDCI Voltage Table!", |
| 1833 | return vddci_table->entries[i].value); | 1835 | return vddci_table->entries[i-1].value); |
| 1834 | } | 1836 | } |
| 1835 | 1837 | ||
| 1836 | static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, | 1838 | static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c index fa208ada6892..efb77eda7508 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | |||
| @@ -306,10 +306,14 @@ int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr, | |||
| 306 | { | 306 | { |
| 307 | PHM_FUNC_CHECK(hwmgr); | 307 | PHM_FUNC_CHECK(hwmgr); |
| 308 | 308 | ||
| 309 | if (hwmgr->hwmgr_func->store_cc6_data == NULL) | 309 | if (display_config == NULL) |
| 310 | return -EINVAL; | 310 | return -EINVAL; |
| 311 | 311 | ||
| 312 | hwmgr->display_config = *display_config; | 312 | hwmgr->display_config = *display_config; |
| 313 | |||
| 314 | if (hwmgr->hwmgr_func->store_cc6_data == NULL) | ||
| 315 | return -EINVAL; | ||
| 316 | |||
| 313 | /* to do pass other display configuration in furture */ | 317 | /* to do pass other display configuration in furture */ |
| 314 | 318 | ||
| 315 | if (hwmgr->hwmgr_func->store_cc6_data) | 319 | if (hwmgr->hwmgr_func->store_cc6_data) |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 1c48917da3cf..20f20e075588 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | |||
| @@ -93,6 +93,13 @@ int hwmgr_fini(struct pp_hwmgr *hwmgr) | |||
| 93 | if (hwmgr == NULL || hwmgr->ps == NULL) | 93 | if (hwmgr == NULL || hwmgr->ps == NULL) |
| 94 | return -EINVAL; | 94 | return -EINVAL; |
| 95 | 95 | ||
| 96 | /* do hwmgr finish*/ | ||
| 97 | kfree(hwmgr->backend); | ||
| 98 | |||
| 99 | kfree(hwmgr->start_thermal_controller.function_list); | ||
| 100 | |||
| 101 | kfree(hwmgr->set_temperature_range.function_list); | ||
| 102 | |||
| 96 | kfree(hwmgr->ps); | 103 | kfree(hwmgr->ps); |
| 97 | kfree(hwmgr); | 104 | kfree(hwmgr); |
| 98 | return 0; | 105 | return 0; |
| @@ -462,7 +469,7 @@ uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, u | |||
| 462 | 469 | ||
| 463 | PP_ASSERT_WITH_CODE(false, | 470 | PP_ASSERT_WITH_CODE(false, |
| 464 | "VDDCI is larger than max VDDCI in VDDCI Voltage Table!", | 471 | "VDDCI is larger than max VDDCI in VDDCI Voltage Table!", |
| 465 | return vddci_table->entries[i].value); | 472 | return vddci_table->entries[i-1].value); |
| 466 | } | 473 | } |
| 467 | 474 | ||
| 468 | int phm_find_boot_level(void *table, | 475 | int phm_find_boot_level(void *table, |
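Both copies of the closest-VDDCI lookup (fiji_find_closest_vddci() earlier and phm_find_closest_vddci() here) now fall back to entries[i-1], the highest value actually present in the table, when the requested VDDCI exceeds every entry; the old entries[i] read one element past the end. A small sketch of the search with that clamp (table contents are made up):

    #include <stdint.h>
    #include <stdio.h>

    /* Return the smallest table entry >= vddci, or the largest entry
     * (entries[count-1]) if vddci is above everything in the table.
     * Assumes count > 0, as the driver's voltage tables do. */
    static uint16_t find_closest_vddci(const uint16_t *entries, unsigned count,
                                       uint16_t vddci)
    {
        unsigned i;

        for (i = 0; i < count; i++)
            if (entries[i] >= vddci)
                return entries[i];

        /* i == count here: clamp to the last valid entry, not entries[i] */
        return entries[i - 1];
    }

    int main(void)
    {
        const uint16_t table[] = { 800, 850, 900, 950 };

        printf("%u\n", find_closest_vddci(table, 4, 870));   /* -> 900 */
        printf("%u\n", find_closest_vddci(table, 4, 1200));  /* -> 950 (clamped) */
        return 0;
    }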
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h index 347fef127ce9..2930a3355948 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h | |||
| @@ -39,6 +39,7 @@ struct phm_ppt_v1_clock_voltage_dependency_record { | |||
| 39 | uint8_t phases; | 39 | uint8_t phases; |
| 40 | uint8_t cks_enable; | 40 | uint8_t cks_enable; |
| 41 | uint8_t cks_voffset; | 41 | uint8_t cks_voffset; |
| 42 | uint32_t sclk_offset; | ||
| 42 | }; | 43 | }; |
| 43 | 44 | ||
| 44 | typedef struct phm_ppt_v1_clock_voltage_dependency_record phm_ppt_v1_clock_voltage_dependency_record; | 45 | typedef struct phm_ppt_v1_clock_voltage_dependency_record phm_ppt_v1_clock_voltage_dependency_record; |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c index aa6be033f21b..64ee78f7d41e 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c | |||
| @@ -999,7 +999,7 @@ static int polaris10_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, | |||
| 999 | vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), | 999 | vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), |
| 1000 | (dep_table->entries[i].vddc - | 1000 | (dep_table->entries[i].vddc - |
| 1001 | (uint16_t)data->vddc_vddci_delta)); | 1001 | (uint16_t)data->vddc_vddci_delta)); |
| 1002 | *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; | 1002 | *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; |
| 1003 | } | 1003 | } |
| 1004 | 1004 | ||
| 1005 | if (POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control) | 1005 | if (POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control) |
| @@ -1296,7 +1296,6 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr, | |||
| 1296 | } | 1296 | } |
| 1297 | 1297 | ||
| 1298 | mem_level->MclkFrequency = clock; | 1298 | mem_level->MclkFrequency = clock; |
| 1299 | mem_level->StutterEnable = 0; | ||
| 1300 | mem_level->EnabledForThrottle = 1; | 1299 | mem_level->EnabledForThrottle = 1; |
| 1301 | mem_level->EnabledForActivity = 0; | 1300 | mem_level->EnabledForActivity = 0; |
| 1302 | mem_level->UpHyst = 0; | 1301 | mem_level->UpHyst = 0; |
| @@ -1304,7 +1303,6 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr, | |||
| 1304 | mem_level->VoltageDownHyst = 0; | 1303 | mem_level->VoltageDownHyst = 0; |
| 1305 | mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target; | 1304 | mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target; |
| 1306 | mem_level->StutterEnable = false; | 1305 | mem_level->StutterEnable = false; |
| 1307 | |||
| 1308 | mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; | 1306 | mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; |
| 1309 | 1307 | ||
| 1310 | data->display_timing.num_existing_displays = info.display_count; | 1308 | data->display_timing.num_existing_displays = info.display_count; |
| @@ -1363,7 +1361,7 @@ static int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr) | |||
| 1363 | * a higher state by default such that we are not effected by | 1361 | * a higher state by default such that we are not effected by |
| 1364 | * up threshold or and MCLK DPM latency. | 1362 | * up threshold or and MCLK DPM latency. |
| 1365 | */ | 1363 | */ |
| 1366 | levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target; | 1364 | levels[0].ActivityLevel = 0x1f; |
| 1367 | CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel); | 1365 | CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel); |
| 1368 | 1366 | ||
| 1369 | data->smc_state_table.MemoryDpmLevelCount = | 1367 | data->smc_state_table.MemoryDpmLevelCount = |
| @@ -1761,12 +1759,9 @@ static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr) | |||
| 1761 | 1759 | ||
| 1762 | static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) | 1760 | static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) |
| 1763 | { | 1761 | { |
| 1764 | uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks, | 1762 | uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min; |
| 1765 | volt_with_cks, value; | ||
| 1766 | uint16_t clock_freq_u16; | ||
| 1767 | struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); | 1763 | struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); |
| 1768 | uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2, | 1764 | uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0; |
| 1769 | volt_offset = 0; | ||
| 1770 | struct phm_ppt_v1_information *table_info = | 1765 | struct phm_ppt_v1_information *table_info = |
| 1771 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | 1766 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
| 1772 | struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = | 1767 | struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = |
| @@ -1778,50 +1773,38 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) | |||
| 1778 | * if the part is SS or FF. if RO >= 1660MHz, part is FF. | 1773 | * if the part is SS or FF. if RO >= 1660MHz, part is FF. |
| 1779 | */ | 1774 | */ |
| 1780 | efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, | 1775 | efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, |
| 1781 | ixSMU_EFUSE_0 + (146 * 4)); | 1776 | ixSMU_EFUSE_0 + (67 * 4)); |
| 1782 | efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, | ||
| 1783 | ixSMU_EFUSE_0 + (148 * 4)); | ||
| 1784 | efuse &= 0xFF000000; | 1777 | efuse &= 0xFF000000; |
| 1785 | efuse = efuse >> 24; | 1778 | efuse = efuse >> 24; |
| 1786 | efuse2 &= 0xF; | ||
| 1787 | |||
| 1788 | if (efuse2 == 1) | ||
| 1789 | ro = (2300 - 1350) * efuse / 255 + 1350; | ||
| 1790 | else | ||
| 1791 | ro = (2500 - 1000) * efuse / 255 + 1000; | ||
| 1792 | 1779 | ||
| 1793 | if (ro >= 1660) | 1780 | if (hwmgr->chip_id == CHIP_POLARIS10) { |
| 1794 | type = 0; | 1781 | min = 1000; |
| 1795 | else | 1782 | max = 2300; |
| 1796 | type = 1; | 1783 | } else { |
| 1784 | min = 1100; | ||
| 1785 | max = 2100; | ||
| 1786 | } | ||
| 1797 | 1787 | ||
| 1798 | /* Populate Stretch amount */ | 1788 | ro = efuse * (max -min)/255 + min; |
| 1799 | data->smc_state_table.ClockStretcherAmount = stretch_amount; | ||
| 1800 | 1789 | ||
| 1801 | /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */ | 1790 | /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */ |
| 1802 | for (i = 0; i < sclk_table->count; i++) { | 1791 | for (i = 0; i < sclk_table->count; i++) { |
| 1803 | data->smc_state_table.Sclk_CKS_masterEn0_7 |= | 1792 | data->smc_state_table.Sclk_CKS_masterEn0_7 |= |
| 1804 | sclk_table->entries[i].cks_enable << i; | 1793 | sclk_table->entries[i].cks_enable << i; |
| 1805 | volt_without_cks = (uint32_t)((14041 * | 1794 | |
| 1806 | (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 / | 1795 | volt_without_cks = (uint32_t)(((ro - 40) * 1000 - 2753594 - sclk_table->entries[i].clk/100 * 136418 /1000) / \ |
| 1807 | (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000))); | 1796 | (sclk_table->entries[i].clk/100 * 1132925 /10000 - 242418)/100); |
| 1808 | volt_with_cks = (uint32_t)((13946 * | 1797 | |
| 1809 | (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 / | 1798 | volt_with_cks = (uint32_t)((ro * 1000 -2396351 - sclk_table->entries[i].clk/100 * 329021/1000) / \ |
| 1810 | (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000))); | 1799 | (sclk_table->entries[i].clk/10000 * 649434 /1000 - 18005)/10); |
| 1800 | |||
| 1811 | if (volt_without_cks >= volt_with_cks) | 1801 | if (volt_without_cks >= volt_with_cks) |
| 1812 | volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks + | 1802 | volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks + |
| 1813 | sclk_table->entries[i].cks_voffset) * 100 / 625) + 1); | 1803 | sclk_table->entries[i].cks_voffset) * 100 / 625) + 1); |
| 1804 | |||
| 1814 | data->smc_state_table.Sclk_voltageOffset[i] = volt_offset; | 1805 | data->smc_state_table.Sclk_voltageOffset[i] = volt_offset; |
| 1815 | } | 1806 | } |
| 1816 | 1807 | ||
| 1817 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, | ||
| 1818 | STRETCH_ENABLE, 0x0); | ||
| 1819 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, | ||
| 1820 | masterReset, 0x1); | ||
| 1821 | /* PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, staticEnable, 0x1); */ | ||
| 1822 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, | ||
| 1823 | masterReset, 0x0); | ||
| 1824 | |||
| 1825 | /* Populate CKS Lookup Table */ | 1808 | /* Populate CKS Lookup Table */ |
| 1826 | if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5) | 1809 | if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5) |
| 1827 | stretch_amount2 = 0; | 1810 | stretch_amount2 = 0; |
| @@ -1835,69 +1818,6 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) | |||
| 1835 | return -EINVAL); | 1818 | return -EINVAL); |
| 1836 | } | 1819 | } |
| 1837 | 1820 | ||
| 1838 | value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, | ||
| 1839 | ixPWR_CKS_CNTL); | ||
| 1840 | value &= 0xFFC2FF87; | ||
| 1841 | data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq = | ||
| 1842 | polaris10_clock_stretcher_lookup_table[stretch_amount2][0]; | ||
| 1843 | data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq = | ||
| 1844 | polaris10_clock_stretcher_lookup_table[stretch_amount2][1]; | ||
| 1845 | clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(data->smc_state_table. | ||
| 1846 | GraphicsLevel[data->smc_state_table.GraphicsDpmLevelCount - 1].SclkSetting.SclkFrequency) / 100); | ||
| 1847 | if (polaris10_clock_stretcher_lookup_table[stretch_amount2][0] < clock_freq_u16 | ||
| 1848 | && polaris10_clock_stretcher_lookup_table[stretch_amount2][1] > clock_freq_u16) { | ||
| 1849 | /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */ | ||
| 1850 | value |= (polaris10_clock_stretcher_lookup_table[stretch_amount2][3]) << 16; | ||
| 1851 | /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */ | ||
| 1852 | value |= (polaris10_clock_stretcher_lookup_table[stretch_amount2][2]) << 18; | ||
| 1853 | /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */ | ||
| 1854 | value |= (polaris10_clock_stretch_amount_conversion | ||
| 1855 | [polaris10_clock_stretcher_lookup_table[stretch_amount2][3]] | ||
| 1856 | [stretch_amount]) << 3; | ||
| 1857 | } | ||
| 1858 | CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq); | ||
| 1859 | CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq); | ||
| 1860 | data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting = | ||
| 1861 | polaris10_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F; | ||
| 1862 | data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |= | ||
| 1863 | (polaris10_clock_stretcher_lookup_table[stretch_amount2][3]) << 7; | ||
| 1864 | |||
| 1865 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, | ||
| 1866 | ixPWR_CKS_CNTL, value); | ||
| 1867 | |||
| 1868 | /* Populate DDT Lookup Table */ | ||
| 1869 | for (i = 0; i < 4; i++) { | ||
| 1870 | /* Assign the minimum and maximum VID stored | ||
| 1871 | * in the last row of Clock Stretcher Voltage Table. | ||
| 1872 | */ | ||
| 1873 | data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].minVID = | ||
| 1874 | (uint8_t) polaris10_clock_stretcher_ddt_table[type][i][2]; | ||
| 1875 | data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].maxVID = | ||
| 1876 | (uint8_t) polaris10_clock_stretcher_ddt_table[type][i][3]; | ||
| 1877 | /* Loop through each SCLK and check the frequency | ||
| 1878 | * to see if it lies within the frequency for clock stretcher. | ||
| 1879 | */ | ||
| 1880 | for (j = 0; j < data->smc_state_table.GraphicsDpmLevelCount; j++) { | ||
| 1881 | cks_setting = 0; | ||
| 1882 | clock_freq = PP_SMC_TO_HOST_UL( | ||
| 1883 | data->smc_state_table.GraphicsLevel[j].SclkSetting.SclkFrequency); | ||
| 1884 | /* Check the allowed frequency against the sclk level[j]. | ||
| 1885 | * Sclk's endianness has already been converted, | ||
| 1886 | * and it's in 10Khz unit, | ||
| 1887 | * as opposed to Data table, which is in Mhz unit. | ||
| 1888 | */ | ||
| 1889 | if (clock_freq >= (polaris10_clock_stretcher_ddt_table[type][i][0]) * 100) { | ||
| 1890 | cks_setting |= 0x2; | ||
| 1891 | if (clock_freq < (polaris10_clock_stretcher_ddt_table[type][i][1]) * 100) | ||
| 1892 | cks_setting |= 0x1; | ||
| 1893 | } | ||
| 1894 | data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].setting | ||
| 1895 | |= cks_setting << (j * 2); | ||
| 1896 | } | ||
| 1897 | CONVERT_FROM_HOST_TO_SMC_US( | ||
| 1898 | data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].setting); | ||
| 1899 | } | ||
| 1900 | |||
| 1901 | value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL); | 1821 | value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL); |
| 1902 | value &= 0xFFFFFFFE; | 1822 | value &= 0xFFFFFFFE; |
| 1903 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value); | 1823 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value); |
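The rewritten clock-stretcher setup reads a single efuse byte and maps it linearly onto a chip-dependent ring-oscillator range (1000-2300 for Polaris10, 1100-2100 for Polaris11 in the hunk above), replacing the old two-fuse formula and the removed CKS/DDT lookup-table programming. A standalone sketch of that linear decode; the ranges come from the hunk and the efuse values are just examples:

    #include <stdint.h>
    #include <stdio.h>

    enum chip { POLARIS10, POLARIS11 };

    /* Map an 8-bit fuse reading onto [min, max] linearly. */
    static uint32_t efuse_to_ro(enum chip chip, uint8_t efuse)
    {
        uint32_t min, max;

        if (chip == POLARIS10) {
            min = 1000;
            max = 2300;
        } else {
            min = 1100;
            max = 2100;
        }

        return efuse * (max - min) / 255 + min;
    }

    int main(void)
    {
        /* the driver takes the top byte of SMU_EFUSE_0 + 67*4; 0x80 is an example */
        printf("ro = %u\n", efuse_to_ro(POLARIS10, 0x80));
        printf("ro = %u\n", efuse_to_ro(POLARIS11, 0xff));
        return 0;
    }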
| @@ -1956,6 +1876,90 @@ static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr, | |||
| 1956 | return 0; | 1876 | return 0; |
| 1957 | } | 1877 | } |
| 1958 | 1878 | ||
| 1879 | |||
| 1880 | int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr) | ||
| 1881 | { | ||
| 1882 | struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); | ||
| 1883 | SMU74_Discrete_DpmTable *table = &(data->smc_state_table); | ||
| 1884 | int result = 0; | ||
| 1885 | struct pp_atom_ctrl__avfs_parameters avfs_params = {0}; | ||
| 1886 | AVFS_meanNsigma_t AVFS_meanNsigma = { {0} }; | ||
| 1887 | AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} }; | ||
| 1888 | uint32_t tmp, i; | ||
| 1889 | struct pp_smumgr *smumgr = hwmgr->smumgr; | ||
| 1890 | struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); | ||
| 1891 | |||
| 1892 | struct phm_ppt_v1_information *table_info = | ||
| 1893 | (struct phm_ppt_v1_information *)hwmgr->pptable; | ||
| 1894 | struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = | ||
| 1895 | table_info->vdd_dep_on_sclk; | ||
| 1896 | |||
| 1897 | |||
| 1898 | if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED) | ||
| 1899 | return result; | ||
| 1900 | |||
| 1901 | result = atomctrl_get_avfs_information(hwmgr, &avfs_params); | ||
| 1902 | |||
| 1903 | if (0 == result) { | ||
| 1904 | table->BTCGB_VDROOP_TABLE[0].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0); | ||
| 1905 | table->BTCGB_VDROOP_TABLE[0].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1); | ||
| 1906 | table->BTCGB_VDROOP_TABLE[0].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2); | ||
| 1907 | table->BTCGB_VDROOP_TABLE[1].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0); | ||
| 1908 | table->BTCGB_VDROOP_TABLE[1].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1); | ||
| 1909 | table->BTCGB_VDROOP_TABLE[1].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2); | ||
| 1910 | table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1); | ||
| 1911 | table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2); | ||
| 1912 | table->AVFSGB_VDROOP_TABLE[0].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b); | ||
| 1913 | table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24; | ||
| 1914 | table->AVFSGB_VDROOP_TABLE[0].m2_shift = 12; | ||
| 1915 | table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1); | ||
| 1916 | table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2); | ||
| 1917 | table->AVFSGB_VDROOP_TABLE[1].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b); | ||
| 1918 | table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24; | ||
| 1919 | table->AVFSGB_VDROOP_TABLE[1].m2_shift = 12; | ||
| 1920 | table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv); | ||
| 1921 | AVFS_meanNsigma.Aconstant[0] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0); | ||
| 1922 | AVFS_meanNsigma.Aconstant[1] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1); | ||
| 1923 | AVFS_meanNsigma.Aconstant[2] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2); | ||
| 1924 | AVFS_meanNsigma.DC_tol_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma); | ||
| 1925 | AVFS_meanNsigma.Platform_mean = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean); | ||
| 1926 | AVFS_meanNsigma.PSM_Age_CompFactor = PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor); | ||
| 1927 | AVFS_meanNsigma.Platform_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma); | ||
| 1928 | |||
| 1929 | for (i = 0; i < NUM_VFT_COLUMNS; i++) { | ||
| 1930 | AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625); | ||
| 1931 | AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100); | ||
| 1932 | } | ||
| 1933 | |||
| 1934 | result = polaris10_read_smc_sram_dword(smumgr, | ||
| 1935 | SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma), | ||
| 1936 | &tmp, data->sram_end); | ||
| 1937 | |||
| 1938 | polaris10_copy_bytes_to_smc(smumgr, | ||
| 1939 | tmp, | ||
| 1940 | (uint8_t *)&AVFS_meanNsigma, | ||
| 1941 | sizeof(AVFS_meanNsigma_t), | ||
| 1942 | data->sram_end); | ||
| 1943 | |||
| 1944 | result = polaris10_read_smc_sram_dword(smumgr, | ||
| 1945 | SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable), | ||
| 1946 | &tmp, data->sram_end); | ||
| 1947 | polaris10_copy_bytes_to_smc(smumgr, | ||
| 1948 | tmp, | ||
| 1949 | (uint8_t *)&AVFS_SclkOffset, | ||
| 1950 | sizeof(AVFS_Sclk_Offset_t), | ||
| 1951 | data->sram_end); | ||
| 1952 | |||
| 1953 | data->avfs_vdroop_override_setting = (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) | | ||
| 1954 | (avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) | | ||
| 1955 | (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) | | ||
| 1956 | (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT); | ||
| 1957 | data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false; | ||
| 1958 | } | ||
| 1959 | return result; | ||
| 1960 | } | ||
| 1961 | |||
| 1962 | |||
| 1959 | /** | 1963 | /** |
| 1960 | * Initializes the SMC table and uploads it | 1964 | * Initializes the SMC table and uploads it |
| 1961 | * | 1965 | * |
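polaris10_populate_avfs_parameters() pulls the AVFS coefficients out of ATOM_ASIC_PROFILING_INFO_V3_6, converts each field with the PP_HOST_TO_SMC_* helpers, and copies the packed structs into SMC SRAM. Those helpers swap values into the SMC's byte order; a tiny sketch of the idea, assuming (as the macro names and the CONVERT_FROM_HOST_TO_SMC_* usage elsewhere suggest) a big-endian firmware view and a little-endian host, with a deliberately shrunken stand-in struct:

    #include <stdint.h>
    #include <stdio.h>

    /* swap a 32-bit and a 16-bit value to big-endian for the firmware */
    static uint32_t host_to_smc_ul(uint32_t v)
    {
        return (v >> 24) | ((v >> 8) & 0x0000ff00u) |
               ((v << 8) & 0x00ff0000u) | (v << 24);
    }

    static uint16_t host_to_smc_us(uint16_t v)
    {
        return (uint16_t)((v >> 8) | (v << 8));
    }

    struct avfs_params {          /* tiny stand-in for the real SMU table */
        uint32_t a0;
        uint16_t dc_tol_sigma;
    };

    int main(void)
    {
        struct avfs_params p = {
            .a0           = host_to_smc_ul(0x12345678u),
            .dc_tol_sigma = host_to_smc_us(0x00abu),
        };

        printf("a0=0x%08x sigma=0x%04x\n",
               (unsigned)p.a0, (unsigned)p.dc_tol_sigma);
        return 0;
    }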
| @@ -2056,6 +2060,10 @@ static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr) | |||
| 2056 | "Failed to populate Clock Stretcher Data Table!", | 2060 | "Failed to populate Clock Stretcher Data Table!", |
| 2057 | return result); | 2061 | return result); |
| 2058 | } | 2062 | } |
| 2063 | |||
| 2064 | result = polaris10_populate_avfs_parameters(hwmgr); | ||
| 2065 | PP_ASSERT_WITH_CODE(0 == result, "Failed to populate AVFS Parameters!", return result;); | ||
| 2066 | |||
| 2059 | table->CurrSclkPllRange = 0xff; | 2067 | table->CurrSclkPllRange = 0xff; |
| 2060 | table->GraphicsVoltageChangeEnable = 1; | 2068 | table->GraphicsVoltageChangeEnable = 1; |
| 2061 | table->GraphicsThermThrottleEnable = 1; | 2069 | table->GraphicsThermThrottleEnable = 1; |
| @@ -2252,6 +2260,9 @@ static int polaris10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) | |||
| 2252 | static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) | 2260 | static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) |
| 2253 | { | 2261 | { |
| 2254 | struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); | 2262 | struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); |
| 2263 | uint32_t soft_register_value = 0; | ||
| 2264 | uint32_t handshake_disables_offset = data->soft_regs_start | ||
| 2265 | + offsetof(SMU74_SoftRegisters, HandshakeDisables); | ||
| 2255 | 2266 | ||
| 2256 | /* enable SCLK dpm */ | 2267 | /* enable SCLK dpm */ |
| 2257 | if (!data->sclk_dpm_key_disabled) | 2268 | if (!data->sclk_dpm_key_disabled) |
| @@ -2262,6 +2273,12 @@ static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) | |||
| 2262 | 2273 | ||
| 2263 | /* enable MCLK dpm */ | 2274 | /* enable MCLK dpm */ |
| 2264 | if (0 == data->mclk_dpm_key_disabled) { | 2275 | if (0 == data->mclk_dpm_key_disabled) { |
| 2276 | /* Disable UVD - SMU handshake for MCLK. */ | ||
| 2277 | soft_register_value = cgs_read_ind_register(hwmgr->device, | ||
| 2278 | CGS_IND_REG__SMC, handshake_disables_offset); | ||
| 2279 | soft_register_value |= SMU7_UVD_MCLK_HANDSHAKE_DISABLE; | ||
| 2280 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, | ||
| 2281 | handshake_disables_offset, soft_register_value); | ||
| 2265 | 2282 | ||
| 2266 | PP_ASSERT_WITH_CODE( | 2283 | PP_ASSERT_WITH_CODE( |
| 2267 | (0 == smum_send_msg_to_smc(hwmgr->smumgr, | 2284 | (0 == smum_send_msg_to_smc(hwmgr->smumgr, |
| @@ -2269,7 +2286,6 @@ static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) | |||
| 2269 | "Failed to enable MCLK DPM during DPM Start Function!", | 2286 | "Failed to enable MCLK DPM during DPM Start Function!", |
| 2270 | return -1); | 2287 | return -1); |
| 2271 | 2288 | ||
| 2272 | |||
| 2273 | PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1); | 2289 | PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1); |
| 2274 | 2290 | ||
| 2275 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5); | 2291 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5); |
| @@ -2606,6 +2622,7 @@ int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr) | |||
| 2606 | 2622 | ||
| 2607 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, | 2623 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, |
| 2608 | PHM_PlatformCaps_FanSpeedInTableIsRPM); | 2624 | PHM_PlatformCaps_FanSpeedInTableIsRPM); |
| 2625 | |||
| 2609 | if (hwmgr->chip_id == CHIP_POLARIS11) | 2626 | if (hwmgr->chip_id == CHIP_POLARIS11) |
| 2610 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, | 2627 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, |
| 2611 | PHM_PlatformCaps_SPLLShutdownSupport); | 2628 | PHM_PlatformCaps_SPLLShutdownSupport); |
| @@ -2938,6 +2955,11 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) | |||
| 2938 | data->vddci_control = POLARIS10_VOLTAGE_CONTROL_NONE; | 2955 | data->vddci_control = POLARIS10_VOLTAGE_CONTROL_NONE; |
| 2939 | data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_NONE; | 2956 | data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_NONE; |
| 2940 | 2957 | ||
| 2958 | data->enable_tdc_limit_feature = true; | ||
| 2959 | data->enable_pkg_pwr_tracking_feature = true; | ||
| 2960 | data->force_pcie_gen = PP_PCIEGenInvalid; | ||
| 2961 | data->mclk_stutter_mode_threshold = 40000; | ||
| 2962 | |||
| 2941 | if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, | 2963 | if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, |
| 2942 | VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) | 2964 | VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) |
| 2943 | data->voltage_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2; | 2965 | data->voltage_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2; |
| @@ -2962,6 +2984,10 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) | |||
| 2962 | data->vddci_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2; | 2984 | data->vddci_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2; |
| 2963 | } | 2985 | } |
| 2964 | 2986 | ||
| 2987 | if (table_info->cac_dtp_table->usClockStretchAmount != 0) | ||
| 2988 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, | ||
| 2989 | PHM_PlatformCaps_ClockStretcher); | ||
| 2990 | |||
| 2965 | polaris10_set_features_platform_caps(hwmgr); | 2991 | polaris10_set_features_platform_caps(hwmgr); |
| 2966 | 2992 | ||
| 2967 | polaris10_init_dpm_defaults(hwmgr); | 2993 | polaris10_init_dpm_defaults(hwmgr); |
| @@ -3520,10 +3546,11 @@ static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr, | |||
| 3520 | ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state; | 3546 | ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state; |
| 3521 | ATOM_Tonga_POWERPLAYTABLE *powerplay_table = | 3547 | ATOM_Tonga_POWERPLAYTABLE *powerplay_table = |
| 3522 | (ATOM_Tonga_POWERPLAYTABLE *)pp_table; | 3548 | (ATOM_Tonga_POWERPLAYTABLE *)pp_table; |
| 3523 | ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table = | 3549 | PPTable_Generic_SubTable_Header *sclk_dep_table = |
| 3524 | (ATOM_Tonga_SCLK_Dependency_Table *) | 3550 | (PPTable_Generic_SubTable_Header *) |
| 3525 | (((unsigned long)powerplay_table) + | 3551 | (((unsigned long)powerplay_table) + |
| 3526 | le16_to_cpu(powerplay_table->usSclkDependencyTableOffset)); | 3552 | le16_to_cpu(powerplay_table->usSclkDependencyTableOffset)); |
| 3553 | |||
| 3527 | ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = | 3554 | ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = |
| 3528 | (ATOM_Tonga_MCLK_Dependency_Table *) | 3555 | (ATOM_Tonga_MCLK_Dependency_Table *) |
| 3529 | (((unsigned long)powerplay_table) + | 3556 | (((unsigned long)powerplay_table) + |
| @@ -3575,7 +3602,11 @@ static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr, | |||
| 3575 | /* Performance levels are arranged from low to high. */ | 3602 | /* Performance levels are arranged from low to high. */ |
| 3576 | performance_level->memory_clock = mclk_dep_table->entries | 3603 | performance_level->memory_clock = mclk_dep_table->entries |
| 3577 | [state_entry->ucMemoryClockIndexLow].ulMclk; | 3604 | [state_entry->ucMemoryClockIndexLow].ulMclk; |
| 3578 | performance_level->engine_clock = sclk_dep_table->entries | 3605 | if (sclk_dep_table->ucRevId == 0) |
| 3606 | performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries | ||
| 3607 | [state_entry->ucEngineClockIndexLow].ulSclk; | ||
| 3608 | else if (sclk_dep_table->ucRevId == 1) | ||
| 3609 | performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries | ||
| 3579 | [state_entry->ucEngineClockIndexLow].ulSclk; | 3610 | [state_entry->ucEngineClockIndexLow].ulSclk; |
| 3580 | performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, | 3611 | performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, |
| 3581 | state_entry->ucPCIEGenLow); | 3612 | state_entry->ucPCIEGenLow); |
| @@ -3586,8 +3617,14 @@ static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr, | |||
| 3586 | [polaris10_power_state->performance_level_count++]); | 3617 | [polaris10_power_state->performance_level_count++]); |
| 3587 | performance_level->memory_clock = mclk_dep_table->entries | 3618 | performance_level->memory_clock = mclk_dep_table->entries |
| 3588 | [state_entry->ucMemoryClockIndexHigh].ulMclk; | 3619 | [state_entry->ucMemoryClockIndexHigh].ulMclk; |
| 3589 | performance_level->engine_clock = sclk_dep_table->entries | 3620 | |
| 3621 | if (sclk_dep_table->ucRevId == 0) | ||
| 3622 | performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries | ||
| 3590 | [state_entry->ucEngineClockIndexHigh].ulSclk; | 3623 | [state_entry->ucEngineClockIndexHigh].ulSclk; |
| 3624 | else if (sclk_dep_table->ucRevId == 1) | ||
| 3625 | performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries | ||
| 3626 | [state_entry->ucEngineClockIndexHigh].ulSclk; | ||
| 3627 | |||
| 3591 | performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, | 3628 | performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, |
| 3592 | state_entry->ucPCIEGenHigh); | 3629 | state_entry->ucPCIEGenHigh); |
| 3593 | performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, | 3630 | performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, |
| @@ -3645,7 +3682,6 @@ static int polaris10_get_pp_table_entry(struct pp_hwmgr *hwmgr, | |||
| 3645 | switch (state->classification.ui_label) { | 3682 | switch (state->classification.ui_label) { |
| 3646 | case PP_StateUILabel_Performance: | 3683 | case PP_StateUILabel_Performance: |
| 3647 | data->use_pcie_performance_levels = true; | 3684 | data->use_pcie_performance_levels = true; |
| 3648 | |||
| 3649 | for (i = 0; i < ps->performance_level_count; i++) { | 3685 | for (i = 0; i < ps->performance_level_count; i++) { |
| 3650 | if (data->pcie_gen_performance.max < | 3686 | if (data->pcie_gen_performance.max < |
| 3651 | ps->performance_levels[i].pcie_gen) | 3687 | ps->performance_levels[i].pcie_gen) |
| @@ -3661,7 +3697,6 @@ static int polaris10_get_pp_table_entry(struct pp_hwmgr *hwmgr, | |||
| 3661 | ps->performance_levels[i].pcie_lane) | 3697 | ps->performance_levels[i].pcie_lane) |
| 3662 | data->pcie_lane_performance.max = | 3698 | data->pcie_lane_performance.max = |
| 3663 | ps->performance_levels[i].pcie_lane; | 3699 | ps->performance_levels[i].pcie_lane; |
| 3664 | |||
| 3665 | if (data->pcie_lane_performance.min > | 3700 | if (data->pcie_lane_performance.min > |
| 3666 | ps->performance_levels[i].pcie_lane) | 3701 | ps->performance_levels[i].pcie_lane) |
| 3667 | data->pcie_lane_performance.min = | 3702 | data->pcie_lane_performance.min = |
| @@ -4187,12 +4222,9 @@ int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate) | |||
| 4187 | { | 4222 | { |
| 4188 | struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); | 4223 | struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); |
| 4189 | uint32_t mm_boot_level_offset, mm_boot_level_value; | 4224 | uint32_t mm_boot_level_offset, mm_boot_level_value; |
| 4190 | struct phm_ppt_v1_information *table_info = | ||
| 4191 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | ||
| 4192 | 4225 | ||
| 4193 | if (!bgate) { | 4226 | if (!bgate) { |
| 4194 | data->smc_state_table.SamuBootLevel = | 4227 | data->smc_state_table.SamuBootLevel = 0; |
| 4195 | (uint8_t) (table_info->mm_dep_table->count - 1); | ||
| 4196 | mm_boot_level_offset = data->dpm_table_start + | 4228 | mm_boot_level_offset = data->dpm_table_start + |
| 4197 | offsetof(SMU74_Discrete_DpmTable, SamuBootLevel); | 4229 | offsetof(SMU74_Discrete_DpmTable, SamuBootLevel); |
| 4198 | mm_boot_level_offset /= 4; | 4230 | mm_boot_level_offset /= 4; |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h index beedf35cbfa6..d717789441f5 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h | |||
| @@ -312,6 +312,9 @@ struct polaris10_hwmgr { | |||
| 312 | 312 | ||
| 313 | /* soft pptable for re-uploading into smu */ | 313 | /* soft pptable for re-uploading into smu */ |
| 314 | void *soft_pp_table; | 314 | void *soft_pp_table; |
| 315 | |||
| 316 | uint32_t avfs_vdroop_override_setting; | ||
| 317 | bool apply_avfs_cks_off_voltage; | ||
| 315 | }; | 318 | }; |
| 316 | 319 | ||
| 317 | /* To convert to Q8.8 format for firmware */ | 320 | /* To convert to Q8.8 format for firmware */ |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c index 0b99ab3ba0c5..ae96f14b827c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c | |||
| @@ -286,7 +286,7 @@ int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr) | |||
| 286 | 286 | ||
| 287 | if (polaris10_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, | 287 | if (polaris10_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, |
| 288 | (uint8_t *)&data->power_tune_table, | 288 | (uint8_t *)&data->power_tune_table, |
| 289 | sizeof(struct SMU74_Discrete_PmFuses), data->sram_end)) | 289 | (sizeof(struct SMU74_Discrete_PmFuses) - 92), data->sram_end)) |
| 290 | PP_ASSERT_WITH_CODE(false, | 290 | PP_ASSERT_WITH_CODE(false, |
| 291 | "Attempt to download PmFuseTable Failed!", | 291 | "Attempt to download PmFuseTable Failed!", |
| 292 | return -EINVAL); | 292 | return -EINVAL); |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c index aba167f7d167..b206632d4650 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c | |||
| @@ -625,10 +625,14 @@ static int tf_polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr, | |||
| 625 | int ret; | 625 | int ret; |
| 626 | struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr); | 626 | struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr); |
| 627 | struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); | 627 | struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); |
| 628 | struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); | ||
| 628 | 629 | ||
| 629 | if (smu_data->avfs.avfs_btc_status != AVFS_BTC_ENABLEAVFS) | 630 | if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED) |
| 630 | return 0; | 631 | return 0; |
| 631 | 632 | ||
| 633 | ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, | ||
| 634 | PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting); | ||
| 635 | |||
| 632 | ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ? | 636 | ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ? |
| 633 | 0 : -1; | 637 | 0 : -1; |
| 634 | 638 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c index 58742e0d1492..a3c38bbd1e94 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c | |||
| @@ -44,6 +44,20 @@ bool acpi_atcs_functions_supported(void *device, uint32_t index) | |||
| 44 | return result == 0 ? (output_buf.function_bits & (1 << (index - 1))) != 0 : false; | 44 | return result == 0 ? (output_buf.function_bits & (1 << (index - 1))) != 0 : false; |
| 45 | } | 45 | } |
| 46 | 46 | ||
| 47 | bool acpi_atcs_notify_pcie_device_ready(void *device) | ||
| 48 | { | ||
| 49 | int32_t temp_buffer = 1; | ||
| 50 | |||
| 51 | return cgs_call_acpi_method(device, CGS_ACPI_METHOD_ATCS, | ||
| 52 | ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION, | ||
| 53 | &temp_buffer, | ||
| 54 | NULL, | ||
| 55 | 0, | ||
| 56 | sizeof(temp_buffer), | ||
| 57 | 0); | ||
| 58 | } | ||
| 59 | |||
| 60 | |||
| 47 | int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise) | 61 | int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise) |
| 48 | { | 62 | { |
| 49 | struct atcs_pref_req_input atcs_input; | 63 | struct atcs_pref_req_input atcs_input; |
| @@ -52,7 +66,7 @@ int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise) | |||
| 52 | int result; | 66 | int result; |
| 53 | struct cgs_system_info info = {0}; | 67 | struct cgs_system_info info = {0}; |
| 54 | 68 | ||
| 55 | if (!acpi_atcs_functions_supported(device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST)) | 69 | if( 0 != acpi_atcs_notify_pcie_device_ready(device)) |
| 56 | return -EINVAL; | 70 | return -EINVAL; |
| 57 | 71 | ||
| 58 | info.size = sizeof(struct cgs_system_info); | 72 | info.size = sizeof(struct cgs_system_info); |
| @@ -77,7 +91,7 @@ int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise) | |||
| 77 | ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST, | 91 | ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST, |
| 78 | &atcs_input, | 92 | &atcs_input, |
| 79 | &atcs_output, | 93 | &atcs_output, |
| 80 | 0, | 94 | 1, |
| 81 | sizeof(atcs_input), | 95 | sizeof(atcs_input), |
| 82 | sizeof(atcs_output)); | 96 | sizeof(atcs_output)); |
| 83 | if (result != 0) | 97 | if (result != 0) |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c index da9f5f1b6dc2..bf4e18fd3872 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c | |||
| @@ -1302,3 +1302,46 @@ int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctr | |||
| 1302 | 1302 | ||
| 1303 | return 0; | 1303 | return 0; |
| 1304 | } | 1304 | } |
| 1305 | |||
| 1306 | int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param) | ||
| 1307 | { | ||
| 1308 | ATOM_ASIC_PROFILING_INFO_V3_6 *profile = NULL; | ||
| 1309 | |||
| 1310 | if (param == NULL) | ||
| 1311 | return -EINVAL; | ||
| 1312 | |||
| 1313 | profile = (ATOM_ASIC_PROFILING_INFO_V3_6 *) | ||
| 1314 | cgs_atom_get_data_table(hwmgr->device, | ||
| 1315 | GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo), | ||
| 1316 | NULL, NULL, NULL); | ||
| 1317 | if (!profile) | ||
| 1318 | return -1; | ||
| 1319 | |||
| 1320 | param->ulAVFS_meanNsigma_Acontant0 = profile->ulAVFS_meanNsigma_Acontant0; | ||
| 1321 | param->ulAVFS_meanNsigma_Acontant1 = profile->ulAVFS_meanNsigma_Acontant1; | ||
| 1322 | param->ulAVFS_meanNsigma_Acontant2 = profile->ulAVFS_meanNsigma_Acontant2; | ||
| 1323 | param->usAVFS_meanNsigma_DC_tol_sigma = profile->usAVFS_meanNsigma_DC_tol_sigma; | ||
| 1324 | param->usAVFS_meanNsigma_Platform_mean = profile->usAVFS_meanNsigma_Platform_mean; | ||
| 1325 | param->usAVFS_meanNsigma_Platform_sigma = profile->usAVFS_meanNsigma_Platform_sigma; | ||
| 1326 | param->ulGB_VDROOP_TABLE_CKSOFF_a0 = profile->ulGB_VDROOP_TABLE_CKSOFF_a0; | ||
| 1327 | param->ulGB_VDROOP_TABLE_CKSOFF_a1 = profile->ulGB_VDROOP_TABLE_CKSOFF_a1; | ||
| 1328 | param->ulGB_VDROOP_TABLE_CKSOFF_a2 = profile->ulGB_VDROOP_TABLE_CKSOFF_a2; | ||
| 1329 | param->ulGB_VDROOP_TABLE_CKSON_a0 = profile->ulGB_VDROOP_TABLE_CKSON_a0; | ||
| 1330 | param->ulGB_VDROOP_TABLE_CKSON_a1 = profile->ulGB_VDROOP_TABLE_CKSON_a1; | ||
| 1331 | param->ulGB_VDROOP_TABLE_CKSON_a2 = profile->ulGB_VDROOP_TABLE_CKSON_a2; | ||
| 1332 | param->ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = profile->ulAVFSGB_FUSE_TABLE_CKSOFF_m1; | ||
| 1333 | param->usAVFSGB_FUSE_TABLE_CKSOFF_m2 = profile->usAVFSGB_FUSE_TABLE_CKSOFF_m2; | ||
| 1334 | param->ulAVFSGB_FUSE_TABLE_CKSOFF_b = profile->ulAVFSGB_FUSE_TABLE_CKSOFF_b; | ||
| 1335 | param->ulAVFSGB_FUSE_TABLE_CKSON_m1 = profile->ulAVFSGB_FUSE_TABLE_CKSON_m1; | ||
| 1336 | param->usAVFSGB_FUSE_TABLE_CKSON_m2 = profile->usAVFSGB_FUSE_TABLE_CKSON_m2; | ||
| 1337 | param->ulAVFSGB_FUSE_TABLE_CKSON_b = profile->ulAVFSGB_FUSE_TABLE_CKSON_b; | ||
| 1338 | param->usMaxVoltage_0_25mv = profile->usMaxVoltage_0_25mv; | ||
| 1339 | param->ucEnableGB_VDROOP_TABLE_CKSOFF = profile->ucEnableGB_VDROOP_TABLE_CKSOFF; | ||
| 1340 | param->ucEnableGB_VDROOP_TABLE_CKSON = profile->ucEnableGB_VDROOP_TABLE_CKSON; | ||
| 1341 | param->ucEnableGB_FUSE_TABLE_CKSOFF = profile->ucEnableGB_FUSE_TABLE_CKSOFF; | ||
| 1342 | param->ucEnableGB_FUSE_TABLE_CKSON = profile->ucEnableGB_FUSE_TABLE_CKSON; | ||
| 1343 | param->usPSM_Age_ComFactor = profile->usPSM_Age_ComFactor; | ||
| 1344 | param->ucEnableApplyAVFS_CKS_OFF_Voltage = profile->ucEnableApplyAVFS_CKS_OFF_Voltage; | ||
| 1345 | |||
| 1346 | return 0; | ||
| 1347 | } | ||
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h index d24ebb566905..248c5db5f380 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h | |||
| @@ -250,6 +250,35 @@ struct pp_atomctrl_gpio_pin_assignment { | |||
| 250 | }; | 250 | }; |
| 251 | typedef struct pp_atomctrl_gpio_pin_assignment pp_atomctrl_gpio_pin_assignment; | 251 | typedef struct pp_atomctrl_gpio_pin_assignment pp_atomctrl_gpio_pin_assignment; |
| 252 | 252 | ||
| 253 | struct pp_atom_ctrl__avfs_parameters { | ||
| 254 | uint32_t ulAVFS_meanNsigma_Acontant0; | ||
| 255 | uint32_t ulAVFS_meanNsigma_Acontant1; | ||
| 256 | uint32_t ulAVFS_meanNsigma_Acontant2; | ||
| 257 | uint16_t usAVFS_meanNsigma_DC_tol_sigma; | ||
| 258 | uint16_t usAVFS_meanNsigma_Platform_mean; | ||
| 259 | uint16_t usAVFS_meanNsigma_Platform_sigma; | ||
| 260 | uint32_t ulGB_VDROOP_TABLE_CKSOFF_a0; | ||
| 261 | uint32_t ulGB_VDROOP_TABLE_CKSOFF_a1; | ||
| 262 | uint32_t ulGB_VDROOP_TABLE_CKSOFF_a2; | ||
| 263 | uint32_t ulGB_VDROOP_TABLE_CKSON_a0; | ||
| 264 | uint32_t ulGB_VDROOP_TABLE_CKSON_a1; | ||
| 265 | uint32_t ulGB_VDROOP_TABLE_CKSON_a2; | ||
| 266 | uint32_t ulAVFSGB_FUSE_TABLE_CKSOFF_m1; | ||
| 267 | uint16_t usAVFSGB_FUSE_TABLE_CKSOFF_m2; | ||
| 268 | uint32_t ulAVFSGB_FUSE_TABLE_CKSOFF_b; | ||
| 269 | uint32_t ulAVFSGB_FUSE_TABLE_CKSON_m1; | ||
| 270 | uint16_t usAVFSGB_FUSE_TABLE_CKSON_m2; | ||
| 271 | uint32_t ulAVFSGB_FUSE_TABLE_CKSON_b; | ||
| 272 | uint16_t usMaxVoltage_0_25mv; | ||
| 273 | uint8_t ucEnableGB_VDROOP_TABLE_CKSOFF; | ||
| 274 | uint8_t ucEnableGB_VDROOP_TABLE_CKSON; | ||
| 275 | uint8_t ucEnableGB_FUSE_TABLE_CKSOFF; | ||
| 276 | uint8_t ucEnableGB_FUSE_TABLE_CKSON; | ||
| 277 | uint16_t usPSM_Age_ComFactor; | ||
| 278 | uint8_t ucEnableApplyAVFS_CKS_OFF_Voltage; | ||
| 279 | uint8_t ucReserved; | ||
| 280 | }; | ||
| 281 | |||
| 253 | extern bool atomctrl_get_pp_assign_pin(struct pp_hwmgr *hwmgr, const uint32_t pinId, pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment); | 282 | extern bool atomctrl_get_pp_assign_pin(struct pp_hwmgr *hwmgr, const uint32_t pinId, pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment); |
| 254 | extern int atomctrl_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage); | 283 | extern int atomctrl_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage); |
| 255 | extern uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr); | 284 | extern uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr); |
| @@ -278,5 +307,8 @@ extern int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clo | |||
| 278 | extern int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type, | 307 | extern int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type, |
| 279 | uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage); | 308 | uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage); |
| 280 | extern int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table); | 309 | extern int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table); |
| 310 | |||
| 311 | extern int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param); | ||
| 312 | |||
| 281 | #endif | 313 | #endif |
| 282 | 314 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c index 16fed487973b..233eb7f36c1d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c | |||
| @@ -2847,27 +2847,6 @@ static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) | |||
| 2847 | } | 2847 | } |
| 2848 | } | 2848 | } |
| 2849 | 2849 | ||
| 2850 | /* Initialize Vddc DPM table based on allow Vddc values. And populate corresponding std values. */ | ||
| 2851 | for (i = 0; i < allowed_vdd_sclk_table->count; i++) { | ||
| 2852 | data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddc; | ||
| 2853 | /* tonga_hwmgr->dpm_table.VddcTable.dpm_levels[i].param1 = stdVoltageTable->entries[i].Leakage; */ | ||
| 2854 | /* param1 is for corresponding std voltage */ | ||
| 2855 | data->dpm_table.vddc_table.dpm_levels[i].enabled = 1; | ||
| 2856 | } | ||
| 2857 | data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count; | ||
| 2858 | |||
| 2859 | if (NULL != allowed_vdd_mclk_table) { | ||
| 2860 | /* Initialize Vddci DPM table based on allow Mclk values */ | ||
| 2861 | for (i = 0; i < allowed_vdd_mclk_table->count; i++) { | ||
| 2862 | data->dpm_table.vdd_ci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddci; | ||
| 2863 | data->dpm_table.vdd_ci_table.dpm_levels[i].enabled = 1; | ||
| 2864 | data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].mvdd; | ||
| 2865 | data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1; | ||
| 2866 | } | ||
| 2867 | data->dpm_table.vdd_ci_table.count = allowed_vdd_mclk_table->count; | ||
| 2868 | data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count; | ||
| 2869 | } | ||
| 2870 | |||
| 2871 | /* setup PCIE gen speed levels*/ | 2850 | /* setup PCIE gen speed levels*/ |
| 2872 | tonga_setup_default_pcie_tables(hwmgr); | 2851 | tonga_setup_default_pcie_tables(hwmgr); |
| 2873 | 2852 | ||
| @@ -4510,6 +4489,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) | |||
| 4510 | data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_NONE; | 4489 | data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_NONE; |
| 4511 | data->vdd_gfx_control = TONGA_VOLTAGE_CONTROL_NONE; | 4490 | data->vdd_gfx_control = TONGA_VOLTAGE_CONTROL_NONE; |
| 4512 | data->mvdd_control = TONGA_VOLTAGE_CONTROL_NONE; | 4491 | data->mvdd_control = TONGA_VOLTAGE_CONTROL_NONE; |
| 4492 | data->force_pcie_gen = PP_PCIEGenInvalid; | ||
| 4513 | 4493 | ||
| 4514 | if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, | 4494 | if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, |
| 4515 | VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) { | 4495 | VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) { |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h index 1b44f4e9b8f5..f127198aafc4 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h | |||
| @@ -197,6 +197,22 @@ typedef struct _ATOM_Tonga_SCLK_Dependency_Table { | |||
| 197 | ATOM_Tonga_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ | 197 | ATOM_Tonga_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ |
| 198 | } ATOM_Tonga_SCLK_Dependency_Table; | 198 | } ATOM_Tonga_SCLK_Dependency_Table; |
| 199 | 199 | ||
| 200 | typedef struct _ATOM_Polaris_SCLK_Dependency_Record { | ||
| 201 | UCHAR ucVddInd; /* Base voltage */ | ||
| 202 | USHORT usVddcOffset; /* Offset relative to base voltage */ | ||
| 203 | ULONG ulSclk; | ||
| 204 | USHORT usEdcCurrent; | ||
| 205 | UCHAR ucReliabilityTemperature; | ||
| 206 | UCHAR ucCKSVOffsetandDisable; /* Bits 0~6: Voltage offset for CKS, Bit 7: Disable/enable for the SCLK level. */ | ||
| 207 | ULONG ulSclkOffset; | ||
| 208 | } ATOM_Polaris_SCLK_Dependency_Record; | ||
| 209 | |||
| 210 | typedef struct _ATOM_Polaris_SCLK_Dependency_Table { | ||
| 211 | UCHAR ucRevId; | ||
| 212 | UCHAR ucNumEntries; /* Number of entries. */ | ||
| 213 | ATOM_Polaris_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ | ||
| 214 | } ATOM_Polaris_SCLK_Dependency_Table; | ||
| 215 | |||
| 200 | typedef struct _ATOM_Tonga_PCIE_Record { | 216 | typedef struct _ATOM_Tonga_PCIE_Record { |
| 201 | UCHAR ucPCIEGenSpeed; | 217 | UCHAR ucPCIEGenSpeed; |
| 202 | UCHAR usPCIELaneWidth; | 218 | UCHAR usPCIELaneWidth; |
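The new Polaris SCLK dependency table keeps the legacy one-element-array layout of the Tonga subtables: ucNumEntries carries the real record count and entries[] is indexed past its declared size of 1. As a rough illustration only (the helper and callback names are hypothetical, not part of this patch), a consumer walks such a table like this:

    /* Sketch: iterate a table laid out with the entries[1] idiom.  The
     * storage behind "tbl" is assumed to be large enough for ucNumEntries
     * records, as provided by the PowerPlay blob that carries it. */
    static void walk_polaris_sclk_table(const ATOM_Polaris_SCLK_Dependency_Table *tbl,
                void (*visit)(const ATOM_Polaris_SCLK_Dependency_Record *rec))
    {
            UCHAR i;

            for (i = 0; i < tbl->ucNumEntries; i++)
                    visit(&tbl->entries[i]);
    }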
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c index 10e3630ee39d..671fdb4d615a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c | |||
| @@ -408,41 +408,78 @@ static int get_mclk_voltage_dependency_table( | |||
| 408 | static int get_sclk_voltage_dependency_table( | 408 | static int get_sclk_voltage_dependency_table( |
| 409 | struct pp_hwmgr *hwmgr, | 409 | struct pp_hwmgr *hwmgr, |
| 410 | phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_sclk_dep_table, | 410 | phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_sclk_dep_table, |
| 411 | const ATOM_Tonga_SCLK_Dependency_Table * sclk_dep_table | 411 | const PPTable_Generic_SubTable_Header *sclk_dep_table |
| 412 | ) | 412 | ) |
| 413 | { | 413 | { |
| 414 | uint32_t table_size, i; | 414 | uint32_t table_size, i; |
| 415 | phm_ppt_v1_clock_voltage_dependency_table *sclk_table; | 415 | phm_ppt_v1_clock_voltage_dependency_table *sclk_table; |
| 416 | 416 | ||
| 417 | PP_ASSERT_WITH_CODE((0 != sclk_dep_table->ucNumEntries), | 417 | if (sclk_dep_table->ucRevId < 1) { |
| 418 | "Invalid PowerPlay Table!", return -1); | 418 | const ATOM_Tonga_SCLK_Dependency_Table *tonga_table = |
| 419 | (ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table; | ||
| 419 | 420 | ||
| 420 | table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record) | 421 | PP_ASSERT_WITH_CODE((0 != tonga_table->ucNumEntries), |
| 421 | * sclk_dep_table->ucNumEntries; | 422 | "Invalid PowerPlay Table!", return -1); |
| 422 | 423 | ||
| 423 | sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *) | 424 | table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record) |
| 424 | kzalloc(table_size, GFP_KERNEL); | 425 | * tonga_table->ucNumEntries; |
| 425 | 426 | ||
| 426 | if (NULL == sclk_table) | 427 | sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *) |
| 427 | return -ENOMEM; | 428 | kzalloc(table_size, GFP_KERNEL); |
| 428 | 429 | ||
| 429 | memset(sclk_table, 0x00, table_size); | 430 | if (NULL == sclk_table) |
| 430 | 431 | return -ENOMEM; | |
| 431 | sclk_table->count = (uint32_t)sclk_dep_table->ucNumEntries; | 432 | |
| 432 | 433 | memset(sclk_table, 0x00, table_size); | |
| 433 | for (i = 0; i < sclk_dep_table->ucNumEntries; i++) { | 434 | |
| 434 | sclk_table->entries[i].vddInd = | 435 | sclk_table->count = (uint32_t)tonga_table->ucNumEntries; |
| 435 | sclk_dep_table->entries[i].ucVddInd; | 436 | |
| 436 | sclk_table->entries[i].vdd_offset = | 437 | for (i = 0; i < tonga_table->ucNumEntries; i++) { |
| 437 | sclk_dep_table->entries[i].usVddcOffset; | 438 | sclk_table->entries[i].vddInd = |
| 438 | sclk_table->entries[i].clk = | 439 | tonga_table->entries[i].ucVddInd; |
| 439 | sclk_dep_table->entries[i].ulSclk; | 440 | sclk_table->entries[i].vdd_offset = |
| 440 | sclk_table->entries[i].cks_enable = | 441 | tonga_table->entries[i].usVddcOffset; |
| 441 | (((sclk_dep_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0; | 442 | sclk_table->entries[i].clk = |
| 442 | sclk_table->entries[i].cks_voffset = | 443 | tonga_table->entries[i].ulSclk; |
| 443 | (sclk_dep_table->entries[i].ucCKSVOffsetandDisable & 0x7F); | 444 | sclk_table->entries[i].cks_enable = |
| 444 | } | 445 | (((tonga_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0; |
| 446 | sclk_table->entries[i].cks_voffset = | ||
| 447 | (tonga_table->entries[i].ucCKSVOffsetandDisable & 0x7F); | ||
| 448 | } | ||
| 449 | } else { | ||
| 450 | const ATOM_Polaris_SCLK_Dependency_Table *polaris_table = | ||
| 451 | (ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table; | ||
| 452 | |||
| 453 | PP_ASSERT_WITH_CODE((0 != polaris_table->ucNumEntries), | ||
| 454 | "Invalid PowerPlay Table!", return -1); | ||
| 455 | |||
| 456 | table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record) | ||
| 457 | * polaris_table->ucNumEntries; | ||
| 458 | |||
| 459 | sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *) | ||
| 460 | kzalloc(table_size, GFP_KERNEL); | ||
| 445 | 461 | ||
| 462 | if (NULL == sclk_table) | ||
| 463 | return -ENOMEM; | ||
| 464 | |||
| 465 | memset(sclk_table, 0x00, table_size); | ||
| 466 | |||
| 467 | sclk_table->count = (uint32_t)polaris_table->ucNumEntries; | ||
| 468 | |||
| 469 | for (i = 0; i < polaris_table->ucNumEntries; i++) { | ||
| 470 | sclk_table->entries[i].vddInd = | ||
| 471 | polaris_table->entries[i].ucVddInd; | ||
| 472 | sclk_table->entries[i].vdd_offset = | ||
| 473 | polaris_table->entries[i].usVddcOffset; | ||
| 474 | sclk_table->entries[i].clk = | ||
| 475 | polaris_table->entries[i].ulSclk; | ||
| 476 | sclk_table->entries[i].cks_enable = | ||
| 477 | (((polaris_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0; | ||
| 478 | sclk_table->entries[i].cks_voffset = | ||
| 479 | (polaris_table->entries[i].ucCKSVOffsetandDisable & 0x7F); | ||
| 480 | sclk_table->entries[i].sclk_offset = polaris_table->entries[i].ulSclkOffset; | ||
| 481 | } | ||
| 482 | } | ||
| 446 | *pp_tonga_sclk_dep_table = sclk_table; | 483 | *pp_tonga_sclk_dep_table = sclk_table; |
| 447 | 484 | ||
| 448 | return 0; | 485 | return 0; |
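As the hunk above shows, the parser now looks at the subtable only through PPTable_Generic_SubTable_Header until it has read ucRevId, and only then casts to the concrete Tonga (rev 0) or Polaris (rev 1 and later) layout; this works because, as far as can be seen here, both layouts begin with the same ucRevId byte. A condensed sketch of that dispatch, with hypothetical parse helpers:

    /* Sketch only: revision dispatch over a generic subtable header. */
    const PPTable_Generic_SubTable_Header *hdr = sclk_dep_table;

    if (hdr->ucRevId < 1)
            parse_tonga_sclk((const ATOM_Tonga_SCLK_Dependency_Table *)hdr);
    else
            parse_polaris_sclk((const ATOM_Polaris_SCLK_Dependency_Table *)hdr);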
| @@ -708,8 +745,8 @@ static int init_clock_voltage_dependency( | |||
| 708 | const ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = | 745 | const ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = |
| 709 | (const ATOM_Tonga_MCLK_Dependency_Table *)(((unsigned long) powerplay_table) + | 746 | (const ATOM_Tonga_MCLK_Dependency_Table *)(((unsigned long) powerplay_table) + |
| 710 | le16_to_cpu(powerplay_table->usMclkDependencyTableOffset)); | 747 | le16_to_cpu(powerplay_table->usMclkDependencyTableOffset)); |
| 711 | const ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table = | 748 | const PPTable_Generic_SubTable_Header *sclk_dep_table = |
| 712 | (const ATOM_Tonga_SCLK_Dependency_Table *)(((unsigned long) powerplay_table) + | 749 | (const PPTable_Generic_SubTable_Header *)(((unsigned long) powerplay_table) + |
| 713 | le16_to_cpu(powerplay_table->usSclkDependencyTableOffset)); | 750 | le16_to_cpu(powerplay_table->usSclkDependencyTableOffset)); |
| 714 | const ATOM_Tonga_Hard_Limit_Table *pHardLimits = | 751 | const ATOM_Tonga_Hard_Limit_Table *pHardLimits = |
| 715 | (const ATOM_Tonga_Hard_Limit_Table *)(((unsigned long) powerplay_table) + | 752 | (const ATOM_Tonga_Hard_Limit_Table *)(((unsigned long) powerplay_table) + |
| @@ -1040,48 +1077,44 @@ int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr) | |||
| 1040 | struct phm_ppt_v1_information *pp_table_information = | 1077 | struct phm_ppt_v1_information *pp_table_information = |
| 1041 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | 1078 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
| 1042 | 1079 | ||
| 1043 | if (NULL != hwmgr->soft_pp_table) { | 1080 | if (NULL != hwmgr->soft_pp_table) |
| 1044 | kfree(hwmgr->soft_pp_table); | ||
| 1045 | hwmgr->soft_pp_table = NULL; | 1081 | hwmgr->soft_pp_table = NULL; |
| 1046 | } | ||
| 1047 | 1082 | ||
| 1048 | if (NULL != pp_table_information->vdd_dep_on_sclk) | 1083 | kfree(pp_table_information->vdd_dep_on_sclk); |
| 1049 | pp_table_information->vdd_dep_on_sclk = NULL; | 1084 | pp_table_information->vdd_dep_on_sclk = NULL; |
| 1050 | 1085 | ||
| 1051 | if (NULL != pp_table_information->vdd_dep_on_mclk) | 1086 | kfree(pp_table_information->vdd_dep_on_mclk); |
| 1052 | pp_table_information->vdd_dep_on_mclk = NULL; | 1087 | pp_table_information->vdd_dep_on_mclk = NULL; |
| 1053 | 1088 | ||
| 1054 | if (NULL != pp_table_information->valid_mclk_values) | 1089 | kfree(pp_table_information->valid_mclk_values); |
| 1055 | pp_table_information->valid_mclk_values = NULL; | 1090 | pp_table_information->valid_mclk_values = NULL; |
| 1056 | 1091 | ||
| 1057 | if (NULL != pp_table_information->valid_sclk_values) | 1092 | kfree(pp_table_information->valid_sclk_values); |
| 1058 | pp_table_information->valid_sclk_values = NULL; | 1093 | pp_table_information->valid_sclk_values = NULL; |
| 1059 | 1094 | ||
| 1060 | if (NULL != pp_table_information->vddc_lookup_table) | 1095 | kfree(pp_table_information->vddc_lookup_table); |
| 1061 | pp_table_information->vddc_lookup_table = NULL; | 1096 | pp_table_information->vddc_lookup_table = NULL; |
| 1062 | 1097 | ||
| 1063 | if (NULL != pp_table_information->vddgfx_lookup_table) | 1098 | kfree(pp_table_information->vddgfx_lookup_table); |
| 1064 | pp_table_information->vddgfx_lookup_table = NULL; | 1099 | pp_table_information->vddgfx_lookup_table = NULL; |
| 1065 | 1100 | ||
| 1066 | if (NULL != pp_table_information->mm_dep_table) | 1101 | kfree(pp_table_information->mm_dep_table); |
| 1067 | pp_table_information->mm_dep_table = NULL; | 1102 | pp_table_information->mm_dep_table = NULL; |
| 1068 | 1103 | ||
| 1069 | if (NULL != pp_table_information->cac_dtp_table) | 1104 | kfree(pp_table_information->cac_dtp_table); |
| 1070 | pp_table_information->cac_dtp_table = NULL; | 1105 | pp_table_information->cac_dtp_table = NULL; |
| 1071 | 1106 | ||
| 1072 | if (NULL != hwmgr->dyn_state.cac_dtp_table) | 1107 | kfree(hwmgr->dyn_state.cac_dtp_table); |
| 1073 | hwmgr->dyn_state.cac_dtp_table = NULL; | 1108 | hwmgr->dyn_state.cac_dtp_table = NULL; |
| 1074 | 1109 | ||
| 1075 | if (NULL != pp_table_information->ppm_parameter_table) | 1110 | kfree(pp_table_information->ppm_parameter_table); |
| 1076 | pp_table_information->ppm_parameter_table = NULL; | 1111 | pp_table_information->ppm_parameter_table = NULL; |
| 1077 | 1112 | ||
| 1078 | if (NULL != pp_table_information->pcie_table) | 1113 | kfree(pp_table_information->pcie_table); |
| 1079 | pp_table_information->pcie_table = NULL; | 1114 | pp_table_information->pcie_table = NULL; |
| 1080 | 1115 | ||
| 1081 | if (NULL != hwmgr->pptable) { | 1116 | kfree(hwmgr->pptable); |
| 1082 | kfree(hwmgr->pptable); | 1117 | hwmgr->pptable = NULL; |
| 1083 | hwmgr->pptable = NULL; | ||
| 1084 | } | ||
| 1085 | 1118 | ||
| 1086 | return result; | 1119 | return result; |
| 1087 | } | 1120 | } |
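The rewritten uninitialize path above frees each table unconditionally and then clears the pointer. That relies on kfree() accepting a NULL pointer as a no-op, so no NULL check is needed before the call, and NULLing the pointer afterwards keeps a repeated uninitialize from double-freeing. A minimal kernel-style sketch of the pattern (the helper name is illustrative, not from the patch):

    #include <linux/slab.h>

    /* Sketch: free-and-clear pattern relied on above. */
    static inline void pp_free_and_clear(void **ptr)
    {
            kfree(*ptr);    /* kfree(NULL) is a no-op, no check needed */
            *ptr = NULL;    /* guards against a later double free */
    }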
diff --git a/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h index 0c6a413eaa5b..d41d37ab5b7c 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h +++ b/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h | |||
| @@ -27,6 +27,7 @@ | |||
| 27 | 27 | ||
| 28 | #pragma pack(push, 1) | 28 | #pragma pack(push, 1) |
| 29 | 29 | ||
| 30 | #define PPSMC_MSG_SetGBDroopSettings ((uint16_t) 0x305) | ||
| 30 | 31 | ||
| 31 | #define PPSMC_SWSTATE_FLAG_DC 0x01 | 32 | #define PPSMC_SWSTATE_FLAG_DC 0x01 |
| 32 | #define PPSMC_SWSTATE_FLAG_UVD 0x02 | 33 | #define PPSMC_SWSTATE_FLAG_UVD 0x02 |
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h b/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h index 3bd5e69b9045..3df5de2cdab0 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h +++ b/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h | |||
| @@ -26,3 +26,4 @@ extern bool acpi_atcs_functions_supported(void *device, | |||
| 26 | extern int acpi_pcie_perf_request(void *device, | 26 | extern int acpi_pcie_perf_request(void *device, |
| 27 | uint8_t perf_req, | 27 | uint8_t perf_req, |
| 28 | bool advertise); | 28 | bool advertise); |
| 29 | extern bool acpi_atcs_notify_pcie_device_ready(void *device); | ||
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu74.h b/drivers/gpu/drm/amd/powerplay/inc/smu74.h index 1a12d85b8e97..fd10a9fa843d 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu74.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu74.h | |||
| @@ -34,6 +34,30 @@ | |||
| 34 | #define SMU__NUM_LCLK_DPM_LEVELS 8 | 34 | #define SMU__NUM_LCLK_DPM_LEVELS 8 |
| 35 | #define SMU__NUM_PCIE_DPM_LEVELS 8 | 35 | #define SMU__NUM_PCIE_DPM_LEVELS 8 |
| 36 | 36 | ||
| 37 | #define EXP_M1 35 | ||
| 38 | #define EXP_M2 92821 | ||
| 39 | #define EXP_B 66629747 | ||
| 40 | |||
| 41 | #define EXP_M1_1 365 | ||
| 42 | #define EXP_M2_1 658700 | ||
| 43 | #define EXP_B_1 305506134 | ||
| 44 | |||
| 45 | #define EXP_M1_2 189 | ||
| 46 | #define EXP_M2_2 379692 | ||
| 47 | #define EXP_B_2 194609469 | ||
| 48 | |||
| 49 | #define EXP_M1_3 99 | ||
| 50 | #define EXP_M2_3 217915 | ||
| 51 | #define EXP_B_3 122255994 | ||
| 52 | |||
| 53 | #define EXP_M1_4 51 | ||
| 54 | #define EXP_M2_4 122643 | ||
| 55 | #define EXP_B_4 74893384 | ||
| 56 | |||
| 57 | #define EXP_M1_5 423 | ||
| 58 | #define EXP_M2_5 1103326 | ||
| 59 | #define EXP_B_5 728122621 | ||
| 60 | |||
| 37 | enum SID_OPTION { | 61 | enum SID_OPTION { |
| 38 | SID_OPTION_HI, | 62 | SID_OPTION_HI, |
| 39 | SID_OPTION_LO, | 63 | SID_OPTION_LO, |
| @@ -548,20 +572,20 @@ struct SMU74_Firmware_Header { | |||
| 548 | uint32_t CacConfigTable; | 572 | uint32_t CacConfigTable; |
| 549 | uint32_t CacStatusTable; | 573 | uint32_t CacStatusTable; |
| 550 | 574 | ||
| 551 | |||
| 552 | uint32_t mcRegisterTable; | 575 | uint32_t mcRegisterTable; |
| 553 | 576 | ||
| 554 | |||
| 555 | uint32_t mcArbDramTimingTable; | 577 | uint32_t mcArbDramTimingTable; |
| 556 | 578 | ||
| 557 | |||
| 558 | |||
| 559 | |||
| 560 | uint32_t PmFuseTable; | 579 | uint32_t PmFuseTable; |
| 561 | uint32_t Globals; | 580 | uint32_t Globals; |
| 562 | uint32_t ClockStretcherTable; | 581 | uint32_t ClockStretcherTable; |
| 563 | uint32_t VftTable; | 582 | uint32_t VftTable; |
| 564 | uint32_t Reserved[21]; | 583 | uint32_t Reserved1; |
| 584 | uint32_t AvfsTable; | ||
| 585 | uint32_t AvfsCksOffGbvTable; | ||
| 586 | uint32_t AvfsMeanNSigma; | ||
| 587 | uint32_t AvfsSclkOffsetTable; | ||
| 588 | uint32_t Reserved[16]; | ||
| 565 | uint32_t Signature; | 589 | uint32_t Signature; |
| 566 | }; | 590 | }; |
| 567 | 591 | ||
| @@ -701,8 +725,6 @@ VR Config info is contained in dpmTable.VRConfig */ | |||
| 701 | struct SMU_ClockStretcherDataTableEntry { | 725 | struct SMU_ClockStretcherDataTableEntry { |
| 702 | uint8_t minVID; | 726 | uint8_t minVID; |
| 703 | uint8_t maxVID; | 727 | uint8_t maxVID; |
| 704 | |||
| 705 | |||
| 706 | uint16_t setting; | 728 | uint16_t setting; |
| 707 | }; | 729 | }; |
| 708 | typedef struct SMU_ClockStretcherDataTableEntry SMU_ClockStretcherDataTableEntry; | 730 | typedef struct SMU_ClockStretcherDataTableEntry SMU_ClockStretcherDataTableEntry; |
| @@ -769,6 +791,43 @@ struct VFT_TABLE_t { | |||
| 769 | typedef struct VFT_TABLE_t VFT_TABLE_t; | 791 | typedef struct VFT_TABLE_t VFT_TABLE_t; |
| 770 | 792 | ||
| 771 | 793 | ||
| 794 | /* Total margin, root mean square of Fmax + DC + Platform */ | ||
| 795 | struct AVFS_Margin_t { | ||
| 796 | VFT_CELL_t Cell[NUM_VFT_COLUMNS]; | ||
| 797 | }; | ||
| 798 | typedef struct AVFS_Margin_t AVFS_Margin_t; | ||
| 799 | |||
| 800 | #define BTCGB_VDROOP_TABLE_MAX_ENTRIES 2 | ||
| 801 | #define AVFSGB_VDROOP_TABLE_MAX_ENTRIES 2 | ||
| 802 | |||
| 803 | struct GB_VDROOP_TABLE_t { | ||
| 804 | int32_t a0; | ||
| 805 | int32_t a1; | ||
| 806 | int32_t a2; | ||
| 807 | uint32_t spare; | ||
| 808 | }; | ||
| 809 | typedef struct GB_VDROOP_TABLE_t GB_VDROOP_TABLE_t; | ||
| 810 | |||
| 811 | struct AVFS_CksOff_Gbv_t { | ||
| 812 | VFT_CELL_t Cell[NUM_VFT_COLUMNS]; | ||
| 813 | }; | ||
| 814 | typedef struct AVFS_CksOff_Gbv_t AVFS_CksOff_Gbv_t; | ||
| 815 | |||
| 816 | struct AVFS_meanNsigma_t { | ||
| 817 | uint32_t Aconstant[3]; | ||
| 818 | uint16_t DC_tol_sigma; | ||
| 819 | uint16_t Platform_mean; | ||
| 820 | uint16_t Platform_sigma; | ||
| 821 | uint16_t PSM_Age_CompFactor; | ||
| 822 | uint8_t Static_Voltage_Offset[NUM_VFT_COLUMNS]; | ||
| 823 | }; | ||
| 824 | typedef struct AVFS_meanNsigma_t AVFS_meanNsigma_t; | ||
| 825 | |||
| 826 | struct AVFS_Sclk_Offset_t { | ||
| 827 | uint16_t Sclk_Offset[8]; | ||
| 828 | }; | ||
| 829 | typedef struct AVFS_Sclk_Offset_t AVFS_Sclk_Offset_t; | ||
| 830 | |||
| 772 | #endif | 831 | #endif |
| 773 | 832 | ||
| 774 | 833 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h b/drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h index 0dfe82336dc7..b85ff5400e57 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h | |||
| @@ -223,6 +223,16 @@ struct SMU74_Discrete_StateInfo { | |||
| 223 | 223 | ||
| 224 | typedef struct SMU74_Discrete_StateInfo SMU74_Discrete_StateInfo; | 224 | typedef struct SMU74_Discrete_StateInfo SMU74_Discrete_StateInfo; |
| 225 | 225 | ||
| 226 | struct SMU_QuadraticCoeffs { | ||
| 227 | int32_t m1; | ||
| 228 | uint32_t b; | ||
| 229 | |||
| 230 | int16_t m2; | ||
| 231 | uint8_t m1_shift; | ||
| 232 | uint8_t m2_shift; | ||
| 233 | }; | ||
| 234 | typedef struct SMU_QuadraticCoeffs SMU_QuadraticCoeffs; | ||
| 235 | |||
| 226 | struct SMU74_Discrete_DpmTable { | 236 | struct SMU74_Discrete_DpmTable { |
| 227 | 237 | ||
| 228 | SMU74_PIDController GraphicsPIDController; | 238 | SMU74_PIDController GraphicsPIDController; |
| @@ -258,7 +268,14 @@ struct SMU74_Discrete_DpmTable { | |||
| 258 | uint8_t ThermOutPolarity; | 268 | uint8_t ThermOutPolarity; |
| 259 | uint8_t ThermOutMode; | 269 | uint8_t ThermOutMode; |
| 260 | uint8_t BootPhases; | 270 | uint8_t BootPhases; |
| 261 | uint32_t Reserved[4]; | 271 | |
| 272 | uint8_t VRHotLevel; | ||
| 273 | uint8_t Reserved1[3]; | ||
| 274 | uint16_t FanStartTemperature; | ||
| 275 | uint16_t FanStopTemperature; | ||
| 276 | uint16_t MaxVoltage; | ||
| 277 | uint16_t Reserved2; | ||
| 278 | uint32_t Reserved[1]; | ||
| 262 | 279 | ||
| 263 | SMU74_Discrete_GraphicsLevel GraphicsLevel[SMU74_MAX_LEVELS_GRAPHICS]; | 280 | SMU74_Discrete_GraphicsLevel GraphicsLevel[SMU74_MAX_LEVELS_GRAPHICS]; |
| 264 | SMU74_Discrete_MemoryLevel MemoryACPILevel; | 281 | SMU74_Discrete_MemoryLevel MemoryACPILevel; |
| @@ -347,6 +364,8 @@ struct SMU74_Discrete_DpmTable { | |||
| 347 | 364 | ||
| 348 | uint32_t CurrSclkPllRange; | 365 | uint32_t CurrSclkPllRange; |
| 349 | sclkFcwRange_t SclkFcwRangeTable[NUM_SCLK_RANGE]; | 366 | sclkFcwRange_t SclkFcwRangeTable[NUM_SCLK_RANGE]; |
| 367 | GB_VDROOP_TABLE_t BTCGB_VDROOP_TABLE[BTCGB_VDROOP_TABLE_MAX_ENTRIES]; | ||
| 368 | SMU_QuadraticCoeffs AVFSGB_VDROOP_TABLE[AVFSGB_VDROOP_TABLE_MAX_ENTRIES]; | ||
| 350 | }; | 369 | }; |
| 351 | 370 | ||
| 352 | typedef struct SMU74_Discrete_DpmTable SMU74_Discrete_DpmTable; | 371 | typedef struct SMU74_Discrete_DpmTable SMU74_Discrete_DpmTable; |
| @@ -550,16 +569,6 @@ struct SMU7_AcpiScoreboard { | |||
| 550 | 569 | ||
| 551 | typedef struct SMU7_AcpiScoreboard SMU7_AcpiScoreboard; | 570 | typedef struct SMU7_AcpiScoreboard SMU7_AcpiScoreboard; |
| 552 | 571 | ||
| 553 | struct SMU_QuadraticCoeffs { | ||
| 554 | int32_t m1; | ||
| 555 | uint32_t b; | ||
| 556 | |||
| 557 | int16_t m2; | ||
| 558 | uint8_t m1_shift; | ||
| 559 | uint8_t m2_shift; | ||
| 560 | }; | ||
| 561 | typedef struct SMU_QuadraticCoeffs SMU_QuadraticCoeffs; | ||
| 562 | |||
| 563 | struct SMU74_Discrete_PmFuses { | 572 | struct SMU74_Discrete_PmFuses { |
| 564 | uint8_t BapmVddCVidHiSidd[8]; | 573 | uint8_t BapmVddCVidHiSidd[8]; |
| 565 | uint8_t BapmVddCVidLoSidd[8]; | 574 | uint8_t BapmVddCVidLoSidd[8]; |
| @@ -821,6 +830,17 @@ typedef struct SMU7_GfxCuPgScoreboard SMU7_GfxCuPgScoreboard; | |||
| 821 | #define DB_PCC_SHIFT 26 | 830 | #define DB_PCC_SHIFT 26 |
| 822 | #define DB_EDC_SHIFT 27 | 831 | #define DB_EDC_SHIFT 27 |
| 823 | 832 | ||
| 833 | #define BTCGB0_Vdroop_Enable_MASK 0x1 | ||
| 834 | #define BTCGB1_Vdroop_Enable_MASK 0x2 | ||
| 835 | #define AVFSGB0_Vdroop_Enable_MASK 0x4 | ||
| 836 | #define AVFSGB1_Vdroop_Enable_MASK 0x8 | ||
| 837 | |||
| 838 | #define BTCGB0_Vdroop_Enable_SHIFT 0 | ||
| 839 | #define BTCGB1_Vdroop_Enable_SHIFT 1 | ||
| 840 | #define AVFSGB0_Vdroop_Enable_SHIFT 2 | ||
| 841 | #define AVFSGB1_Vdroop_Enable_SHIFT 3 | ||
| 842 | |||
| 843 | |||
| 824 | #pragma pack(pop) | 844 | #pragma pack(pop) |
| 825 | 845 | ||
| 826 | 846 | ||
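Since smu74_discrete.h is compiled under #pragma pack(push, 1), the new VRHotLevel/fan-temperature fields together with the shrunken Reserved[1] occupy exactly the 16 bytes that Reserved[4] used to take, so the offsets of the members that follow in SMU74_Discrete_DpmTable stay where the SMU firmware expects them. A stand-alone check of that arithmetic (the slice structs below are re-declared locally for the sketch and are not part of the header):

    #include <stdint.h>

    #pragma pack(push, 1)
    struct old_reserved_slice {             /* layout before this patch */
            uint32_t Reserved[4];
    };
    struct new_reserved_slice {             /* layout after this patch */
            uint8_t  VRHotLevel;
            uint8_t  Reserved1[3];
            uint16_t FanStartTemperature;
            uint16_t FanStopTemperature;
            uint16_t MaxVoltage;
            uint16_t Reserved2;
            uint32_t Reserved[1];
    };
    #pragma pack(pop)

    _Static_assert(sizeof(struct old_reserved_slice) ==
                   sizeof(struct new_reserved_slice),
                   "reserved region must stay 16 bytes");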
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index 673a75c74e18..8e52a2e82db5 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | |||
| @@ -1006,10 +1006,16 @@ static int fiji_smu_init(struct pp_smumgr *smumgr) | |||
| 1006 | 1006 | ||
| 1007 | static int fiji_smu_fini(struct pp_smumgr *smumgr) | 1007 | static int fiji_smu_fini(struct pp_smumgr *smumgr) |
| 1008 | { | 1008 | { |
| 1009 | struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); | ||
| 1010 | |||
| 1011 | smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle); | ||
| 1012 | |||
| 1009 | if (smumgr->backend) { | 1013 | if (smumgr->backend) { |
| 1010 | kfree(smumgr->backend); | 1014 | kfree(smumgr->backend); |
| 1011 | smumgr->backend = NULL; | 1015 | smumgr->backend = NULL; |
| 1012 | } | 1016 | } |
| 1017 | |||
| 1018 | cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); | ||
| 1013 | return 0; | 1019 | return 0; |
| 1014 | } | 1020 | } |
| 1015 | 1021 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index de618ead9db8..5dba7c509710 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | |||
| @@ -52,19 +52,18 @@ | |||
| 52 | static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = { | 52 | static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = { |
| 53 | /* Min pcie DeepSleep Activity CgSpll CgSpll CcPwr CcPwr Sclk Enabled Enabled Voltage Power */ | 53 | /* Min pcie DeepSleep Activity CgSpll CgSpll CcPwr CcPwr Sclk Enabled Enabled Voltage Power */ |
| 54 | /* Voltage, DpmLevel, DivId, Level, FuncCntl3, FuncCntl4, DynRm, DynRm1 Did, Padding,ForActivity, ForThrottle, UpHyst, DownHyst, DownHyst, Throttle */ | 54 | /* Voltage, DpmLevel, DivId, Level, FuncCntl3, FuncCntl4, DynRm, DynRm1 Did, Padding,ForActivity, ForThrottle, UpHyst, DownHyst, DownHyst, Throttle */ |
| 55 | { 0x3c0fd047, 0x00, 0x03, 0x1e00, 0x00200410, 0x87020000, 0, 0, 0x16, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x30750000, 0, 0, 0, 0, 0, 0, 0 } }, | 55 | { 0x100ea446, 0x00, 0x03, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x30750000, 0x3000, 0, 0x2600, 0, 0, 0x0004, 0x8f02, 0xffff, 0x2f00, 0x300e, 0x2700 } }, |
| 56 | { 0xa00fd047, 0x01, 0x04, 0x1e00, 0x00800510, 0x87020000, 0, 0, 0x16, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x409c0000, 0, 0, 0, 0, 0, 0, 0 } }, | 56 | { 0x400ea446, 0x01, 0x04, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x409c0000, 0x2000, 0, 0x1e00, 1, 1, 0x0004, 0x8300, 0xffff, 0x1f00, 0xcb5e, 0x1a00 } }, |
| 57 | { 0x0410d047, 0x01, 0x00, 0x1e00, 0x00600410, 0x87020000, 0, 0, 0x0e, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x50c30000, 0, 0, 0, 0, 0, 0, 0 } }, | 57 | { 0x740ea446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x50c30000, 0x2800, 0, 0x2000, 1, 1, 0x0004, 0x0c02, 0xffff, 0x2700, 0x6433, 0x2100 } }, |
| 58 | { 0x6810d047, 0x01, 0x00, 0x1e00, 0x00800410, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x60ea0000, 0, 0, 0, 0, 0, 0, 0 } }, | 58 | { 0xa40ea446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x60ea0000, 0x3000, 0, 0x2600, 1, 1, 0x0004, 0x8f02, 0xffff, 0x2f00, 0x300e, 0x2700 } }, |
| 59 | { 0xcc10d047, 0x01, 0x00, 0x1e00, 0x00e00410, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0xe8fd0000, 0, 0, 0, 0, 0, 0, 0 } }, | 59 | { 0xd80ea446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x70110100, 0x3800, 0, 0x2c00, 1, 1, 0x0004, 0x1203, 0xffff, 0x3600, 0xc9e2, 0x2e00 } }, |
| 60 | { 0x3011d047, 0x01, 0x00, 0x1e00, 0x00400510, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x70110100, 0, 0, 0, 0, 0, 0, 0 } }, | 60 | { 0x3c0fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x80380100, 0x2000, 0, 0x1e00, 2, 1, 0x0004, 0x8300, 0xffff, 0x1f00, 0xcb5e, 0x1a00 } }, |
| 61 | { 0x9411d047, 0x01, 0x00, 0x1e00, 0x00a00510, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0xf8240100, 0, 0, 0, 0, 0, 0, 0 } }, | 61 | { 0x6c0fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x905f0100, 0x2400, 0, 0x1e00, 2, 1, 0x0004, 0x8901, 0xffff, 0x2300, 0x314c, 0x1d00 } }, |
| 62 | { 0xf811d047, 0x01, 0x00, 0x1e00, 0x00000610, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x80380100, 0, 0, 0, 0, 0, 0, 0 } } | 62 | { 0xa00fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0xa0860100, 0x2800, 0, 0x2000, 2, 1, 0x0004, 0x0c02, 0xffff, 0x2700, 0x6433, 0x2100 } } |
| 63 | }; | 63 | }; |
| 64 | 64 | ||
| 65 | static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 = | 65 | static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 = |
| 66 | {0x50140000, 0x50140000, 0x00320000, 0x00, 0x00, | 66 | {0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00}; |
| 67 | 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x0000, 0x00, 0x00}; | ||
| 68 | 67 | ||
| 69 | /** | 68 | /** |
| 70 | * Set the address for reading/writing the SMC SRAM space. | 69 | * Set the address for reading/writing the SMC SRAM space. |
| @@ -219,6 +218,18 @@ bool polaris10_is_smc_ram_running(struct pp_smumgr *smumgr) | |||
| 219 | && (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C))); | 218 | && (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C))); |
| 220 | } | 219 | } |
| 221 | 220 | ||
| 221 | static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr) | ||
| 222 | { | ||
| 223 | uint32_t efuse; | ||
| 224 | |||
| 225 | efuse = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4)); | ||
| 226 | efuse &= 0x00000001; | ||
| 227 | if (efuse) | ||
| 228 | return true; | ||
| 229 | |||
| 230 | return false; | ||
| 231 | } | ||
| 232 | |||
| 222 | /** | 233 | /** |
| 223 | * Send a message to the SMC, and wait for its response. | 234 | * Send a message to the SMC, and wait for its response. |
| 224 | * | 235 | * |
| @@ -228,21 +239,27 @@ bool polaris10_is_smc_ram_running(struct pp_smumgr *smumgr) | |||
| 228 | */ | 239 | */ |
| 229 | int polaris10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) | 240 | int polaris10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) |
| 230 | { | 241 | { |
| 242 | int ret; | ||
| 243 | |||
| 231 | if (!polaris10_is_smc_ram_running(smumgr)) | 244 | if (!polaris10_is_smc_ram_running(smumgr)) |
| 232 | return -1; | 245 | return -1; |
| 233 | 246 | ||
| 247 | |||
| 234 | SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); | 248 | SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); |
| 235 | 249 | ||
| 236 | if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) | 250 | ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP); |
| 237 | printk("Failed to send Previous Message.\n"); | ||
| 238 | 251 | ||
| 252 | if (ret != 1) | ||
| 253 | printk("\n failed to send pre message %x ret is %d \n", msg, ret); | ||
| 239 | 254 | ||
| 240 | cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); | 255 | cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); |
| 241 | 256 | ||
| 242 | SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); | 257 | SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); |
| 243 | 258 | ||
| 244 | if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) | 259 | ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP); |
| 245 | printk("Failed to send Message.\n"); | 260 | |
| 261 | if (ret != 1) | ||
| 262 | printk("\n failed to send message %x ret is %d \n", msg, ret); | ||
| 246 | 263 | ||
| 247 | return 0; | 264 | return 0; |
| 248 | } | 265 | } |
| @@ -469,6 +486,7 @@ int polaris10_smu_fini(struct pp_smumgr *smumgr) | |||
| 469 | kfree(smumgr->backend); | 486 | kfree(smumgr->backend); |
| 470 | smumgr->backend = NULL; | 487 | smumgr->backend = NULL; |
| 471 | } | 488 | } |
| 489 | cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); | ||
| 472 | return 0; | 490 | return 0; |
| 473 | } | 491 | } |
| 474 | 492 | ||
| @@ -952,6 +970,11 @@ static int polaris10_smu_init(struct pp_smumgr *smumgr) | |||
| 952 | (cgs_handle_t)smu_data->smu_buffer.handle); | 970 | (cgs_handle_t)smu_data->smu_buffer.handle); |
| 953 | return -1;); | 971 | return -1;); |
| 954 | 972 | ||
| 973 | if (polaris10_is_hw_avfs_present(smumgr)) | ||
| 974 | smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT; | ||
| 975 | else | ||
| 976 | smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED; | ||
| 977 | |||
| 955 | return 0; | 978 | return 0; |
| 956 | } | 979 | } |
| 957 | 980 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index c483baf6b4fb..0728c1e3d97a 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | |||
| @@ -81,6 +81,7 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle) | |||
| 81 | 81 | ||
| 82 | int smum_fini(struct pp_smumgr *smumgr) | 82 | int smum_fini(struct pp_smumgr *smumgr) |
| 83 | { | 83 | { |
| 84 | kfree(smumgr->device); | ||
| 84 | kfree(smumgr); | 85 | kfree(smumgr); |
| 85 | return 0; | 86 | return 0; |
| 86 | } | 87 | } |
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c index 32820b680d88..b22722eabafc 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c | |||
| @@ -328,10 +328,17 @@ int tonga_write_smc_sram_dword(struct pp_smumgr *smumgr, | |||
| 328 | 328 | ||
| 329 | static int tonga_smu_fini(struct pp_smumgr *smumgr) | 329 | static int tonga_smu_fini(struct pp_smumgr *smumgr) |
| 330 | { | 330 | { |
| 331 | struct tonga_smumgr *priv = (struct tonga_smumgr *)(smumgr->backend); | ||
| 332 | |||
| 333 | smu_free_memory(smumgr->device, (void *)priv->smu_buffer.handle); | ||
| 334 | smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle); | ||
| 335 | |||
| 331 | if (smumgr->backend != NULL) { | 336 | if (smumgr->backend != NULL) { |
| 332 | kfree(smumgr->backend); | 337 | kfree(smumgr->backend); |
| 333 | smumgr->backend = NULL; | 338 | smumgr->backend = NULL; |
| 334 | } | 339 | } |
| 340 | |||
| 341 | cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); | ||
| 335 | return 0; | 342 | return 0; |
| 336 | } | 343 | } |
| 337 | 344 | ||
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c index 39802c0539b6..3d34fc4ca826 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c | |||
| @@ -266,9 +266,10 @@ int atmel_hlcdc_create_outputs(struct drm_device *dev) | |||
| 266 | if (!ret) | 266 | if (!ret) |
| 267 | ret = atmel_hlcdc_check_endpoint(dev, &ep); | 267 | ret = atmel_hlcdc_check_endpoint(dev, &ep); |
| 268 | 268 | ||
| 269 | of_node_put(ep_np); | 269 | if (ret) { |
| 270 | if (ret) | 270 | of_node_put(ep_np); |
| 271 | return ret; | 271 | return ret; |
| 272 | } | ||
| 272 | } | 273 | } |
| 273 | 274 | ||
| 274 | for_each_endpoint_of_node(dev->dev->of_node, ep_np) { | 275 | for_each_endpoint_of_node(dev->dev->of_node, ep_np) { |
| @@ -276,9 +277,10 @@ int atmel_hlcdc_create_outputs(struct drm_device *dev) | |||
| 276 | if (!ret) | 277 | if (!ret) |
| 277 | ret = atmel_hlcdc_attach_endpoint(dev, &ep); | 278 | ret = atmel_hlcdc_attach_endpoint(dev, &ep); |
| 278 | 279 | ||
| 279 | of_node_put(ep_np); | 280 | if (ret) { |
| 280 | if (ret) | 281 | of_node_put(ep_np); |
| 281 | return ret; | 282 | return ret; |
| 283 | } | ||
| 282 | } | 284 | } |
| 283 | 285 | ||
| 284 | return 0; | 286 | return 0; |
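The atmel-hlcdc change above looks like an of_node refcount fix: for_each_endpoint_of_node() already drops the reference on the previous endpoint as it advances, so calling of_node_put() unconditionally in the loop body released each node twice; the explicit put is only needed on the early-return path, where the iterator never runs again. A hedged sketch of that pattern (walk_endpoints() and do_endpoint() are placeholders, not functions from this driver):

    #include <drm/drmP.h>
    #include <linux/of_graph.h>

    /* Sketch of the OF-graph iteration pattern assumed above. */
    static int walk_endpoints(struct drm_device *dev,
                              int (*do_endpoint)(struct drm_device *, struct device_node *))
    {
            struct device_node *ep_np;
            int ret;

            for_each_endpoint_of_node(dev->dev->of_node, ep_np) {
                    ret = do_endpoint(dev, ep_np);
                    if (ret) {
                            of_node_put(ep_np);     /* early exit: drop our reference */
                            return ret;
                    }
                    /* no of_node_put() here: the iterator releases ep_np itself */
            }

            return 0;
    }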
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c index aef3ca8a81fa..016c191221f3 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c | |||
| @@ -339,6 +339,8 @@ atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane, | |||
| 339 | 339 | ||
| 340 | atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff, | 340 | atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff, |
| 341 | factor_reg); | 341 | factor_reg); |
| 342 | } else { | ||
| 343 | atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff, 0); | ||
| 342 | } | 344 | } |
| 343 | } | 345 | } |
| 344 | 346 | ||
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index c204ef32df16..9bb99e274d23 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c | |||
| @@ -1296,14 +1296,39 @@ EXPORT_SYMBOL(drm_atomic_add_affected_planes); | |||
| 1296 | */ | 1296 | */ |
| 1297 | void drm_atomic_legacy_backoff(struct drm_atomic_state *state) | 1297 | void drm_atomic_legacy_backoff(struct drm_atomic_state *state) |
| 1298 | { | 1298 | { |
| 1299 | struct drm_device *dev = state->dev; | ||
| 1300 | unsigned crtc_mask = 0; | ||
| 1301 | struct drm_crtc *crtc; | ||
| 1299 | int ret; | 1302 | int ret; |
| 1303 | bool global = false; | ||
| 1304 | |||
| 1305 | drm_for_each_crtc(crtc, dev) { | ||
| 1306 | if (crtc->acquire_ctx != state->acquire_ctx) | ||
| 1307 | continue; | ||
| 1308 | |||
| 1309 | crtc_mask |= drm_crtc_mask(crtc); | ||
| 1310 | crtc->acquire_ctx = NULL; | ||
| 1311 | } | ||
| 1312 | |||
| 1313 | if (WARN_ON(dev->mode_config.acquire_ctx == state->acquire_ctx)) { | ||
| 1314 | global = true; | ||
| 1315 | |||
| 1316 | dev->mode_config.acquire_ctx = NULL; | ||
| 1317 | } | ||
| 1300 | 1318 | ||
| 1301 | retry: | 1319 | retry: |
| 1302 | drm_modeset_backoff(state->acquire_ctx); | 1320 | drm_modeset_backoff(state->acquire_ctx); |
| 1303 | 1321 | ||
| 1304 | ret = drm_modeset_lock_all_ctx(state->dev, state->acquire_ctx); | 1322 | ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx); |
| 1305 | if (ret) | 1323 | if (ret) |
| 1306 | goto retry; | 1324 | goto retry; |
| 1325 | |||
| 1326 | drm_for_each_crtc(crtc, dev) | ||
| 1327 | if (drm_crtc_mask(crtc) & crtc_mask) | ||
| 1328 | crtc->acquire_ctx = state->acquire_ctx; | ||
| 1329 | |||
| 1330 | if (global) | ||
| 1331 | dev->mode_config.acquire_ctx = state->acquire_ctx; | ||
| 1307 | } | 1332 | } |
| 1308 | EXPORT_SYMBOL(drm_atomic_legacy_backoff); | 1333 | EXPORT_SYMBOL(drm_atomic_legacy_backoff); |
| 1309 | 1334 | ||
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index a6e42433ef0e..26feb2f8453f 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c | |||
| @@ -528,11 +528,11 @@ drm_crtc_helper_disable(struct drm_crtc *crtc) | |||
| 528 | int drm_crtc_helper_set_config(struct drm_mode_set *set) | 528 | int drm_crtc_helper_set_config(struct drm_mode_set *set) |
| 529 | { | 529 | { |
| 530 | struct drm_device *dev; | 530 | struct drm_device *dev; |
| 531 | struct drm_crtc *new_crtc; | 531 | struct drm_crtc **save_encoder_crtcs, *new_crtc; |
| 532 | struct drm_encoder *save_encoders, *new_encoder, *encoder; | 532 | struct drm_encoder **save_connector_encoders, *new_encoder, *encoder; |
| 533 | bool mode_changed = false; /* if true do a full mode set */ | 533 | bool mode_changed = false; /* if true do a full mode set */ |
| 534 | bool fb_changed = false; /* if true and !mode_changed just do a flip */ | 534 | bool fb_changed = false; /* if true and !mode_changed just do a flip */ |
| 535 | struct drm_connector *save_connectors, *connector; | 535 | struct drm_connector *connector; |
| 536 | int count = 0, ro, fail = 0; | 536 | int count = 0, ro, fail = 0; |
| 537 | const struct drm_crtc_helper_funcs *crtc_funcs; | 537 | const struct drm_crtc_helper_funcs *crtc_funcs; |
| 538 | struct drm_mode_set save_set; | 538 | struct drm_mode_set save_set; |
| @@ -574,15 +574,15 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) | |||
| 574 | * Allocate space for the backup of all (non-pointer) encoder and | 574 | * Allocate space for the backup of all (non-pointer) encoder and |
| 575 | * connector data. | 575 | * connector data. |
| 576 | */ | 576 | */ |
| 577 | save_encoders = kzalloc(dev->mode_config.num_encoder * | 577 | save_encoder_crtcs = kzalloc(dev->mode_config.num_encoder * |
| 578 | sizeof(struct drm_encoder), GFP_KERNEL); | 578 | sizeof(struct drm_crtc *), GFP_KERNEL); |
| 579 | if (!save_encoders) | 579 | if (!save_encoder_crtcs) |
| 580 | return -ENOMEM; | 580 | return -ENOMEM; |
| 581 | 581 | ||
| 582 | save_connectors = kzalloc(dev->mode_config.num_connector * | 582 | save_connector_encoders = kzalloc(dev->mode_config.num_connector * |
| 583 | sizeof(struct drm_connector), GFP_KERNEL); | 583 | sizeof(struct drm_encoder *), GFP_KERNEL); |
| 584 | if (!save_connectors) { | 584 | if (!save_connector_encoders) { |
| 585 | kfree(save_encoders); | 585 | kfree(save_encoder_crtcs); |
| 586 | return -ENOMEM; | 586 | return -ENOMEM; |
| 587 | } | 587 | } |
| 588 | 588 | ||
| @@ -593,12 +593,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) | |||
| 593 | */ | 593 | */ |
| 594 | count = 0; | 594 | count = 0; |
| 595 | drm_for_each_encoder(encoder, dev) { | 595 | drm_for_each_encoder(encoder, dev) { |
| 596 | save_encoders[count++] = *encoder; | 596 | save_encoder_crtcs[count++] = encoder->crtc; |
| 597 | } | 597 | } |
| 598 | 598 | ||
| 599 | count = 0; | 599 | count = 0; |
| 600 | drm_for_each_connector(connector, dev) { | 600 | drm_for_each_connector(connector, dev) { |
| 601 | save_connectors[count++] = *connector; | 601 | save_connector_encoders[count++] = connector->encoder; |
| 602 | } | 602 | } |
| 603 | 603 | ||
| 604 | save_set.crtc = set->crtc; | 604 | save_set.crtc = set->crtc; |
| @@ -631,8 +631,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) | |||
| 631 | mode_changed = true; | 631 | mode_changed = true; |
| 632 | } | 632 | } |
| 633 | 633 | ||
| 634 | /* take a reference on all connectors in set */ | 634 | /* take a reference on all unbound connectors in set, reuse the |
| 635 | * already taken reference for bound connectors | ||
| 636 | */ | ||
| 635 | for (ro = 0; ro < set->num_connectors; ro++) { | 637 | for (ro = 0; ro < set->num_connectors; ro++) { |
| 638 | if (set->connectors[ro]->encoder) | ||
| 639 | continue; | ||
| 636 | drm_connector_reference(set->connectors[ro]); | 640 | drm_connector_reference(set->connectors[ro]); |
| 637 | } | 641 | } |
| 638 | 642 | ||
| @@ -754,30 +758,28 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) | |||
| 754 | } | 758 | } |
| 755 | } | 759 | } |
| 756 | 760 | ||
| 757 | /* after fail drop reference on all connectors in save set */ | 761 | kfree(save_connector_encoders); |
| 758 | count = 0; | 762 | kfree(save_encoder_crtcs); |
| 759 | drm_for_each_connector(connector, dev) { | ||
| 760 | drm_connector_unreference(&save_connectors[count++]); | ||
| 761 | } | ||
| 762 | |||
| 763 | kfree(save_connectors); | ||
| 764 | kfree(save_encoders); | ||
| 765 | return 0; | 763 | return 0; |
| 766 | 764 | ||
| 767 | fail: | 765 | fail: |
| 768 | /* Restore all previous data. */ | 766 | /* Restore all previous data. */ |
| 769 | count = 0; | 767 | count = 0; |
| 770 | drm_for_each_encoder(encoder, dev) { | 768 | drm_for_each_encoder(encoder, dev) { |
| 771 | *encoder = save_encoders[count++]; | 769 | encoder->crtc = save_encoder_crtcs[count++]; |
| 772 | } | 770 | } |
| 773 | 771 | ||
| 774 | count = 0; | 772 | count = 0; |
| 775 | drm_for_each_connector(connector, dev) { | 773 | drm_for_each_connector(connector, dev) { |
| 776 | *connector = save_connectors[count++]; | 774 | connector->encoder = save_connector_encoders[count++]; |
| 777 | } | 775 | } |
| 778 | 776 | ||
| 779 | /* after fail drop reference on all connectors in set */ | 777 | /* after fail drop reference on all unbound connectors in set, let |
| 778 | * bound connectors keep their reference | ||
| 779 | */ | ||
| 780 | for (ro = 0; ro < set->num_connectors; ro++) { | 780 | for (ro = 0; ro < set->num_connectors; ro++) { |
| 781 | if (set->connectors[ro]->encoder) | ||
| 782 | continue; | ||
| 781 | drm_connector_unreference(set->connectors[ro]); | 783 | drm_connector_unreference(set->connectors[ro]); |
| 782 | } | 784 | } |
| 783 | 785 | ||
| @@ -787,8 +789,8 @@ fail: | |||
| 787 | save_set.y, save_set.fb)) | 789 | save_set.y, save_set.fb)) |
| 788 | DRM_ERROR("failed to restore config after modeset failure\n"); | 790 | DRM_ERROR("failed to restore config after modeset failure\n"); |
| 789 | 791 | ||
| 790 | kfree(save_connectors); | 792 | kfree(save_connector_encoders); |
| 791 | kfree(save_encoders); | 793 | kfree(save_encoder_crtcs); |
| 792 | return ret; | 794 | return ret; |
| 793 | } | 795 | } |
| 794 | EXPORT_SYMBOL(drm_crtc_helper_set_config); | 796 | EXPORT_SYMBOL(drm_crtc_helper_set_config); |
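The drm_crtc_helper_set_config() rework stops memcpy'ing whole drm_encoder and drm_connector objects into the backup arrays; now that connectors are reference counted, copying the full objects also duplicated their refcount state, and the old code even dropped references on those copies. Only the crtc/encoder link pointers are mutated by the function, so snapshotting just those pointers is enough to roll back. A compressed sketch of that idea (the function and the try_modeset callback are hypothetical, not part of the helper):

    #include <drm/drmP.h>
    #include <drm/drm_crtc.h>

    /* Sketch: back up only the link that may be rewritten and restore it
     * on failure instead of restoring a whole object. */
    static int set_encoder_crtc(struct drm_device *dev,
                                struct drm_encoder *encoder,
                                struct drm_crtc *new_crtc,
                                int (*try_modeset)(struct drm_device *))
    {
            struct drm_crtc *saved_crtc = encoder->crtc;    /* snapshot */
            int ret;

            encoder->crtc = new_crtc;       /* trial change */
            ret = try_modeset(dev);
            if (ret)
                    encoder->crtc = saved_crtc;     /* rollback by pointer */
            return ret;
    }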
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index a13edf5de2d6..6537908050d7 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c | |||
| @@ -2927,11 +2927,9 @@ static void drm_dp_destroy_connector_work(struct work_struct *work) | |||
| 2927 | drm_dp_port_teardown_pdt(port, port->pdt); | 2927 | drm_dp_port_teardown_pdt(port, port->pdt); |
| 2928 | 2928 | ||
| 2929 | if (!port->input && port->vcpi.vcpi > 0) { | 2929 | if (!port->input && port->vcpi.vcpi > 0) { |
| 2930 | if (mgr->mst_state) { | 2930 | drm_dp_mst_reset_vcpi_slots(mgr, port); |
| 2931 | drm_dp_mst_reset_vcpi_slots(mgr, port); | 2931 | drm_dp_update_payload_part1(mgr); |
| 2932 | drm_dp_update_payload_part1(mgr); | 2932 | drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); |
| 2933 | drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); | ||
| 2934 | } | ||
| 2935 | } | 2933 | } |
| 2936 | 2934 | ||
| 2937 | kref_put(&port->kref, drm_dp_free_mst_port); | 2935 | kref_put(&port->kref, drm_dp_free_mst_port); |
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c index 522cfd447892..16353ee81651 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c | |||
| @@ -225,6 +225,7 @@ struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu) | |||
| 225 | 225 | ||
| 226 | etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING; | 226 | etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING; |
| 227 | etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops; | 227 | etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops; |
| 228 | etnaviv_domain->domain.pgsize_bitmap = SZ_4K; | ||
| 228 | etnaviv_domain->domain.geometry.aperture_start = GPU_MEM_START; | 229 | etnaviv_domain->domain.geometry.aperture_start = GPU_MEM_START; |
| 229 | etnaviv_domain->domain.geometry.aperture_end = GPU_MEM_START + PT_ENTRIES * SZ_4K - 1; | 230 | etnaviv_domain->domain.geometry.aperture_end = GPU_MEM_START + PT_ENTRIES * SZ_4K - 1; |
| 230 | 231 | ||
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index f6223f907c15..7f9901b7777b 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c | |||
| @@ -31,7 +31,6 @@ | |||
| 31 | #include "exynos_drm_plane.h" | 31 | #include "exynos_drm_plane.h" |
| 32 | #include "exynos_drm_drv.h" | 32 | #include "exynos_drm_drv.h" |
| 33 | #include "exynos_drm_fb.h" | 33 | #include "exynos_drm_fb.h" |
| 34 | #include "exynos_drm_fbdev.h" | ||
| 35 | #include "exynos_drm_iommu.h" | 34 | #include "exynos_drm_iommu.h" |
| 36 | 35 | ||
| 37 | /* | 36 | /* |
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c index 468498e3fec1..4c1fb3f8b5a6 100644 --- a/drivers/gpu/drm/exynos/exynos_dp.c +++ b/drivers/gpu/drm/exynos/exynos_dp.c | |||
| @@ -34,7 +34,7 @@ | |||
| 34 | 34 | ||
| 35 | struct exynos_dp_device { | 35 | struct exynos_dp_device { |
| 36 | struct drm_encoder encoder; | 36 | struct drm_encoder encoder; |
| 37 | struct drm_connector connector; | 37 | struct drm_connector *connector; |
| 38 | struct drm_bridge *ptn_bridge; | 38 | struct drm_bridge *ptn_bridge; |
| 39 | struct drm_device *drm_dev; | 39 | struct drm_device *drm_dev; |
| 40 | struct device *dev; | 40 | struct device *dev; |
| @@ -70,7 +70,7 @@ static int exynos_dp_poweroff(struct analogix_dp_plat_data *plat_data) | |||
| 70 | static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data) | 70 | static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data) |
| 71 | { | 71 | { |
| 72 | struct exynos_dp_device *dp = to_dp(plat_data); | 72 | struct exynos_dp_device *dp = to_dp(plat_data); |
| 73 | struct drm_connector *connector = &dp->connector; | 73 | struct drm_connector *connector = dp->connector; |
| 74 | struct drm_display_mode *mode; | 74 | struct drm_display_mode *mode; |
| 75 | int num_modes = 0; | 75 | int num_modes = 0; |
| 76 | 76 | ||
| @@ -103,6 +103,7 @@ static int exynos_dp_bridge_attach(struct analogix_dp_plat_data *plat_data, | |||
| 103 | int ret; | 103 | int ret; |
| 104 | 104 | ||
| 105 | drm_connector_register(connector); | 105 | drm_connector_register(connector); |
| 106 | dp->connector = connector; | ||
| 106 | 107 | ||
| 107 | /* Pre-empt DP connector creation if there's a bridge */ | 108 | /* Pre-empt DP connector creation if there's a bridge */ |
| 108 | if (dp->ptn_bridge) { | 109 | if (dp->ptn_bridge) { |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c index 011211e4167d..edbd98ff293e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_core.c +++ b/drivers/gpu/drm/exynos/exynos_drm_core.c | |||
| @@ -15,7 +15,6 @@ | |||
| 15 | #include <drm/drmP.h> | 15 | #include <drm/drmP.h> |
| 16 | #include "exynos_drm_drv.h" | 16 | #include "exynos_drm_drv.h" |
| 17 | #include "exynos_drm_crtc.h" | 17 | #include "exynos_drm_crtc.h" |
| 18 | #include "exynos_drm_fbdev.h" | ||
| 19 | 18 | ||
| 20 | static LIST_HEAD(exynos_drm_subdrv_list); | 19 | static LIST_HEAD(exynos_drm_subdrv_list); |
| 21 | 20 | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 3efe1aa89416..d47216488985 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c | |||
| @@ -30,7 +30,6 @@ | |||
| 30 | 30 | ||
| 31 | #include "exynos_drm_drv.h" | 31 | #include "exynos_drm_drv.h" |
| 32 | #include "exynos_drm_fb.h" | 32 | #include "exynos_drm_fb.h" |
| 33 | #include "exynos_drm_fbdev.h" | ||
| 34 | #include "exynos_drm_crtc.h" | 33 | #include "exynos_drm_crtc.h" |
| 35 | #include "exynos_drm_plane.h" | 34 | #include "exynos_drm_plane.h" |
| 36 | #include "exynos_drm_iommu.h" | 35 | #include "exynos_drm_iommu.h" |
| @@ -120,7 +119,6 @@ static struct fimd_driver_data s3c64xx_fimd_driver_data = { | |||
| 120 | .timing_base = 0x0, | 119 | .timing_base = 0x0, |
| 121 | .has_clksel = 1, | 120 | .has_clksel = 1, |
| 122 | .has_limited_fmt = 1, | 121 | .has_limited_fmt = 1, |
| 123 | .has_hw_trigger = 1, | ||
| 124 | }; | 122 | }; |
| 125 | 123 | ||
| 126 | static struct fimd_driver_data exynos3_fimd_driver_data = { | 124 | static struct fimd_driver_data exynos3_fimd_driver_data = { |
| @@ -171,14 +169,11 @@ static struct fimd_driver_data exynos5420_fimd_driver_data = { | |||
| 171 | .lcdblk_vt_shift = 24, | 169 | .lcdblk_vt_shift = 24, |
| 172 | .lcdblk_bypass_shift = 15, | 170 | .lcdblk_bypass_shift = 15, |
| 173 | .lcdblk_mic_bypass_shift = 11, | 171 | .lcdblk_mic_bypass_shift = 11, |
| 174 | .trg_type = I80_HW_TRG, | ||
| 175 | .has_shadowcon = 1, | 172 | .has_shadowcon = 1, |
| 176 | .has_vidoutcon = 1, | 173 | .has_vidoutcon = 1, |
| 177 | .has_vtsel = 1, | 174 | .has_vtsel = 1, |
| 178 | .has_mic_bypass = 1, | 175 | .has_mic_bypass = 1, |
| 179 | .has_dp_clk = 1, | 176 | .has_dp_clk = 1, |
| 180 | .has_hw_trigger = 1, | ||
| 181 | .has_trigger_per_te = 1, | ||
| 182 | }; | 177 | }; |
| 183 | 178 | ||
| 184 | struct fimd_context { | 179 | struct fimd_context { |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 493552368295..8564c3da0d22 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c | |||
| @@ -48,13 +48,13 @@ | |||
| 48 | 48 | ||
| 49 | /* registers for base address */ | 49 | /* registers for base address */ |
| 50 | #define G2D_SRC_BASE_ADDR 0x0304 | 50 | #define G2D_SRC_BASE_ADDR 0x0304 |
| 51 | #define G2D_SRC_STRIDE_REG 0x0308 | 51 | #define G2D_SRC_STRIDE 0x0308 |
| 52 | #define G2D_SRC_COLOR_MODE 0x030C | 52 | #define G2D_SRC_COLOR_MODE 0x030C |
| 53 | #define G2D_SRC_LEFT_TOP 0x0310 | 53 | #define G2D_SRC_LEFT_TOP 0x0310 |
| 54 | #define G2D_SRC_RIGHT_BOTTOM 0x0314 | 54 | #define G2D_SRC_RIGHT_BOTTOM 0x0314 |
| 55 | #define G2D_SRC_PLANE2_BASE_ADDR 0x0318 | 55 | #define G2D_SRC_PLANE2_BASE_ADDR 0x0318 |
| 56 | #define G2D_DST_BASE_ADDR 0x0404 | 56 | #define G2D_DST_BASE_ADDR 0x0404 |
| 57 | #define G2D_DST_STRIDE_REG 0x0408 | 57 | #define G2D_DST_STRIDE 0x0408 |
| 58 | #define G2D_DST_COLOR_MODE 0x040C | 58 | #define G2D_DST_COLOR_MODE 0x040C |
| 59 | #define G2D_DST_LEFT_TOP 0x0410 | 59 | #define G2D_DST_LEFT_TOP 0x0410 |
| 60 | #define G2D_DST_RIGHT_BOTTOM 0x0414 | 60 | #define G2D_DST_RIGHT_BOTTOM 0x0414 |
| @@ -563,7 +563,7 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset) | |||
| 563 | 563 | ||
| 564 | switch (reg_offset) { | 564 | switch (reg_offset) { |
| 565 | case G2D_SRC_BASE_ADDR: | 565 | case G2D_SRC_BASE_ADDR: |
| 566 | case G2D_SRC_STRIDE_REG: | 566 | case G2D_SRC_STRIDE: |
| 567 | case G2D_SRC_COLOR_MODE: | 567 | case G2D_SRC_COLOR_MODE: |
| 568 | case G2D_SRC_LEFT_TOP: | 568 | case G2D_SRC_LEFT_TOP: |
| 569 | case G2D_SRC_RIGHT_BOTTOM: | 569 | case G2D_SRC_RIGHT_BOTTOM: |
| @@ -573,7 +573,7 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset) | |||
| 573 | reg_type = REG_TYPE_SRC_PLANE2; | 573 | reg_type = REG_TYPE_SRC_PLANE2; |
| 574 | break; | 574 | break; |
| 575 | case G2D_DST_BASE_ADDR: | 575 | case G2D_DST_BASE_ADDR: |
| 576 | case G2D_DST_STRIDE_REG: | 576 | case G2D_DST_STRIDE: |
| 577 | case G2D_DST_COLOR_MODE: | 577 | case G2D_DST_COLOR_MODE: |
| 578 | case G2D_DST_LEFT_TOP: | 578 | case G2D_DST_LEFT_TOP: |
| 579 | case G2D_DST_RIGHT_BOTTOM: | 579 | case G2D_DST_RIGHT_BOTTOM: |
| @@ -968,8 +968,8 @@ static int g2d_check_reg_offset(struct device *dev, | |||
| 968 | } else | 968 | } else |
| 969 | buf_info->types[reg_type] = BUF_TYPE_GEM; | 969 | buf_info->types[reg_type] = BUF_TYPE_GEM; |
| 970 | break; | 970 | break; |
| 971 | case G2D_SRC_STRIDE_REG: | 971 | case G2D_SRC_STRIDE: |
| 972 | case G2D_DST_STRIDE_REG: | 972 | case G2D_DST_STRIDE: |
| 973 | if (for_addr) | 973 | if (for_addr) |
| 974 | goto err; | 974 | goto err; |
| 975 | 975 | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c index 55f1d37c666a..77f12c00abf9 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_plane.c +++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c | |||
| @@ -242,7 +242,7 @@ exynos_drm_plane_check_size(const struct exynos_drm_plane_config *config, | |||
| 242 | state->v_ratio == (1 << 15)) | 242 | state->v_ratio == (1 << 15)) |
| 243 | height_ok = true; | 243 | height_ok = true; |
| 244 | 244 | ||
| 245 | if (width_ok & height_ok) | 245 | if (width_ok && height_ok) |
| 246 | return 0; | 246 | return 0; |
| 247 | 247 | ||
| 248 | DRM_DEBUG_KMS("scaling mode is not supported"); | 248 | DRM_DEBUG_KMS("scaling mode is not supported"); |
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c index 0ec1ad961e0d..dc723f7ead7d 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | |||
| @@ -42,9 +42,10 @@ static const struct regmap_config fsl_dcu_regmap_config = { | |||
| 42 | .reg_bits = 32, | 42 | .reg_bits = 32, |
| 43 | .reg_stride = 4, | 43 | .reg_stride = 4, |
| 44 | .val_bits = 32, | 44 | .val_bits = 32, |
| 45 | .cache_type = REGCACHE_RBTREE, | 45 | .cache_type = REGCACHE_FLAT, |
| 46 | 46 | ||
| 47 | .volatile_reg = fsl_dcu_drm_is_volatile_reg, | 47 | .volatile_reg = fsl_dcu_drm_is_volatile_reg, |
| 48 | .max_register = 0x11fc, | ||
| 48 | }; | 49 | }; |
| 49 | 50 | ||
| 50 | static int fsl_dcu_drm_irq_init(struct drm_device *dev) | 51 | static int fsl_dcu_drm_irq_init(struct drm_device *dev) |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 5faacc6e548d..7c334e902266 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
| @@ -3481,6 +3481,7 @@ int intel_bios_init(struct drm_i915_private *dev_priv); | |||
| 3481 | bool intel_bios_is_valid_vbt(const void *buf, size_t size); | 3481 | bool intel_bios_is_valid_vbt(const void *buf, size_t size); |
| 3482 | bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv); | 3482 | bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv); |
| 3483 | bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin); | 3483 | bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin); |
| 3484 | bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port); | ||
| 3484 | bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port); | 3485 | bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port); |
| 3485 | bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port); | 3486 | bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port); |
| 3486 | bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port); | 3487 | bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port); |
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index b235b6e88ead..b9022fa053d6 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
| @@ -139,6 +139,11 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode, | |||
| 139 | else | 139 | else |
| 140 | panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC; | 140 | panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC; |
| 141 | 141 | ||
| 142 | panel_fixed_mode->width_mm = (dvo_timing->himage_hi << 8) | | ||
| 143 | dvo_timing->himage_lo; | ||
| 144 | panel_fixed_mode->height_mm = (dvo_timing->vimage_hi << 8) | | ||
| 145 | dvo_timing->vimage_lo; | ||
| 146 | |||
| 142 | /* Some VBTs have bogus h/vtotal values */ | 147 | /* Some VBTs have bogus h/vtotal values */ |
| 143 | if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) | 148 | if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) |
| 144 | panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1; | 149 | panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1; |
| @@ -1187,7 +1192,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv, | |||
| 1187 | } | 1192 | } |
| 1188 | if (bdb->version < 106) { | 1193 | if (bdb->version < 106) { |
| 1189 | expected_size = 22; | 1194 | expected_size = 22; |
| 1190 | } else if (bdb->version < 109) { | 1195 | } else if (bdb->version < 111) { |
| 1191 | expected_size = 27; | 1196 | expected_size = 27; |
| 1192 | } else if (bdb->version < 195) { | 1197 | } else if (bdb->version < 195) { |
| 1193 | BUILD_BUG_ON(sizeof(struct old_child_dev_config) != 33); | 1198 | BUILD_BUG_ON(sizeof(struct old_child_dev_config) != 33); |
| @@ -1546,6 +1551,45 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin) | |||
| 1546 | } | 1551 | } |
| 1547 | 1552 | ||
| 1548 | /** | 1553 | /** |
| 1554 | * intel_bios_is_port_present - is the specified digital port present | ||
| 1555 | * @dev_priv: i915 device instance | ||
| 1556 | * @port: port to check | ||
| 1557 | * | ||
| 1558 | * Return true if the device in %port is present. | ||
| 1559 | */ | ||
| 1560 | bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port) | ||
| 1561 | { | ||
| 1562 | static const struct { | ||
| 1563 | u16 dp, hdmi; | ||
| 1564 | } port_mapping[] = { | ||
| 1565 | [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, }, | ||
| 1566 | [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, }, | ||
| 1567 | [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, }, | ||
| 1568 | [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, }, | ||
| 1569 | }; | ||
| 1570 | int i; | ||
| 1571 | |||
| 1572 | /* FIXME maybe deal with port A as well? */ | ||
| 1573 | if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping)) | ||
| 1574 | return false; | ||
| 1575 | |||
| 1576 | if (!dev_priv->vbt.child_dev_num) | ||
| 1577 | return false; | ||
| 1578 | |||
| 1579 | for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { | ||
| 1580 | const union child_device_config *p_child = | ||
| 1581 | &dev_priv->vbt.child_dev[i]; | ||
| 1582 | if ((p_child->common.dvo_port == port_mapping[port].dp || | ||
| 1583 | p_child->common.dvo_port == port_mapping[port].hdmi) && | ||
| 1584 | (p_child->common.device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING | | ||
| 1585 | DEVICE_TYPE_DISPLAYPORT_OUTPUT))) | ||
| 1586 | return true; | ||
| 1587 | } | ||
| 1588 | |||
| 1589 | return false; | ||
| 1590 | } | ||
| 1591 | |||
| 1592 | /** | ||
| 1549 | * intel_bios_is_port_edp - is the device in given port eDP | 1593 | * intel_bios_is_port_edp - is the device in given port eDP |
| 1550 | * @dev_priv: i915 device instance | 1594 | * @dev_priv: i915 device instance |
| 1551 | * @port: port to check | 1595 | * @port: port to check |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 2113f401f0ba..56a1637c864f 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -8275,12 +8275,14 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) | |||
| 8275 | { | 8275 | { |
| 8276 | struct drm_i915_private *dev_priv = dev->dev_private; | 8276 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 8277 | struct intel_encoder *encoder; | 8277 | struct intel_encoder *encoder; |
| 8278 | int i; | ||
| 8278 | u32 val, final; | 8279 | u32 val, final; |
| 8279 | bool has_lvds = false; | 8280 | bool has_lvds = false; |
| 8280 | bool has_cpu_edp = false; | 8281 | bool has_cpu_edp = false; |
| 8281 | bool has_panel = false; | 8282 | bool has_panel = false; |
| 8282 | bool has_ck505 = false; | 8283 | bool has_ck505 = false; |
| 8283 | bool can_ssc = false; | 8284 | bool can_ssc = false; |
| 8285 | bool using_ssc_source = false; | ||
| 8284 | 8286 | ||
| 8285 | /* We need to take the global config into account */ | 8287 | /* We need to take the global config into account */ |
| 8286 | for_each_intel_encoder(dev, encoder) { | 8288 | for_each_intel_encoder(dev, encoder) { |
| @@ -8307,8 +8309,22 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) | |||
| 8307 | can_ssc = true; | 8309 | can_ssc = true; |
| 8308 | } | 8310 | } |
| 8309 | 8311 | ||
| 8310 | DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n", | 8312 | /* Check if any DPLLs are using the SSC source */ |
| 8311 | has_panel, has_lvds, has_ck505); | 8313 | for (i = 0; i < dev_priv->num_shared_dpll; i++) { |
| 8314 | u32 temp = I915_READ(PCH_DPLL(i)); | ||
| 8315 | |||
| 8316 | if (!(temp & DPLL_VCO_ENABLE)) | ||
| 8317 | continue; | ||
| 8318 | |||
| 8319 | if ((temp & PLL_REF_INPUT_MASK) == | ||
| 8320 | PLLB_REF_INPUT_SPREADSPECTRUMIN) { | ||
| 8321 | using_ssc_source = true; | ||
| 8322 | break; | ||
| 8323 | } | ||
| 8324 | } | ||
| 8325 | |||
| 8326 | DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n", | ||
| 8327 | has_panel, has_lvds, has_ck505, using_ssc_source); | ||
| 8312 | 8328 | ||
| 8313 | /* Ironlake: try to setup display ref clock before DPLL | 8329 | /* Ironlake: try to setup display ref clock before DPLL |
| 8314 | * enabling. This is only under driver's control after | 8330 | * enabling. This is only under driver's control after |
| @@ -8345,9 +8361,9 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) | |||
| 8345 | final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; | 8361 | final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; |
| 8346 | } else | 8362 | } else |
| 8347 | final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; | 8363 | final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; |
| 8348 | } else { | 8364 | } else if (using_ssc_source) { |
| 8349 | final |= DREF_SSC_SOURCE_DISABLE; | 8365 | final |= DREF_SSC_SOURCE_ENABLE; |
| 8350 | final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; | 8366 | final |= DREF_SSC1_ENABLE; |
| 8351 | } | 8367 | } |
| 8352 | 8368 | ||
| 8353 | if (final == val) | 8369 | if (final == val) |
| @@ -8393,7 +8409,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) | |||
| 8393 | POSTING_READ(PCH_DREF_CONTROL); | 8409 | POSTING_READ(PCH_DREF_CONTROL); |
| 8394 | udelay(200); | 8410 | udelay(200); |
| 8395 | } else { | 8411 | } else { |
| 8396 | DRM_DEBUG_KMS("Disabling SSC entirely\n"); | 8412 | DRM_DEBUG_KMS("Disabling CPU source output\n"); |
| 8397 | 8413 | ||
| 8398 | val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; | 8414 | val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; |
| 8399 | 8415 | ||
| @@ -8404,16 +8420,20 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) | |||
| 8404 | POSTING_READ(PCH_DREF_CONTROL); | 8420 | POSTING_READ(PCH_DREF_CONTROL); |
| 8405 | udelay(200); | 8421 | udelay(200); |
| 8406 | 8422 | ||
| 8407 | /* Turn off the SSC source */ | 8423 | if (!using_ssc_source) { |
| 8408 | val &= ~DREF_SSC_SOURCE_MASK; | 8424 | DRM_DEBUG_KMS("Disabling SSC source\n"); |
| 8409 | val |= DREF_SSC_SOURCE_DISABLE; | ||
| 8410 | 8425 | ||
| 8411 | /* Turn off SSC1 */ | 8426 | /* Turn off the SSC source */ |
| 8412 | val &= ~DREF_SSC1_ENABLE; | 8427 | val &= ~DREF_SSC_SOURCE_MASK; |
| 8428 | val |= DREF_SSC_SOURCE_DISABLE; | ||
| 8413 | 8429 | ||
| 8414 | I915_WRITE(PCH_DREF_CONTROL, val); | 8430 | /* Turn off SSC1 */ |
| 8415 | POSTING_READ(PCH_DREF_CONTROL); | 8431 | val &= ~DREF_SSC1_ENABLE; |
| 8416 | udelay(200); | 8432 | |
| 8433 | I915_WRITE(PCH_DREF_CONTROL, val); | ||
| 8434 | POSTING_READ(PCH_DREF_CONTROL); | ||
| 8435 | udelay(200); | ||
| 8436 | } | ||
| 8417 | } | 8437 | } |
| 8418 | 8438 | ||
| 8419 | BUG_ON(val != final); | 8439 | BUG_ON(val != final); |
| @@ -14554,6 +14574,8 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
| 14554 | if (I915_READ(PCH_DP_D) & DP_DETECTED) | 14574 | if (I915_READ(PCH_DP_D) & DP_DETECTED) |
| 14555 | intel_dp_init(dev, PCH_DP_D, PORT_D); | 14575 | intel_dp_init(dev, PCH_DP_D, PORT_D); |
| 14556 | } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { | 14576 | } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { |
| 14577 | bool has_edp, has_port; | ||
| 14578 | |||
| 14557 | /* | 14579 | /* |
| 14558 | * The DP_DETECTED bit is the latched state of the DDC | 14580 | * The DP_DETECTED bit is the latched state of the DDC |
| 14559 | * SDA pin at boot. However since eDP doesn't require DDC | 14581 | * SDA pin at boot. However since eDP doesn't require DDC |
| @@ -14562,27 +14584,37 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
| 14562 | * Thus we can't rely on the DP_DETECTED bit alone to detect | 14584 | * Thus we can't rely on the DP_DETECTED bit alone to detect |
| 14563 | * eDP ports. Consult the VBT as well as DP_DETECTED to | 14585 | * eDP ports. Consult the VBT as well as DP_DETECTED to |
| 14564 | * detect eDP ports. | 14586 | * detect eDP ports. |
| 14587 | * | ||
| 14588 | * Sadly the straps seem to be missing sometimes even for HDMI | ||
| 14589 | * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap | ||
| 14590 | * and VBT for the presence of the port. Additionally we can't | ||
| 14591 | * trust the port type the VBT declares as we've seen at least | ||
| 14592 | * HDMI ports that the VBT claim are DP or eDP. | ||
| 14565 | */ | 14593 | */ |
| 14566 | if (I915_READ(VLV_HDMIB) & SDVO_DETECTED && | 14594 | has_edp = intel_dp_is_edp(dev, PORT_B); |
| 14567 | !intel_dp_is_edp(dev, PORT_B)) | 14595 | has_port = intel_bios_is_port_present(dev_priv, PORT_B); |
| 14596 | if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port) | ||
| 14597 | has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B); | ||
| 14598 | if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp) | ||
| 14568 | intel_hdmi_init(dev, VLV_HDMIB, PORT_B); | 14599 | intel_hdmi_init(dev, VLV_HDMIB, PORT_B); |
| 14569 | if (I915_READ(VLV_DP_B) & DP_DETECTED || | ||
| 14570 | intel_dp_is_edp(dev, PORT_B)) | ||
| 14571 | intel_dp_init(dev, VLV_DP_B, PORT_B); | ||
| 14572 | 14600 | ||
| 14573 | if (I915_READ(VLV_HDMIC) & SDVO_DETECTED && | 14601 | has_edp = intel_dp_is_edp(dev, PORT_C); |
| 14574 | !intel_dp_is_edp(dev, PORT_C)) | 14602 | has_port = intel_bios_is_port_present(dev_priv, PORT_C); |
| 14603 | if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port) | ||
| 14604 | has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C); | ||
| 14605 | if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp) | ||
| 14575 | intel_hdmi_init(dev, VLV_HDMIC, PORT_C); | 14606 | intel_hdmi_init(dev, VLV_HDMIC, PORT_C); |
| 14576 | if (I915_READ(VLV_DP_C) & DP_DETECTED || | ||
| 14577 | intel_dp_is_edp(dev, PORT_C)) | ||
| 14578 | intel_dp_init(dev, VLV_DP_C, PORT_C); | ||
| 14579 | 14607 | ||
| 14580 | if (IS_CHERRYVIEW(dev)) { | 14608 | if (IS_CHERRYVIEW(dev)) { |
| 14581 | /* eDP not supported on port D, so don't check VBT */ | 14609 | /* |
| 14582 | if (I915_READ(CHV_HDMID) & SDVO_DETECTED) | 14610 | * eDP not supported on port D, |
| 14583 | intel_hdmi_init(dev, CHV_HDMID, PORT_D); | 14611 | * so no need to worry about it |
| 14584 | if (I915_READ(CHV_DP_D) & DP_DETECTED) | 14612 | */ |
| 14613 | has_port = intel_bios_is_port_present(dev_priv, PORT_D); | ||
| 14614 | if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port) | ||
| 14585 | intel_dp_init(dev, CHV_DP_D, PORT_D); | 14615 | intel_dp_init(dev, CHV_DP_D, PORT_D); |
| 14616 | if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port) | ||
| 14617 | intel_hdmi_init(dev, CHV_HDMID, PORT_D); | ||
| 14586 | } | 14618 | } |
| 14587 | 14619 | ||
| 14588 | intel_dsi_init(dev); | 14620 | intel_dsi_init(dev); |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index f192f58708c2..79cf2d5f5a20 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
| @@ -4977,9 +4977,6 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) | |||
| 4977 | intel_display_power_get(dev_priv, power_domain); | 4977 | intel_display_power_get(dev_priv, power_domain); |
| 4978 | 4978 | ||
| 4979 | if (long_hpd) { | 4979 | if (long_hpd) { |
| 4980 | /* indicate that we need to restart link training */ | ||
| 4981 | intel_dp->train_set_valid = false; | ||
| 4982 | |||
| 4983 | intel_dp_long_pulse(intel_dp->attached_connector); | 4980 | intel_dp_long_pulse(intel_dp->attached_connector); |
| 4984 | if (intel_dp->is_mst) | 4981 | if (intel_dp->is_mst) |
| 4985 | ret = IRQ_HANDLED; | 4982 | ret = IRQ_HANDLED; |
| @@ -5725,8 +5722,11 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, | |||
| 5725 | if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) { | 5722 | if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) { |
| 5726 | fixed_mode = drm_mode_duplicate(dev, | 5723 | fixed_mode = drm_mode_duplicate(dev, |
| 5727 | dev_priv->vbt.lfp_lvds_vbt_mode); | 5724 | dev_priv->vbt.lfp_lvds_vbt_mode); |
| 5728 | if (fixed_mode) | 5725 | if (fixed_mode) { |
| 5729 | fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; | 5726 | fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; |
| 5727 | connector->display_info.width_mm = fixed_mode->width_mm; | ||
| 5728 | connector->display_info.height_mm = fixed_mode->height_mm; | ||
| 5729 | } | ||
| 5730 | } | 5730 | } |
| 5731 | mutex_unlock(&dev->mode_config.mutex); | 5731 | mutex_unlock(&dev->mode_config.mutex); |
| 5732 | 5732 | ||
| @@ -5923,9 +5923,9 @@ fail: | |||
| 5923 | return false; | 5923 | return false; |
| 5924 | } | 5924 | } |
| 5925 | 5925 | ||
| 5926 | void | 5926 | bool intel_dp_init(struct drm_device *dev, |
| 5927 | intel_dp_init(struct drm_device *dev, | 5927 | i915_reg_t output_reg, |
| 5928 | i915_reg_t output_reg, enum port port) | 5928 | enum port port) |
| 5929 | { | 5929 | { |
| 5930 | struct drm_i915_private *dev_priv = dev->dev_private; | 5930 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 5931 | struct intel_digital_port *intel_dig_port; | 5931 | struct intel_digital_port *intel_dig_port; |
| @@ -5935,7 +5935,7 @@ intel_dp_init(struct drm_device *dev, | |||
| 5935 | 5935 | ||
| 5936 | intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL); | 5936 | intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL); |
| 5937 | if (!intel_dig_port) | 5937 | if (!intel_dig_port) |
| 5938 | return; | 5938 | return false; |
| 5939 | 5939 | ||
| 5940 | intel_connector = intel_connector_alloc(); | 5940 | intel_connector = intel_connector_alloc(); |
| 5941 | if (!intel_connector) | 5941 | if (!intel_connector) |
| @@ -5992,7 +5992,7 @@ intel_dp_init(struct drm_device *dev, | |||
| 5992 | if (!intel_dp_init_connector(intel_dig_port, intel_connector)) | 5992 | if (!intel_dp_init_connector(intel_dig_port, intel_connector)) |
| 5993 | goto err_init_connector; | 5993 | goto err_init_connector; |
| 5994 | 5994 | ||
| 5995 | return; | 5995 | return true; |
| 5996 | 5996 | ||
| 5997 | err_init_connector: | 5997 | err_init_connector: |
| 5998 | drm_encoder_cleanup(encoder); | 5998 | drm_encoder_cleanup(encoder); |
| @@ -6000,8 +6000,7 @@ err_encoder_init: | |||
| 6000 | kfree(intel_connector); | 6000 | kfree(intel_connector); |
| 6001 | err_connector_alloc: | 6001 | err_connector_alloc: |
| 6002 | kfree(intel_dig_port); | 6002 | kfree(intel_dig_port); |
| 6003 | 6003 | return false; | |
| 6004 | return; | ||
| 6005 | } | 6004 | } |
| 6006 | 6005 | ||
| 6007 | void intel_dp_mst_suspend(struct drm_device *dev) | 6006 | void intel_dp_mst_suspend(struct drm_device *dev) |
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c index 0b8eefc2acc5..60fb39cd220b 100644 --- a/drivers/gpu/drm/i915/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/intel_dp_link_training.c | |||
| @@ -85,8 +85,7 @@ static bool | |||
| 85 | intel_dp_reset_link_train(struct intel_dp *intel_dp, | 85 | intel_dp_reset_link_train(struct intel_dp *intel_dp, |
| 86 | uint8_t dp_train_pat) | 86 | uint8_t dp_train_pat) |
| 87 | { | 87 | { |
| 88 | if (!intel_dp->train_set_valid) | 88 | memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set)); |
| 89 | memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set)); | ||
| 90 | intel_dp_set_signal_levels(intel_dp); | 89 | intel_dp_set_signal_levels(intel_dp); |
| 91 | return intel_dp_set_link_train(intel_dp, dp_train_pat); | 90 | return intel_dp_set_link_train(intel_dp, dp_train_pat); |
| 92 | } | 91 | } |
| @@ -161,23 +160,6 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) | |||
| 161 | break; | 160 | break; |
| 162 | } | 161 | } |
| 163 | 162 | ||
| 164 | /* | ||
| 165 | * if we used previously trained voltage and pre-emphasis values | ||
| 166 | * and we don't get clock recovery, reset link training values | ||
| 167 | */ | ||
| 168 | if (intel_dp->train_set_valid) { | ||
| 169 | DRM_DEBUG_KMS("clock recovery not ok, reset"); | ||
| 170 | /* clear the flag as we are not reusing train set */ | ||
| 171 | intel_dp->train_set_valid = false; | ||
| 172 | if (!intel_dp_reset_link_train(intel_dp, | ||
| 173 | DP_TRAINING_PATTERN_1 | | ||
| 174 | DP_LINK_SCRAMBLING_DISABLE)) { | ||
| 175 | DRM_ERROR("failed to enable link training\n"); | ||
| 176 | return; | ||
| 177 | } | ||
| 178 | continue; | ||
| 179 | } | ||
| 180 | |||
| 181 | /* Check to see if we've tried the max voltage */ | 163 | /* Check to see if we've tried the max voltage */ |
| 182 | for (i = 0; i < intel_dp->lane_count; i++) | 164 | for (i = 0; i < intel_dp->lane_count; i++) |
| 183 | if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) | 165 | if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) |
| @@ -284,7 +266,6 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp) | |||
| 284 | /* Make sure clock is still ok */ | 266 | /* Make sure clock is still ok */ |
| 285 | if (!drm_dp_clock_recovery_ok(link_status, | 267 | if (!drm_dp_clock_recovery_ok(link_status, |
| 286 | intel_dp->lane_count)) { | 268 | intel_dp->lane_count)) { |
| 287 | intel_dp->train_set_valid = false; | ||
| 288 | intel_dp_link_training_clock_recovery(intel_dp); | 269 | intel_dp_link_training_clock_recovery(intel_dp); |
| 289 | intel_dp_set_link_train(intel_dp, | 270 | intel_dp_set_link_train(intel_dp, |
| 290 | training_pattern | | 271 | training_pattern | |
| @@ -301,7 +282,6 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp) | |||
| 301 | 282 | ||
| 302 | /* Try 5 times, then try clock recovery if that fails */ | 283 | /* Try 5 times, then try clock recovery if that fails */ |
| 303 | if (tries > 5) { | 284 | if (tries > 5) { |
| 304 | intel_dp->train_set_valid = false; | ||
| 305 | intel_dp_link_training_clock_recovery(intel_dp); | 285 | intel_dp_link_training_clock_recovery(intel_dp); |
| 306 | intel_dp_set_link_train(intel_dp, | 286 | intel_dp_set_link_train(intel_dp, |
| 307 | training_pattern | | 287 | training_pattern | |
| @@ -322,10 +302,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp) | |||
| 322 | 302 | ||
| 323 | intel_dp_set_idle_link_train(intel_dp); | 303 | intel_dp_set_idle_link_train(intel_dp); |
| 324 | 304 | ||
| 325 | if (channel_eq) { | 305 | if (channel_eq) |
| 326 | intel_dp->train_set_valid = true; | ||
| 327 | DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n"); | 306 | DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n"); |
| 328 | } | ||
| 329 | } | 307 | } |
| 330 | 308 | ||
| 331 | void intel_dp_stop_link_train(struct intel_dp *intel_dp) | 309 | void intel_dp_stop_link_train(struct intel_dp *intel_dp) |
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index 3ac705936b04..baf6f5584cbd 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c | |||
| @@ -366,6 +366,9 @@ ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, | |||
| 366 | DPLL_ID_PCH_PLL_B); | 366 | DPLL_ID_PCH_PLL_B); |
| 367 | } | 367 | } |
| 368 | 368 | ||
| 369 | if (!pll) | ||
| 370 | return NULL; | ||
| 371 | |||
| 369 | /* reference the pll */ | 372 | /* reference the pll */ |
| 370 | intel_reference_shared_dpll(pll, crtc_state); | 373 | intel_reference_shared_dpll(pll, crtc_state); |
| 371 | 374 | ||
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index a28b4aac1e02..f7f0f01814f6 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
| @@ -863,8 +863,6 @@ struct intel_dp { | |||
| 863 | /* This is called before a link training is started */ | 863 | /* This is called before a link training is started */ |
| 864 | void (*prepare_link_retrain)(struct intel_dp *intel_dp); | 864 | void (*prepare_link_retrain)(struct intel_dp *intel_dp); |
| 865 | 865 | ||
| 866 | bool train_set_valid; | ||
| 867 | |||
| 868 | /* Displayport compliance testing */ | 866 | /* Displayport compliance testing */ |
| 869 | unsigned long compliance_test_type; | 867 | unsigned long compliance_test_type; |
| 870 | unsigned long compliance_test_data; | 868 | unsigned long compliance_test_data; |
| @@ -1284,7 +1282,7 @@ void intel_csr_ucode_suspend(struct drm_i915_private *); | |||
| 1284 | void intel_csr_ucode_resume(struct drm_i915_private *); | 1282 | void intel_csr_ucode_resume(struct drm_i915_private *); |
| 1285 | 1283 | ||
| 1286 | /* intel_dp.c */ | 1284 | /* intel_dp.c */ |
| 1287 | void intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port); | 1285 | bool intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port); |
| 1288 | bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port, | 1286 | bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port, |
| 1289 | struct intel_connector *intel_connector); | 1287 | struct intel_connector *intel_connector); |
| 1290 | void intel_dp_set_link_params(struct intel_dp *intel_dp, | 1288 | void intel_dp_set_link_params(struct intel_dp *intel_dp, |
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index 366ad6c67ce4..4756ef639648 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c | |||
| @@ -1545,6 +1545,9 @@ void intel_dsi_init(struct drm_device *dev) | |||
| 1545 | goto err; | 1545 | goto err; |
| 1546 | } | 1546 | } |
| 1547 | 1547 | ||
| 1548 | connector->display_info.width_mm = fixed_mode->width_mm; | ||
| 1549 | connector->display_info.height_mm = fixed_mode->height_mm; | ||
| 1550 | |||
| 1548 | intel_panel_init(&intel_connector->panel, fixed_mode, NULL); | 1551 | intel_panel_init(&intel_connector->panel, fixed_mode, NULL); |
| 1549 | 1552 | ||
| 1550 | intel_dsi_add_properties(intel_connector); | 1553 | intel_dsi_add_properties(intel_connector); |
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index d5a7cfec589b..647127f3aaff 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c | |||
| @@ -824,8 +824,7 @@ static bool intel_fbc_can_choose(struct intel_crtc *crtc) | |||
| 824 | { | 824 | { |
| 825 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; | 825 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; |
| 826 | struct intel_fbc *fbc = &dev_priv->fbc; | 826 | struct intel_fbc *fbc = &dev_priv->fbc; |
| 827 | bool enable_by_default = IS_HASWELL(dev_priv) || | 827 | bool enable_by_default = IS_BROADWELL(dev_priv); |
| 828 | IS_BROADWELL(dev_priv); | ||
| 829 | 828 | ||
| 830 | if (intel_vgpu_active(dev_priv->dev)) { | 829 | if (intel_vgpu_active(dev_priv->dev)) { |
| 831 | fbc->no_fbc_reason = "VGPU is active"; | 830 | fbc->no_fbc_reason = "VGPU is active"; |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 2c3bd9c2573e..a8844702d11b 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
| @@ -2142,6 +2142,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, | |||
| 2142 | enum port port = intel_dig_port->port; | 2142 | enum port port = intel_dig_port->port; |
| 2143 | uint8_t alternate_ddc_pin; | 2143 | uint8_t alternate_ddc_pin; |
| 2144 | 2144 | ||
| 2145 | DRM_DEBUG_KMS("Adding HDMI connector on port %c\n", | ||
| 2146 | port_name(port)); | ||
| 2147 | |||
| 2145 | if (WARN(intel_dig_port->max_lanes < 4, | 2148 | if (WARN(intel_dig_port->max_lanes < 4, |
| 2146 | "Not enough lanes (%d) for HDMI on port %c\n", | 2149 | "Not enough lanes (%d) for HDMI on port %c\n", |
| 2147 | intel_dig_port->max_lanes, port_name(port))) | 2150 | intel_dig_port->max_lanes, port_name(port))) |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index bc53c0dd34d0..96281e628d2a 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
| @@ -1082,6 +1082,8 @@ void intel_lvds_init(struct drm_device *dev) | |||
| 1082 | fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode); | 1082 | fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode); |
| 1083 | if (fixed_mode) { | 1083 | if (fixed_mode) { |
| 1084 | fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; | 1084 | fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; |
| 1085 | connector->display_info.width_mm = fixed_mode->width_mm; | ||
| 1086 | connector->display_info.height_mm = fixed_mode->height_mm; | ||
| 1085 | goto out; | 1087 | goto out; |
| 1086 | } | 1088 | } |
| 1087 | } | 1089 | } |
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h index c15051de8023..44fb0b35eed3 100644 --- a/drivers/gpu/drm/i915/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/intel_vbt_defs.h | |||
| @@ -403,9 +403,10 @@ struct lvds_dvo_timing { | |||
| 403 | u8 vsync_off:4; | 403 | u8 vsync_off:4; |
| 404 | u8 rsvd0:6; | 404 | u8 rsvd0:6; |
| 405 | u8 hsync_off_hi:2; | 405 | u8 hsync_off_hi:2; |
| 406 | u8 h_image; | 406 | u8 himage_lo; |
| 407 | u8 v_image; | 407 | u8 vimage_lo; |
| 408 | u8 max_hv; | 408 | u8 vimage_hi:4; |
| 409 | u8 himage_hi:4; | ||
| 409 | u8 h_border; | 410 | u8 h_border; |
| 410 | u8 v_border; | 411 | u8 v_border; |
| 411 | u8 rsvd1:3; | 412 | u8 rsvd1:3; |
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index fbe304ee6c80..2aec27dbb5bb 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c | |||
| @@ -408,7 +408,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, | |||
| 408 | } | 408 | } |
| 409 | 409 | ||
| 410 | adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo); | 410 | adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo); |
| 411 | if (!adreno_gpu->memptrs) { | 411 | if (IS_ERR(adreno_gpu->memptrs)) { |
| 412 | dev_err(drm->dev, "could not vmap memptrs\n"); | 412 | dev_err(drm->dev, "could not vmap memptrs\n"); |
| 413 | return -ENOMEM; | 413 | return -ENOMEM; |
| 414 | } | 414 | } |
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index d9759bf3482e..c6cf837c5193 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c | |||
| @@ -159,6 +159,10 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, | |||
| 159 | dev->mode_config.fb_base = paddr; | 159 | dev->mode_config.fb_base = paddr; |
| 160 | 160 | ||
| 161 | fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo); | 161 | fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo); |
| 162 | if (IS_ERR(fbi->screen_base)) { | ||
| 163 | ret = PTR_ERR(fbi->screen_base); | ||
| 164 | goto fail_unlock; | ||
| 165 | } | ||
| 162 | fbi->screen_size = fbdev->bo->size; | 166 | fbi->screen_size = fbdev->bo->size; |
| 163 | fbi->fix.smem_start = paddr; | 167 | fbi->fix.smem_start = paddr; |
| 164 | fbi->fix.smem_len = fbdev->bo->size; | 168 | fbi->fix.smem_len = fbdev->bo->size; |
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 7daf4054dd2b..69836f5685b1 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c | |||
| @@ -398,6 +398,8 @@ void *msm_gem_vaddr_locked(struct drm_gem_object *obj) | |||
| 398 | return ERR_CAST(pages); | 398 | return ERR_CAST(pages); |
| 399 | msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, | 399 | msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, |
| 400 | VM_MAP, pgprot_writecombine(PAGE_KERNEL)); | 400 | VM_MAP, pgprot_writecombine(PAGE_KERNEL)); |
| 401 | if (msm_obj->vaddr == NULL) | ||
| 402 | return ERR_PTR(-ENOMEM); | ||
| 401 | } | 403 | } |
| 402 | return msm_obj->vaddr; | 404 | return msm_obj->vaddr; |
| 403 | } | 405 | } |
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index b89ca5174863..eb4bb8b2f3a5 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c | |||
| @@ -40,12 +40,14 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, | |||
| 40 | 40 | ||
| 41 | submit->dev = dev; | 41 | submit->dev = dev; |
| 42 | submit->gpu = gpu; | 42 | submit->gpu = gpu; |
| 43 | submit->fence = NULL; | ||
| 43 | submit->pid = get_pid(task_pid(current)); | 44 | submit->pid = get_pid(task_pid(current)); |
| 44 | 45 | ||
| 45 | /* initially, until copy_from_user() and bo lookup succeeds: */ | 46 | /* initially, until copy_from_user() and bo lookup succeeds: */ |
| 46 | submit->nr_bos = 0; | 47 | submit->nr_bos = 0; |
| 47 | submit->nr_cmds = 0; | 48 | submit->nr_cmds = 0; |
| 48 | 49 | ||
| 50 | INIT_LIST_HEAD(&submit->node); | ||
| 49 | INIT_LIST_HEAD(&submit->bo_list); | 51 | INIT_LIST_HEAD(&submit->bo_list); |
| 50 | ww_acquire_init(&submit->ticket, &reservation_ww_class); | 52 | ww_acquire_init(&submit->ticket, &reservation_ww_class); |
| 51 | 53 | ||
| @@ -75,6 +77,11 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, | |||
| 75 | void __user *userptr = | 77 | void __user *userptr = |
| 76 | u64_to_user_ptr(args->bos + (i * sizeof(submit_bo))); | 78 | u64_to_user_ptr(args->bos + (i * sizeof(submit_bo))); |
| 77 | 79 | ||
| 80 | /* make sure we don't have garbage flags, in case we hit | ||
| 81 | * error path before flags is initialized: | ||
| 82 | */ | ||
| 83 | submit->bos[i].flags = 0; | ||
| 84 | |||
| 78 | ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo)); | 85 | ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo)); |
| 79 | if (ret) { | 86 | if (ret) { |
| 80 | ret = -EFAULT; | 87 | ret = -EFAULT; |
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c index b48f73ac6389..0857710c2ff2 100644 --- a/drivers/gpu/drm/msm/msm_rd.c +++ b/drivers/gpu/drm/msm/msm_rd.c | |||
| @@ -312,6 +312,9 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit) | |||
| 312 | struct msm_gem_object *obj = submit->bos[idx].obj; | 312 | struct msm_gem_object *obj = submit->bos[idx].obj; |
| 313 | const char *buf = msm_gem_vaddr_locked(&obj->base); | 313 | const char *buf = msm_gem_vaddr_locked(&obj->base); |
| 314 | 314 | ||
| 315 | if (IS_ERR(buf)) | ||
| 316 | continue; | ||
| 317 | |||
| 315 | buf += iova - submit->bos[idx].iova; | 318 | buf += iova - submit->bos[idx].iova; |
| 316 | 319 | ||
| 317 | rd_write_section(rd, RD_GPUADDR, | 320 | rd_write_section(rd, RD_GPUADDR, |
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index 1f14b908b221..42f5359cf988 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c | |||
| @@ -40,6 +40,10 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size) | |||
| 40 | } | 40 | } |
| 41 | 41 | ||
| 42 | ring->start = msm_gem_vaddr_locked(ring->bo); | 42 | ring->start = msm_gem_vaddr_locked(ring->bo); |
| 43 | if (IS_ERR(ring->start)) { | ||
| 44 | ret = PTR_ERR(ring->start); | ||
| 45 | goto fail; | ||
| 46 | } | ||
| 43 | ring->end = ring->start + (size / 4); | 47 | ring->end = ring->start + (size / 4); |
| 44 | ring->cur = ring->start; | 48 | ring->cur = ring->start; |
| 45 | 49 | ||
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h index c612dc1f1eb4..126a85cc81bc 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h | |||
| @@ -16,9 +16,9 @@ enum nvkm_devidx { | |||
| 16 | NVKM_SUBDEV_MC, | 16 | NVKM_SUBDEV_MC, |
| 17 | NVKM_SUBDEV_BUS, | 17 | NVKM_SUBDEV_BUS, |
| 18 | NVKM_SUBDEV_TIMER, | 18 | NVKM_SUBDEV_TIMER, |
| 19 | NVKM_SUBDEV_INSTMEM, | ||
| 19 | NVKM_SUBDEV_FB, | 20 | NVKM_SUBDEV_FB, |
| 20 | NVKM_SUBDEV_LTC, | 21 | NVKM_SUBDEV_LTC, |
| 21 | NVKM_SUBDEV_INSTMEM, | ||
| 22 | NVKM_SUBDEV_MMU, | 22 | NVKM_SUBDEV_MMU, |
| 23 | NVKM_SUBDEV_BAR, | 23 | NVKM_SUBDEV_BAR, |
| 24 | NVKM_SUBDEV_PMU, | 24 | NVKM_SUBDEV_PMU, |
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h index db10c11f0595..c5a6ebd5a478 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h | |||
| @@ -25,7 +25,8 @@ u16 nvbios_outp_match(struct nvkm_bios *, u16 type, u16 mask, | |||
| 25 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_outp *); | 25 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_outp *); |
| 26 | 26 | ||
| 27 | struct nvbios_ocfg { | 27 | struct nvbios_ocfg { |
| 28 | u16 match; | 28 | u8 proto; |
| 29 | u8 flags; | ||
| 29 | u16 clkcmp[2]; | 30 | u16 clkcmp[2]; |
| 30 | }; | 31 | }; |
| 31 | 32 | ||
| @@ -33,7 +34,7 @@ u16 nvbios_ocfg_entry(struct nvkm_bios *, u16 outp, u8 idx, | |||
| 33 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len); | 34 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len); |
| 34 | u16 nvbios_ocfg_parse(struct nvkm_bios *, u16 outp, u8 idx, | 35 | u16 nvbios_ocfg_parse(struct nvkm_bios *, u16 outp, u8 idx, |
| 35 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *); | 36 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *); |
| 36 | u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u16 type, | 37 | u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u8 proto, u8 flags, |
| 37 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *); | 38 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *); |
| 38 | u16 nvbios_oclk_match(struct nvkm_bios *, u16 cmp, u32 khz); | 39 | u16 nvbios_oclk_match(struct nvkm_bios *, u16 cmp, u32 khz); |
| 39 | #endif | 40 | #endif |
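The nvbios_ocfg change above splits the old 16-bit match key into separate proto and flags bytes, and nvbios_ocfg_match() now takes them as two parameters. In the display code further down (gf119.c and nv50.c), the two halves are packed into *conf with the protocol in the low byte and any LVDS flags above it, then unpacked at the call site. A short sketch of that packing, assuming only what the "*conf & 0xff" / "*conf >> 8" call sites imply (illustrative, not verbatim from the patch):

	u32 conf = (ctrl & 0x00000f00) >> 8;		/* protocol bits from the output ctrl word */
	if (outp->info.type == DCB_OUTPUT_LVDS)
		conf |= disp->sor.lvdsconf;		/* LVDS flags land in the upper byte */

	data = nvbios_ocfg_match(bios, data, conf & 0xff, conf >> 8,
				 &ver, &hdr, &cnt, &len, &info2);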
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 57aaf98a26f9..d1f248fd3506 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c | |||
| @@ -552,6 +552,8 @@ nouveau_fbcon_init(struct drm_device *dev) | |||
| 552 | if (ret) | 552 | if (ret) |
| 553 | goto fini; | 553 | goto fini; |
| 554 | 554 | ||
| 555 | if (fbcon->helper.fbdev) | ||
| 556 | fbcon->helper.fbdev->pixmap.buf_align = 4; | ||
| 555 | return 0; | 557 | return 0; |
| 556 | 558 | ||
| 557 | fini: | 559 | fini: |
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c index 0f3e4bb411cc..7d9248b8c664 100644 --- a/drivers/gpu/drm/nouveau/nv04_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c | |||
| @@ -82,7 +82,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 82 | uint32_t fg; | 82 | uint32_t fg; |
| 83 | uint32_t bg; | 83 | uint32_t bg; |
| 84 | uint32_t dsize; | 84 | uint32_t dsize; |
| 85 | uint32_t width; | ||
| 86 | uint32_t *data = (uint32_t *)image->data; | 85 | uint32_t *data = (uint32_t *)image->data; |
| 87 | int ret; | 86 | int ret; |
| 88 | 87 | ||
| @@ -93,9 +92,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 93 | if (ret) | 92 | if (ret) |
| 94 | return ret; | 93 | return ret; |
| 95 | 94 | ||
| 96 | width = ALIGN(image->width, 8); | ||
| 97 | dsize = ALIGN(width * image->height, 32) >> 5; | ||
| 98 | |||
| 99 | if (info->fix.visual == FB_VISUAL_TRUECOLOR || | 95 | if (info->fix.visual == FB_VISUAL_TRUECOLOR || |
| 100 | info->fix.visual == FB_VISUAL_DIRECTCOLOR) { | 96 | info->fix.visual == FB_VISUAL_DIRECTCOLOR) { |
| 101 | fg = ((uint32_t *) info->pseudo_palette)[image->fg_color]; | 97 | fg = ((uint32_t *) info->pseudo_palette)[image->fg_color]; |
| @@ -111,10 +107,11 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 111 | ((image->dx + image->width) & 0xffff)); | 107 | ((image->dx + image->width) & 0xffff)); |
| 112 | OUT_RING(chan, bg); | 108 | OUT_RING(chan, bg); |
| 113 | OUT_RING(chan, fg); | 109 | OUT_RING(chan, fg); |
| 114 | OUT_RING(chan, (image->height << 16) | width); | 110 | OUT_RING(chan, (image->height << 16) | image->width); |
| 115 | OUT_RING(chan, (image->height << 16) | image->width); | 111 | OUT_RING(chan, (image->height << 16) | image->width); |
| 116 | OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff)); | 112 | OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff)); |
| 117 | 113 | ||
| 114 | dsize = ALIGN(image->width * image->height, 32) >> 5; | ||
| 118 | while (dsize) { | 115 | while (dsize) { |
| 119 | int iter_len = dsize > 128 ? 128 : dsize; | 116 | int iter_len = dsize > 128 ? 128 : dsize; |
| 120 | 117 | ||
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c index 33d9ee0fac40..1aeb698e9707 100644 --- a/drivers/gpu/drm/nouveau/nv50_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c | |||
| @@ -95,7 +95,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 95 | struct nouveau_fbdev *nfbdev = info->par; | 95 | struct nouveau_fbdev *nfbdev = info->par; |
| 96 | struct nouveau_drm *drm = nouveau_drm(nfbdev->dev); | 96 | struct nouveau_drm *drm = nouveau_drm(nfbdev->dev); |
| 97 | struct nouveau_channel *chan = drm->channel; | 97 | struct nouveau_channel *chan = drm->channel; |
| 98 | uint32_t width, dwords, *data = (uint32_t *)image->data; | 98 | uint32_t dwords, *data = (uint32_t *)image->data; |
| 99 | uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel)); | 99 | uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel)); |
| 100 | uint32_t *palette = info->pseudo_palette; | 100 | uint32_t *palette = info->pseudo_palette; |
| 101 | int ret; | 101 | int ret; |
| @@ -107,9 +107,6 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 107 | if (ret) | 107 | if (ret) |
| 108 | return ret; | 108 | return ret; |
| 109 | 109 | ||
| 110 | width = ALIGN(image->width, 32); | ||
| 111 | dwords = (width * image->height) >> 5; | ||
| 112 | |||
| 113 | BEGIN_NV04(chan, NvSub2D, 0x0814, 2); | 110 | BEGIN_NV04(chan, NvSub2D, 0x0814, 2); |
| 114 | if (info->fix.visual == FB_VISUAL_TRUECOLOR || | 111 | if (info->fix.visual == FB_VISUAL_TRUECOLOR || |
| 115 | info->fix.visual == FB_VISUAL_DIRECTCOLOR) { | 112 | info->fix.visual == FB_VISUAL_DIRECTCOLOR) { |
| @@ -128,6 +125,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 128 | OUT_RING(chan, 0); | 125 | OUT_RING(chan, 0); |
| 129 | OUT_RING(chan, image->dy); | 126 | OUT_RING(chan, image->dy); |
| 130 | 127 | ||
| 128 | dwords = ALIGN(image->width * image->height, 32) >> 5; | ||
| 131 | while (dwords) { | 129 | while (dwords) { |
| 132 | int push = dwords > 2047 ? 2047 : dwords; | 130 | int push = dwords > 2047 ? 2047 : dwords; |
| 133 | 131 | ||
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c index a0913359ac05..839f4c8c1805 100644 --- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c +++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c | |||
| @@ -95,7 +95,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 95 | struct nouveau_fbdev *nfbdev = info->par; | 95 | struct nouveau_fbdev *nfbdev = info->par; |
| 96 | struct nouveau_drm *drm = nouveau_drm(nfbdev->dev); | 96 | struct nouveau_drm *drm = nouveau_drm(nfbdev->dev); |
| 97 | struct nouveau_channel *chan = drm->channel; | 97 | struct nouveau_channel *chan = drm->channel; |
| 98 | uint32_t width, dwords, *data = (uint32_t *)image->data; | 98 | uint32_t dwords, *data = (uint32_t *)image->data; |
| 99 | uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel)); | 99 | uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel)); |
| 100 | uint32_t *palette = info->pseudo_palette; | 100 | uint32_t *palette = info->pseudo_palette; |
| 101 | int ret; | 101 | int ret; |
| @@ -107,9 +107,6 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 107 | if (ret) | 107 | if (ret) |
| 108 | return ret; | 108 | return ret; |
| 109 | 109 | ||
| 110 | width = ALIGN(image->width, 32); | ||
| 111 | dwords = (width * image->height) >> 5; | ||
| 112 | |||
| 113 | BEGIN_NVC0(chan, NvSub2D, 0x0814, 2); | 110 | BEGIN_NVC0(chan, NvSub2D, 0x0814, 2); |
| 114 | if (info->fix.visual == FB_VISUAL_TRUECOLOR || | 111 | if (info->fix.visual == FB_VISUAL_TRUECOLOR || |
| 115 | info->fix.visual == FB_VISUAL_DIRECTCOLOR) { | 112 | info->fix.visual == FB_VISUAL_DIRECTCOLOR) { |
| @@ -128,6 +125,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 128 | OUT_RING (chan, 0); | 125 | OUT_RING (chan, 0); |
| 129 | OUT_RING (chan, image->dy); | 126 | OUT_RING (chan, image->dy); |
| 130 | 127 | ||
| 128 | dwords = ALIGN(image->width * image->height, 32) >> 5; | ||
| 131 | while (dwords) { | 129 | while (dwords) { |
| 132 | int push = dwords > 2047 ? 2047 : dwords; | 130 | int push = dwords > 2047 ? 2047 : dwords; |
| 133 | 131 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c index 18fab3973ce5..62ad0300cfa5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c | |||
| @@ -1614,7 +1614,7 @@ nvkm_device_pci_func = { | |||
| 1614 | .fini = nvkm_device_pci_fini, | 1614 | .fini = nvkm_device_pci_fini, |
| 1615 | .resource_addr = nvkm_device_pci_resource_addr, | 1615 | .resource_addr = nvkm_device_pci_resource_addr, |
| 1616 | .resource_size = nvkm_device_pci_resource_size, | 1616 | .resource_size = nvkm_device_pci_resource_size, |
| 1617 | .cpu_coherent = !IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_ARM64), | 1617 | .cpu_coherent = !IS_ENABLED(CONFIG_ARM), |
| 1618 | }; | 1618 | }; |
| 1619 | 1619 | ||
| 1620 | int | 1620 | int |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild index a74c5dd27dc0..e2a64ed14b22 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild | |||
| @@ -18,6 +18,7 @@ nvkm-y += nvkm/engine/disp/piornv50.o | |||
| 18 | nvkm-y += nvkm/engine/disp/sornv50.o | 18 | nvkm-y += nvkm/engine/disp/sornv50.o |
| 19 | nvkm-y += nvkm/engine/disp/sorg94.o | 19 | nvkm-y += nvkm/engine/disp/sorg94.o |
| 20 | nvkm-y += nvkm/engine/disp/sorgf119.o | 20 | nvkm-y += nvkm/engine/disp/sorgf119.o |
| 21 | nvkm-y += nvkm/engine/disp/sorgm107.o | ||
| 21 | nvkm-y += nvkm/engine/disp/sorgm200.o | 22 | nvkm-y += nvkm/engine/disp/sorgm200.o |
| 22 | nvkm-y += nvkm/engine/disp/dport.o | 23 | nvkm-y += nvkm/engine/disp/dport.o |
| 23 | 24 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c index f0314664349c..5dd34382f55a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c | |||
| @@ -76,6 +76,7 @@ exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl, | |||
| 76 | mask |= 0x0001 << or; | 76 | mask |= 0x0001 << or; |
| 77 | mask |= 0x0100 << head; | 77 | mask |= 0x0100 << head; |
| 78 | 78 | ||
| 79 | |||
| 79 | list_for_each_entry(outp, &disp->base.outp, head) { | 80 | list_for_each_entry(outp, &disp->base.outp, head) { |
| 80 | if ((outp->info.hasht & 0xff) == type && | 81 | if ((outp->info.hasht & 0xff) == type && |
| 81 | (outp->info.hashm & mask) == mask) { | 82 | (outp->info.hashm & mask) == mask) { |
| @@ -155,25 +156,21 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf) | |||
| 155 | if (!outp) | 156 | if (!outp) |
| 156 | return NULL; | 157 | return NULL; |
| 157 | 158 | ||
| 159 | *conf = (ctrl & 0x00000f00) >> 8; | ||
| 158 | switch (outp->info.type) { | 160 | switch (outp->info.type) { |
| 159 | case DCB_OUTPUT_TMDS: | 161 | case DCB_OUTPUT_TMDS: |
| 160 | *conf = (ctrl & 0x00000f00) >> 8; | ||
| 161 | if (*conf == 5) | 162 | if (*conf == 5) |
| 162 | *conf |= 0x0100; | 163 | *conf |= 0x0100; |
| 163 | break; | 164 | break; |
| 164 | case DCB_OUTPUT_LVDS: | 165 | case DCB_OUTPUT_LVDS: |
| 165 | *conf = disp->sor.lvdsconf; | 166 | *conf |= disp->sor.lvdsconf; |
| 166 | break; | ||
| 167 | case DCB_OUTPUT_DP: | ||
| 168 | *conf = (ctrl & 0x00000f00) >> 8; | ||
| 169 | break; | 167 | break; |
| 170 | case DCB_OUTPUT_ANALOG: | ||
| 171 | default: | 168 | default: |
| 172 | *conf = 0x00ff; | ||
| 173 | break; | 169 | break; |
| 174 | } | 170 | } |
| 175 | 171 | ||
| 176 | data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2); | 172 | data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8, |
| 173 | &ver, &hdr, &cnt, &len, &info2); | ||
| 177 | if (data && id < 0xff) { | 174 | if (data && id < 0xff) { |
| 178 | data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk); | 175 | data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk); |
| 179 | if (data) { | 176 | if (data) { |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c index b6944142d616..f4b9cf8574be 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c | |||
| @@ -36,7 +36,7 @@ gm107_disp = { | |||
| 36 | .outp.internal.crt = nv50_dac_output_new, | 36 | .outp.internal.crt = nv50_dac_output_new, |
| 37 | .outp.internal.tmds = nv50_sor_output_new, | 37 | .outp.internal.tmds = nv50_sor_output_new, |
| 38 | .outp.internal.lvds = nv50_sor_output_new, | 38 | .outp.internal.lvds = nv50_sor_output_new, |
| 39 | .outp.internal.dp = gf119_sor_dp_new, | 39 | .outp.internal.dp = gm107_sor_dp_new, |
| 40 | .dac.nr = 3, | 40 | .dac.nr = 3, |
| 41 | .dac.power = nv50_dac_power, | 41 | .dac.power = nv50_dac_power, |
| 42 | .dac.sense = nv50_dac_sense, | 42 | .dac.sense = nv50_dac_sense, |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c index 4226d2153b9c..fcb1b0c46d64 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c | |||
| @@ -387,22 +387,17 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf) | |||
| 387 | if (!outp) | 387 | if (!outp) |
| 388 | return NULL; | 388 | return NULL; |
| 389 | 389 | ||
| 390 | *conf = (ctrl & 0x00000f00) >> 8; | ||
| 390 | if (outp->info.location == 0) { | 391 | if (outp->info.location == 0) { |
| 391 | switch (outp->info.type) { | 392 | switch (outp->info.type) { |
| 392 | case DCB_OUTPUT_TMDS: | 393 | case DCB_OUTPUT_TMDS: |
| 393 | *conf = (ctrl & 0x00000f00) >> 8; | ||
| 394 | if (*conf == 5) | 394 | if (*conf == 5) |
| 395 | *conf |= 0x0100; | 395 | *conf |= 0x0100; |
| 396 | break; | 396 | break; |
| 397 | case DCB_OUTPUT_LVDS: | 397 | case DCB_OUTPUT_LVDS: |
| 398 | *conf = disp->sor.lvdsconf; | 398 | *conf |= disp->sor.lvdsconf; |
| 399 | break; | 399 | break; |
| 400 | case DCB_OUTPUT_DP: | ||
| 401 | *conf = (ctrl & 0x00000f00) >> 8; | ||
| 402 | break; | ||
| 403 | case DCB_OUTPUT_ANALOG: | ||
| 404 | default: | 400 | default: |
| 405 | *conf = 0x00ff; | ||
| 406 | break; | 401 | break; |
| 407 | } | 402 | } |
| 408 | } else { | 403 | } else { |
| @@ -410,7 +405,8 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf) | |||
| 410 | pclk = pclk / 2; | 405 | pclk = pclk / 2; |
| 411 | } | 406 | } |
| 412 | 407 | ||
| 413 | data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2); | 408 | data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8, |
| 409 | &ver, &hdr, &cnt, &len, &info2); | ||
| 414 | if (data && id < 0xff) { | 410 | if (data && id < 0xff) { |
| 415 | data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk); | 411 | data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk); |
| 416 | if (data) { | 412 | if (data) { |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h index e9067ba4e179..4e983f6d7032 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h | |||
| @@ -62,7 +62,12 @@ int g94_sor_dp_lnk_pwr(struct nvkm_output_dp *, int); | |||
| 62 | int gf119_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *, | 62 | int gf119_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *, |
| 63 | struct nvkm_output **); | 63 | struct nvkm_output **); |
| 64 | int gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool); | 64 | int gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool); |
| 65 | int gf119_sor_dp_drv_ctl(struct nvkm_output_dp *, int, int, int, int); | ||
| 65 | 66 | ||
| 66 | int gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *, | 67 | int gm107_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *, |
| 67 | struct nvkm_output **); | 68 | struct nvkm_output **); |
| 69 | int gm107_sor_dp_pattern(struct nvkm_output_dp *, int); | ||
| 70 | |||
| 71 | int gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *, | ||
| 72 | struct nvkm_output **); | ||
| 68 | #endif | 73 | #endif |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c index b4b41b135643..22706c0a54b5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c | |||
| @@ -40,8 +40,7 @@ static int | |||
| 40 | gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern) | 40 | gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern) |
| 41 | { | 41 | { |
| 42 | struct nvkm_device *device = outp->base.disp->engine.subdev.device; | 42 | struct nvkm_device *device = outp->base.disp->engine.subdev.device; |
| 43 | const u32 loff = gf119_sor_loff(outp); | 43 | nvkm_mask(device, 0x61c110, 0x0f0f0f0f, 0x01010101 * pattern); |
| 44 | nvkm_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern); | ||
| 45 | return 0; | 44 | return 0; |
| 46 | } | 45 | } |
| 47 | 46 | ||
| @@ -64,7 +63,7 @@ gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef) | |||
| 64 | return 0; | 63 | return 0; |
| 65 | } | 64 | } |
| 66 | 65 | ||
| 67 | static int | 66 | int |
| 68 | gf119_sor_dp_drv_ctl(struct nvkm_output_dp *outp, | 67 | gf119_sor_dp_drv_ctl(struct nvkm_output_dp *outp, |
| 69 | int ln, int vs, int pe, int pc) | 68 | int ln, int vs, int pe, int pc) |
| 70 | { | 69 | { |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c new file mode 100644 index 000000000000..37790b2617c5 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c | |||
| @@ -0,0 +1,53 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2016 Red Hat Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | * Authors: Ben Skeggs <bskeggs@redhat.com> | ||
| 23 | */ | ||
| 24 | #include "nv50.h" | ||
| 25 | #include "outpdp.h" | ||
| 26 | |||
| 27 | int | ||
| 28 | gm107_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern) | ||
| 29 | { | ||
| 30 | struct nvkm_device *device = outp->base.disp->engine.subdev.device; | ||
| 31 | const u32 soff = outp->base.or * 0x800; | ||
| 32 | const u32 data = 0x01010101 * pattern; | ||
| 33 | if (outp->base.info.sorconf.link & 1) | ||
| 34 | nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data); | ||
| 35 | else | ||
| 36 | nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data); | ||
| 37 | return 0; | ||
| 38 | } | ||
| 39 | |||
| 40 | static const struct nvkm_output_dp_func | ||
| 41 | gm107_sor_dp_func = { | ||
| 42 | .pattern = gm107_sor_dp_pattern, | ||
| 43 | .lnk_pwr = g94_sor_dp_lnk_pwr, | ||
| 44 | .lnk_ctl = gf119_sor_dp_lnk_ctl, | ||
| 45 | .drv_ctl = gf119_sor_dp_drv_ctl, | ||
| 46 | }; | ||
| 47 | |||
| 48 | int | ||
| 49 | gm107_sor_dp_new(struct nvkm_disp *disp, int index, | ||
| 50 | struct dcb_output *dcbE, struct nvkm_output **poutp) | ||
| 51 | { | ||
| 52 | return nvkm_output_dp_new_(&gm107_sor_dp_func, disp, index, dcbE, poutp); | ||
| 53 | } | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c index 2cfbef9c344f..c44fa7ea672a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c | |||
| @@ -57,19 +57,6 @@ gm200_sor_dp_lane_map(struct nvkm_device *device, u8 lane) | |||
| 57 | } | 57 | } |
| 58 | 58 | ||
| 59 | static int | 59 | static int |
| 60 | gm200_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern) | ||
| 61 | { | ||
| 62 | struct nvkm_device *device = outp->base.disp->engine.subdev.device; | ||
| 63 | const u32 soff = gm200_sor_soff(outp); | ||
| 64 | const u32 data = 0x01010101 * pattern; | ||
| 65 | if (outp->base.info.sorconf.link & 1) | ||
| 66 | nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data); | ||
| 67 | else | ||
| 68 | nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data); | ||
| 69 | return 0; | ||
| 70 | } | ||
| 71 | |||
| 72 | static int | ||
| 73 | gm200_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr) | 60 | gm200_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr) |
| 74 | { | 61 | { |
| 75 | struct nvkm_device *device = outp->base.disp->engine.subdev.device; | 62 | struct nvkm_device *device = outp->base.disp->engine.subdev.device; |
| @@ -129,7 +116,7 @@ gm200_sor_dp_drv_ctl(struct nvkm_output_dp *outp, | |||
| 129 | 116 | ||
| 130 | static const struct nvkm_output_dp_func | 117 | static const struct nvkm_output_dp_func |
| 131 | gm200_sor_dp_func = { | 118 | gm200_sor_dp_func = { |
| 132 | .pattern = gm200_sor_dp_pattern, | 119 | .pattern = gm107_sor_dp_pattern, |
| 133 | .lnk_pwr = gm200_sor_dp_lnk_pwr, | 120 | .lnk_pwr = gm200_sor_dp_lnk_pwr, |
| 134 | .lnk_ctl = gf119_sor_dp_lnk_ctl, | 121 | .lnk_ctl = gf119_sor_dp_lnk_ctl, |
| 135 | .drv_ctl = gm200_sor_dp_drv_ctl, | 122 | .drv_ctl = gm200_sor_dp_drv_ctl, |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c index 9513badb8220..ae9ab5b1ab97 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | |||
| @@ -949,22 +949,41 @@ gf100_gr_trap_gpc_rop(struct gf100_gr *gr, int gpc) | |||
| 949 | } | 949 | } |
| 950 | 950 | ||
| 951 | static const struct nvkm_enum gf100_mp_warp_error[] = { | 951 | static const struct nvkm_enum gf100_mp_warp_error[] = { |
| 952 | { 0x00, "NO_ERROR" }, | 952 | { 0x01, "STACK_ERROR" }, |
| 953 | { 0x01, "STACK_MISMATCH" }, | 953 | { 0x02, "API_STACK_ERROR" }, |
| 954 | { 0x03, "RET_EMPTY_STACK_ERROR" }, | ||
| 955 | { 0x04, "PC_WRAP" }, | ||
| 954 | { 0x05, "MISALIGNED_PC" }, | 956 | { 0x05, "MISALIGNED_PC" }, |
| 955 | { 0x08, "MISALIGNED_GPR" }, | 957 | { 0x06, "PC_OVERFLOW" }, |
| 956 | { 0x09, "INVALID_OPCODE" }, | 958 | { 0x07, "MISALIGNED_IMMC_ADDR" }, |
| 957 | { 0x0d, "GPR_OUT_OF_BOUNDS" }, | 959 | { 0x08, "MISALIGNED_REG" }, |
| 958 | { 0x0e, "MEM_OUT_OF_BOUNDS" }, | 960 | { 0x09, "ILLEGAL_INSTR_ENCODING" }, |
| 959 | { 0x0f, "UNALIGNED_MEM_ACCESS" }, | 961 | { 0x0a, "ILLEGAL_SPH_INSTR_COMBO" }, |
| 962 | { 0x0b, "ILLEGAL_INSTR_PARAM" }, | ||
| 963 | { 0x0c, "INVALID_CONST_ADDR" }, | ||
| 964 | { 0x0d, "OOR_REG" }, | ||
| 965 | { 0x0e, "OOR_ADDR" }, | ||
| 966 | { 0x0f, "MISALIGNED_ADDR" }, | ||
| 960 | { 0x10, "INVALID_ADDR_SPACE" }, | 967 | { 0x10, "INVALID_ADDR_SPACE" }, |
| 961 | { 0x11, "INVALID_PARAM" }, | 968 | { 0x11, "ILLEGAL_INSTR_PARAM2" }, |
| 969 | { 0x12, "INVALID_CONST_ADDR_LDC" }, | ||
| 970 | { 0x13, "GEOMETRY_SM_ERROR" }, | ||
| 971 | { 0x14, "DIVERGENT" }, | ||
| 972 | { 0x15, "WARP_EXIT" }, | ||
| 962 | {} | 973 | {} |
| 963 | }; | 974 | }; |
| 964 | 975 | ||
| 965 | static const struct nvkm_bitfield gf100_mp_global_error[] = { | 976 | static const struct nvkm_bitfield gf100_mp_global_error[] = { |
| 977 | { 0x00000001, "SM_TO_SM_FAULT" }, | ||
| 978 | { 0x00000002, "L1_ERROR" }, | ||
| 966 | { 0x00000004, "MULTIPLE_WARP_ERRORS" }, | 979 | { 0x00000004, "MULTIPLE_WARP_ERRORS" }, |
| 967 | { 0x00000008, "OUT_OF_STACK_SPACE" }, | 980 | { 0x00000008, "PHYSICAL_STACK_OVERFLOW" }, |
| 981 | { 0x00000010, "BPT_INT" }, | ||
| 982 | { 0x00000020, "BPT_PAUSE" }, | ||
| 983 | { 0x00000040, "SINGLE_STEP_COMPLETE" }, | ||
| 984 | { 0x20000000, "ECC_SEC_ERROR" }, | ||
| 985 | { 0x40000000, "ECC_DED_ERROR" }, | ||
| 986 | { 0x80000000, "TIMEOUT" }, | ||
| 968 | {} | 987 | {} |
| 969 | }; | 988 | }; |
| 970 | 989 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c index a5e92135cd77..9efb1b48cd54 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c | |||
| @@ -141,7 +141,8 @@ nvbios_ocfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx, | |||
| 141 | { | 141 | { |
| 142 | u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len); | 142 | u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len); |
| 143 | if (data) { | 143 | if (data) { |
| 144 | info->match = nvbios_rd16(bios, data + 0x00); | 144 | info->proto = nvbios_rd08(bios, data + 0x00); |
| 145 | info->flags = nvbios_rd16(bios, data + 0x01); | ||
| 145 | info->clkcmp[0] = nvbios_rd16(bios, data + 0x02); | 146 | info->clkcmp[0] = nvbios_rd16(bios, data + 0x02); |
| 146 | info->clkcmp[1] = nvbios_rd16(bios, data + 0x04); | 147 | info->clkcmp[1] = nvbios_rd16(bios, data + 0x04); |
| 147 | } | 148 | } |
| @@ -149,12 +150,13 @@ nvbios_ocfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx, | |||
| 149 | } | 150 | } |
| 150 | 151 | ||
| 151 | u16 | 152 | u16 |
| 152 | nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u16 type, | 153 | nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u8 proto, u8 flags, |
| 153 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *info) | 154 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *info) |
| 154 | { | 155 | { |
| 155 | u16 data, idx = 0; | 156 | u16 data, idx = 0; |
| 156 | while ((data = nvbios_ocfg_parse(bios, outp, idx++, ver, hdr, cnt, len, info))) { | 157 | while ((data = nvbios_ocfg_parse(bios, outp, idx++, ver, hdr, cnt, len, info))) { |
| 157 | if (info->match == type) | 158 | if ((info->proto == proto || info->proto == 0xff) && |
| 159 | (info->flags == flags)) | ||
| 158 | break; | 160 | break; |
| 159 | } | 161 | } |
| 160 | return data; | 162 | return data; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c index 323c79abe468..41bd5d0f7692 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c | |||
| @@ -276,6 +276,8 @@ nvkm_iccsense_oneinit(struct nvkm_subdev *subdev) | |||
| 276 | struct pwr_rail_t *r = &stbl.rail[i]; | 276 | struct pwr_rail_t *r = &stbl.rail[i]; |
| 277 | struct nvkm_iccsense_rail *rail; | 277 | struct nvkm_iccsense_rail *rail; |
| 278 | struct nvkm_iccsense_sensor *sensor; | 278 | struct nvkm_iccsense_sensor *sensor; |
| 279 | int (*read)(struct nvkm_iccsense *, | ||
| 280 | struct nvkm_iccsense_rail *); | ||
| 279 | 281 | ||
| 280 | if (!r->mode || r->resistor_mohm == 0) | 282 | if (!r->mode || r->resistor_mohm == 0) |
| 281 | continue; | 283 | continue; |
| @@ -284,31 +286,31 @@ nvkm_iccsense_oneinit(struct nvkm_subdev *subdev) | |||
| 284 | if (!sensor) | 286 | if (!sensor) |
| 285 | continue; | 287 | continue; |
| 286 | 288 | ||
| 287 | rail = kmalloc(sizeof(*rail), GFP_KERNEL); | ||
| 288 | if (!rail) | ||
| 289 | return -ENOMEM; | ||
| 290 | |||
| 291 | switch (sensor->type) { | 289 | switch (sensor->type) { |
| 292 | case NVBIOS_EXTDEV_INA209: | 290 | case NVBIOS_EXTDEV_INA209: |
| 293 | if (r->rail != 0) | 291 | if (r->rail != 0) |
| 294 | continue; | 292 | continue; |
| 295 | rail->read = nvkm_iccsense_ina209_read; | 293 | read = nvkm_iccsense_ina209_read; |
| 296 | break; | 294 | break; |
| 297 | case NVBIOS_EXTDEV_INA219: | 295 | case NVBIOS_EXTDEV_INA219: |
| 298 | if (r->rail != 0) | 296 | if (r->rail != 0) |
| 299 | continue; | 297 | continue; |
| 300 | rail->read = nvkm_iccsense_ina219_read; | 298 | read = nvkm_iccsense_ina219_read; |
| 301 | break; | 299 | break; |
| 302 | case NVBIOS_EXTDEV_INA3221: | 300 | case NVBIOS_EXTDEV_INA3221: |
| 303 | if (r->rail >= 3) | 301 | if (r->rail >= 3) |
| 304 | continue; | 302 | continue; |
| 305 | rail->read = nvkm_iccsense_ina3221_read; | 303 | read = nvkm_iccsense_ina3221_read; |
| 306 | break; | 304 | break; |
| 307 | default: | 305 | default: |
| 308 | continue; | 306 | continue; |
| 309 | } | 307 | } |
| 310 | 308 | ||
| 309 | rail = kmalloc(sizeof(*rail), GFP_KERNEL); | ||
| 310 | if (!rail) | ||
| 311 | return -ENOMEM; | ||
| 311 | sensor->rail_mask |= 1 << r->rail; | 312 | sensor->rail_mask |= 1 << r->rail; |
| 313 | rail->read = read; | ||
| 312 | rail->sensor = sensor; | 314 | rail->sensor = sensor; |
| 313 | rail->idx = r->rail; | 315 | rail->idx = r->rail; |
| 314 | rail->mohm = r->resistor_mohm; | 316 | rail->mohm = r->resistor_mohm; |
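The iccsense hunk above moves the rail allocation below the switch so that the continue paths no longer leak it; a minimal userspace sketch of that shape, using illustrative names (build_rails(), struct rail, modes[]) rather than the driver's real structures:

#include <stdlib.h>

struct rail { int idx; };

/*
 * Illustrative only. The point of the change above: allocate the per-rail
 * object only after every "continue" check has passed; otherwise each
 * skipped table entry leaks one allocation.
 */
static int build_rails(const int *modes, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		struct rail *rail;

		if (!modes[i])			/* unsupported entry: skip */
			continue;

		rail = malloc(sizeof(*rail));	/* allocate only now */
		if (!rail)
			return -1;

		rail->idx = (int)i;
		free(rail);			/* a real driver would keep it */
	}
	return 0;
}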
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c index e292f5679418..389fb13a1998 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c | |||
| @@ -69,11 +69,11 @@ gm107_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth) | |||
| 69 | } | 69 | } |
| 70 | 70 | ||
| 71 | static void | 71 | static void |
| 72 | gm107_ltc_lts_isr(struct nvkm_ltc *ltc, int c, int s) | 72 | gm107_ltc_intr_lts(struct nvkm_ltc *ltc, int c, int s) |
| 73 | { | 73 | { |
| 74 | struct nvkm_subdev *subdev = <c->subdev; | 74 | struct nvkm_subdev *subdev = <c->subdev; |
| 75 | struct nvkm_device *device = subdev->device; | 75 | struct nvkm_device *device = subdev->device; |
| 76 | u32 base = 0x140000 + (c * 0x2000) + (s * 0x200); | 76 | u32 base = 0x140400 + (c * 0x2000) + (s * 0x200); |
| 77 | u32 stat = nvkm_rd32(device, base + 0x00c); | 77 | u32 stat = nvkm_rd32(device, base + 0x00c); |
| 78 | 78 | ||
| 79 | if (stat) { | 79 | if (stat) { |
| @@ -92,7 +92,7 @@ gm107_ltc_intr(struct nvkm_ltc *ltc) | |||
| 92 | while (mask) { | 92 | while (mask) { |
| 93 | u32 s, c = __ffs(mask); | 93 | u32 s, c = __ffs(mask); |
| 94 | for (s = 0; s < ltc->lts_nr; s++) | 94 | for (s = 0; s < ltc->lts_nr; s++) |
| 95 | gm107_ltc_lts_isr(ltc, c, s); | 95 | gm107_ltc_intr_lts(ltc, c, s); |
| 96 | mask &= ~(1 << c); | 96 | mask &= ~(1 << c); |
| 97 | } | 97 | } |
| 98 | } | 98 | } |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c index 2a29bfd5125a..e18e0dc19ec8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c | |||
| @@ -46,7 +46,7 @@ static const struct nvkm_ltc_func | |||
| 46 | gm200_ltc = { | 46 | gm200_ltc = { |
| 47 | .oneinit = gm200_ltc_oneinit, | 47 | .oneinit = gm200_ltc_oneinit, |
| 48 | .init = gm200_ltc_init, | 48 | .init = gm200_ltc_init, |
| 49 | .intr = gm107_ltc_intr, /*XXX: not validated */ | 49 | .intr = gm107_ltc_intr, |
| 50 | .cbc_clear = gm107_ltc_cbc_clear, | 50 | .cbc_clear = gm107_ltc_cbc_clear, |
| 51 | .cbc_wait = gm107_ltc_cbc_wait, | 51 | .cbc_wait = gm107_ltc_cbc_wait, |
| 52 | .zbc = 16, | 52 | .zbc = 16, |
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 9ed8272e54ae..56c43f355ce3 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c | |||
| @@ -1167,7 +1167,6 @@ static int dsi_regulator_init(struct platform_device *dsidev) | |||
| 1167 | { | 1167 | { |
| 1168 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | 1168 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); |
| 1169 | struct regulator *vdds_dsi; | 1169 | struct regulator *vdds_dsi; |
| 1170 | int r; | ||
| 1171 | 1170 | ||
| 1172 | if (dsi->vdds_dsi_reg != NULL) | 1171 | if (dsi->vdds_dsi_reg != NULL) |
| 1173 | return 0; | 1172 | return 0; |
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index e129245eb8a9..9255c0e1e4a7 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c | |||
| @@ -120,7 +120,6 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data) | |||
| 120 | 120 | ||
| 121 | static int hdmi_init_regulator(void) | 121 | static int hdmi_init_regulator(void) |
| 122 | { | 122 | { |
| 123 | int r; | ||
| 124 | struct regulator *reg; | 123 | struct regulator *reg; |
| 125 | 124 | ||
| 126 | if (hdmi.vdda_reg != NULL) | 125 | if (hdmi.vdda_reg != NULL) |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 2e216e2ea78c..259cd6e6d71c 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
| @@ -589,7 +589,8 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
| 589 | if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev) || ASIC_IS_DCE8(rdev)) | 589 | if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev) || ASIC_IS_DCE8(rdev)) |
| 590 | radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; | 590 | radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; |
| 591 | /* use frac fb div on RS780/RS880 */ | 591 | /* use frac fb div on RS780/RS880 */ |
| 592 | if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) | 592 | if (((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) |
| 593 | && !radeon_crtc->ss_enabled) | ||
| 593 | radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; | 594 | radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; |
| 594 | if (ASIC_IS_DCE32(rdev) && mode->clock > 165000) | 595 | if (ASIC_IS_DCE32(rdev) && mode->clock > 165000) |
| 595 | radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; | 596 | radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; |
| @@ -626,7 +627,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
| 626 | if (radeon_crtc->ss.refdiv) { | 627 | if (radeon_crtc->ss.refdiv) { |
| 627 | radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV; | 628 | radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV; |
| 628 | radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv; | 629 | radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv; |
| 629 | if (ASIC_IS_AVIVO(rdev)) | 630 | if (rdev->family >= CHIP_RV770) |
| 630 | radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; | 631 | radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; |
| 631 | } | 632 | } |
| 632 | } | 633 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index e721e6b2766e..21c44b2293bc 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
| @@ -630,6 +630,23 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) | |||
| 630 | /* | 630 | /* |
| 631 | * GPU helpers function. | 631 | * GPU helpers function. |
| 632 | */ | 632 | */ |
| 633 | |||
| 634 | /** | ||
| 635 | * radeon_device_is_virtual - check if we are running is a virtual environment | ||
| 636 | * | ||
| 637 | * Check if the asic has been passed through to a VM (all asics). | ||
| 638 | * Used at driver startup. | ||
| 639 | * Returns true if virtual or false if not. | ||
| 640 | */ | ||
| 641 | static bool radeon_device_is_virtual(void) | ||
| 642 | { | ||
| 643 | #ifdef CONFIG_X86 | ||
| 644 | return boot_cpu_has(X86_FEATURE_HYPERVISOR); | ||
| 645 | #else | ||
| 646 | return false; | ||
| 647 | #endif | ||
| 648 | } | ||
| 649 | |||
| 633 | /** | 650 | /** |
| 634 | * radeon_card_posted - check if the hw has already been initialized | 651 | * radeon_card_posted - check if the hw has already been initialized |
| 635 | * | 652 | * |
| @@ -643,6 +660,10 @@ bool radeon_card_posted(struct radeon_device *rdev) | |||
| 643 | { | 660 | { |
| 644 | uint32_t reg; | 661 | uint32_t reg; |
| 645 | 662 | ||
| 663 | /* for pass through, always force asic_init */ | ||
| 664 | if (radeon_device_is_virtual()) | ||
| 665 | return false; | ||
| 666 | |||
| 646 | /* required for EFI mode on macbook2,1 which uses an r5xx asic */ | 667 | /* required for EFI mode on macbook2,1 which uses an r5xx asic */ |
| 647 | if (efi_enabled(EFI_BOOT) && | 668 | if (efi_enabled(EFI_BOOT) && |
| 648 | (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) && | 669 | (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) && |
| @@ -1631,7 +1652,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, | |||
| 1631 | radeon_agp_suspend(rdev); | 1652 | radeon_agp_suspend(rdev); |
| 1632 | 1653 | ||
| 1633 | pci_save_state(dev->pdev); | 1654 | pci_save_state(dev->pdev); |
| 1634 | if (freeze && rdev->family >= CHIP_R600) { | 1655 | if (freeze && rdev->family >= CHIP_CEDAR) { |
| 1635 | rdev->asic->asic_reset(rdev, true); | 1656 | rdev->asic->asic_reset(rdev, true); |
| 1636 | pci_restore_state(dev->pdev); | 1657 | pci_restore_state(dev->pdev); |
| 1637 | } else if (suspend) { | 1658 | } else if (suspend) { |
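radeon_device_is_virtual() above relies on boot_cpu_has(X86_FEATURE_HYPERVISOR); that flag corresponds to CPUID leaf 1, ECX bit 31, which a hypervisor sets for its guests. A rough userspace sketch of the same check, assuming a GCC/Clang toolchain that provides <cpuid.h> (running_under_hypervisor() is illustrative, not part of the patch):

#include <stdbool.h>
#include <cpuid.h>

/* Illustrative only: read the hypervisor-present bit directly via CPUID. */
static bool running_under_hypervisor(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return false;

	return (ecx >> 31) & 1;	/* leaf 1, ECX bit 31 */
}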
diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig index 99510e64e91a..a4b357db8856 100644 --- a/drivers/gpu/drm/sun4i/Kconfig +++ b/drivers/gpu/drm/sun4i/Kconfig | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | config DRM_SUN4I | 1 | config DRM_SUN4I |
| 2 | tristate "DRM Support for Allwinner A10 Display Engine" | 2 | tristate "DRM Support for Allwinner A10 Display Engine" |
| 3 | depends on DRM && ARM | 3 | depends on DRM && ARM && COMMON_CLK |
| 4 | depends on ARCH_SUNXI || COMPILE_TEST | 4 | depends on ARCH_SUNXI || COMPILE_TEST |
| 5 | select DRM_GEM_CMA_HELPER | 5 | select DRM_GEM_CMA_HELPER |
| 6 | select DRM_KMS_HELPER | 6 | select DRM_KMS_HELPER |
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c index f7a15c1a93bf..3ab560450a82 100644 --- a/drivers/gpu/drm/sun4i/sun4i_backend.c +++ b/drivers/gpu/drm/sun4i/sun4i_backend.c | |||
| @@ -190,7 +190,7 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend, | |||
| 190 | /* Get the physical address of the buffer in memory */ | 190 | /* Get the physical address of the buffer in memory */ |
| 191 | gem = drm_fb_cma_get_gem_obj(fb, 0); | 191 | gem = drm_fb_cma_get_gem_obj(fb, 0); |
| 192 | 192 | ||
| 193 | DRM_DEBUG_DRIVER("Using GEM @ 0x%x\n", gem->paddr); | 193 | DRM_DEBUG_DRIVER("Using GEM @ %pad\n", &gem->paddr); |
| 194 | 194 | ||
| 195 | /* Compute the start of the displayed memory */ | 195 | /* Compute the start of the displayed memory */ |
| 196 | bpp = drm_format_plane_cpp(fb->pixel_format, 0); | 196 | bpp = drm_format_plane_cpp(fb->pixel_format, 0); |
| @@ -198,7 +198,7 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend, | |||
| 198 | paddr += (state->src_x >> 16) * bpp; | 198 | paddr += (state->src_x >> 16) * bpp; |
| 199 | paddr += (state->src_y >> 16) * fb->pitches[0]; | 199 | paddr += (state->src_y >> 16) * fb->pitches[0]; |
| 200 | 200 | ||
| 201 | DRM_DEBUG_DRIVER("Setting buffer address to 0x%x\n", paddr); | 201 | DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr); |
| 202 | 202 | ||
| 203 | /* Write the 32 lower bits of the address (in bits) */ | 203 | /* Write the 32 lower bits of the address (in bits) */ |
| 204 | lo_paddr = paddr << 3; | 204 | lo_paddr = paddr << 3; |
diff --git a/drivers/gpu/drm/sun4i/sun4i_dotclock.c b/drivers/gpu/drm/sun4i/sun4i_dotclock.c index 3ff668cb463c..5b3463197c48 100644 --- a/drivers/gpu/drm/sun4i/sun4i_dotclock.c +++ b/drivers/gpu/drm/sun4i/sun4i_dotclock.c | |||
| @@ -72,14 +72,40 @@ static unsigned long sun4i_dclk_recalc_rate(struct clk_hw *hw, | |||
| 72 | static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate, | 72 | static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate, |
| 73 | unsigned long *parent_rate) | 73 | unsigned long *parent_rate) |
| 74 | { | 74 | { |
| 75 | return *parent_rate / DIV_ROUND_CLOSEST(*parent_rate, rate); | 75 | unsigned long best_parent = 0; |
| 76 | u8 best_div = 1; | ||
| 77 | int i; | ||
| 78 | |||
| 79 | for (i = 6; i < 127; i++) { | ||
| 80 | unsigned long ideal = rate * i; | ||
| 81 | unsigned long rounded; | ||
| 82 | |||
| 83 | rounded = clk_hw_round_rate(clk_hw_get_parent(hw), | ||
| 84 | ideal); | ||
| 85 | |||
| 86 | if (rounded == ideal) { | ||
| 87 | best_parent = rounded; | ||
| 88 | best_div = i; | ||
| 89 | goto out; | ||
| 90 | } | ||
| 91 | |||
| 92 | if ((rounded < ideal) && (rounded > best_parent)) { | ||
| 93 | best_parent = rounded; | ||
| 94 | best_div = i; | ||
| 95 | } | ||
| 96 | } | ||
| 97 | |||
| 98 | out: | ||
| 99 | *parent_rate = best_parent; | ||
| 100 | |||
| 101 | return best_parent / best_div; | ||
| 76 | } | 102 | } |
| 77 | 103 | ||
| 78 | static int sun4i_dclk_set_rate(struct clk_hw *hw, unsigned long rate, | 104 | static int sun4i_dclk_set_rate(struct clk_hw *hw, unsigned long rate, |
| 79 | unsigned long parent_rate) | 105 | unsigned long parent_rate) |
| 80 | { | 106 | { |
| 81 | struct sun4i_dclk *dclk = hw_to_dclk(hw); | 107 | struct sun4i_dclk *dclk = hw_to_dclk(hw); |
| 82 | int div = DIV_ROUND_CLOSEST(parent_rate, rate); | 108 | u8 div = parent_rate / rate; |
| 83 | 109 | ||
| 84 | return regmap_update_bits(dclk->regmap, SUN4I_TCON0_DCLK_REG, | 110 | return regmap_update_bits(dclk->regmap, SUN4I_TCON0_DCLK_REG, |
| 85 | GENMASK(6, 0), div); | 111 | GENMASK(6, 0), div); |
| @@ -127,10 +153,14 @@ int sun4i_dclk_create(struct device *dev, struct sun4i_tcon *tcon) | |||
| 127 | const char *clk_name, *parent_name; | 153 | const char *clk_name, *parent_name; |
| 128 | struct clk_init_data init; | 154 | struct clk_init_data init; |
| 129 | struct sun4i_dclk *dclk; | 155 | struct sun4i_dclk *dclk; |
| 156 | int ret; | ||
| 130 | 157 | ||
| 131 | parent_name = __clk_get_name(tcon->sclk0); | 158 | parent_name = __clk_get_name(tcon->sclk0); |
| 132 | of_property_read_string_index(dev->of_node, "clock-output-names", 0, | 159 | ret = of_property_read_string_index(dev->of_node, |
| 133 | &clk_name); | 160 | "clock-output-names", 0, |
| 161 | &clk_name); | ||
| 162 | if (ret) | ||
| 163 | return ret; | ||
| 134 | 164 | ||
| 135 | dclk = devm_kzalloc(dev, sizeof(*dclk), GFP_KERNEL); | 165 | dclk = devm_kzalloc(dev, sizeof(*dclk), GFP_KERNEL); |
| 136 | if (!dclk) | 166 | if (!dclk) |
| @@ -140,6 +170,7 @@ int sun4i_dclk_create(struct device *dev, struct sun4i_tcon *tcon) | |||
| 140 | init.ops = &sun4i_dclk_ops; | 170 | init.ops = &sun4i_dclk_ops; |
| 141 | init.parent_names = &parent_name; | 171 | init.parent_names = &parent_name; |
| 142 | init.num_parents = 1; | 172 | init.num_parents = 1; |
| 173 | init.flags = CLK_SET_RATE_PARENT; | ||
| 143 | 174 | ||
| 144 | dclk->regmap = tcon->regs; | 175 | dclk->regmap = tcon->regs; |
| 145 | dclk->hw.init = &init; | 176 | dclk->hw.init = &init; |
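A minimal standalone sketch of the divider search introduced in sun4i_dclk_round_rate() above: try TCON dividers 6..126, ask the parent clock how close it can get to div * rate, and keep the best candidate that does not overshoot. pick_dotclock() and parent_round() are illustrative stand-ins (parent_round() playing the role of clk_hw_round_rate() on the parent), not part of the patch:

/*
 * Illustrative only: mirrors the loop in sun4i_dclk_round_rate().
 * An exact match wins immediately; otherwise keep the closest
 * achievable rate from below.
 */
static unsigned long pick_dotclock(unsigned long rate,
				   unsigned long (*parent_round)(unsigned long),
				   unsigned long *best_parent_rate)
{
	unsigned long best_parent = 0;
	unsigned int best_div = 1, div;

	for (div = 6; div < 127; div++) {
		unsigned long ideal = rate * div;
		unsigned long rounded = parent_round(ideal);

		if (rounded == ideal) {
			best_parent = rounded;
			best_div = div;
			break;
		}

		if (rounded < ideal && rounded > best_parent) {
			best_parent = rounded;
			best_div = div;
		}
	}

	*best_parent_rate = best_parent;
	return best_parent / best_div;
}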
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index 76e922bb60e5..257d2b4f3645 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c | |||
| @@ -24,34 +24,6 @@ | |||
| 24 | #include "sun4i_layer.h" | 24 | #include "sun4i_layer.h" |
| 25 | #include "sun4i_tcon.h" | 25 | #include "sun4i_tcon.h" |
| 26 | 26 | ||
| 27 | static int sun4i_drv_connector_plug_all(struct drm_device *drm) | ||
| 28 | { | ||
| 29 | struct drm_connector *connector, *failed; | ||
| 30 | int ret; | ||
| 31 | |||
| 32 | mutex_lock(&drm->mode_config.mutex); | ||
| 33 | list_for_each_entry(connector, &drm->mode_config.connector_list, head) { | ||
| 34 | ret = drm_connector_register(connector); | ||
| 35 | if (ret) { | ||
| 36 | failed = connector; | ||
| 37 | goto err; | ||
| 38 | } | ||
| 39 | } | ||
| 40 | mutex_unlock(&drm->mode_config.mutex); | ||
| 41 | return 0; | ||
| 42 | |||
| 43 | err: | ||
| 44 | list_for_each_entry(connector, &drm->mode_config.connector_list, head) { | ||
| 45 | if (failed == connector) | ||
| 46 | break; | ||
| 47 | |||
| 48 | drm_connector_unregister(connector); | ||
| 49 | } | ||
| 50 | mutex_unlock(&drm->mode_config.mutex); | ||
| 51 | |||
| 52 | return ret; | ||
| 53 | } | ||
| 54 | |||
| 55 | static int sun4i_drv_enable_vblank(struct drm_device *drm, unsigned int pipe) | 27 | static int sun4i_drv_enable_vblank(struct drm_device *drm, unsigned int pipe) |
| 56 | { | 28 | { |
| 57 | struct sun4i_drv *drv = drm->dev_private; | 29 | struct sun4i_drv *drv = drm->dev_private; |
| @@ -125,6 +97,22 @@ static struct drm_driver sun4i_drv_driver = { | |||
| 125 | .disable_vblank = sun4i_drv_disable_vblank, | 97 | .disable_vblank = sun4i_drv_disable_vblank, |
| 126 | }; | 98 | }; |
| 127 | 99 | ||
| 100 | static void sun4i_remove_framebuffers(void) | ||
| 101 | { | ||
| 102 | struct apertures_struct *ap; | ||
| 103 | |||
| 104 | ap = alloc_apertures(1); | ||
| 105 | if (!ap) | ||
| 106 | return; | ||
| 107 | |||
| 108 | /* The framebuffer can be located anywhere in RAM */ | ||
| 109 | ap->ranges[0].base = 0; | ||
| 110 | ap->ranges[0].size = ~0; | ||
| 111 | |||
| 112 | remove_conflicting_framebuffers(ap, "sun4i-drm-fb", false); | ||
| 113 | kfree(ap); | ||
| 114 | } | ||
| 115 | |||
| 128 | static int sun4i_drv_bind(struct device *dev) | 116 | static int sun4i_drv_bind(struct device *dev) |
| 129 | { | 117 | { |
| 130 | struct drm_device *drm; | 118 | struct drm_device *drm; |
| @@ -172,6 +160,9 @@ static int sun4i_drv_bind(struct device *dev) | |||
| 172 | } | 160 | } |
| 173 | drm->irq_enabled = true; | 161 | drm->irq_enabled = true; |
| 174 | 162 | ||
| 163 | /* Remove early framebuffers (ie. simplefb) */ | ||
| 164 | sun4i_remove_framebuffers(); | ||
| 165 | |||
| 175 | /* Create our framebuffer */ | 166 | /* Create our framebuffer */ |
| 176 | drv->fbdev = sun4i_framebuffer_init(drm); | 167 | drv->fbdev = sun4i_framebuffer_init(drm); |
| 177 | if (IS_ERR(drv->fbdev)) { | 168 | if (IS_ERR(drv->fbdev)) { |
| @@ -187,7 +178,7 @@ static int sun4i_drv_bind(struct device *dev) | |||
| 187 | if (ret) | 178 | if (ret) |
| 188 | goto free_drm; | 179 | goto free_drm; |
| 189 | 180 | ||
| 190 | ret = sun4i_drv_connector_plug_all(drm); | 181 | ret = drm_connector_register_all(drm); |
| 191 | if (ret) | 182 | if (ret) |
| 192 | goto unregister_drm; | 183 | goto unregister_drm; |
| 193 | 184 | ||
| @@ -204,6 +195,7 @@ static void sun4i_drv_unbind(struct device *dev) | |||
| 204 | { | 195 | { |
| 205 | struct drm_device *drm = dev_get_drvdata(dev); | 196 | struct drm_device *drm = dev_get_drvdata(dev); |
| 206 | 197 | ||
| 198 | drm_connector_unregister_all(drm); | ||
| 207 | drm_dev_unregister(drm); | 199 | drm_dev_unregister(drm); |
| 208 | drm_kms_helper_poll_fini(drm); | 200 | drm_kms_helper_poll_fini(drm); |
| 209 | sun4i_framebuffer_free(drm); | 201 | sun4i_framebuffer_free(drm); |
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c index ab6494818050..aaffe9e64ffb 100644 --- a/drivers/gpu/drm/sun4i/sun4i_rgb.c +++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c | |||
| @@ -54,8 +54,13 @@ static int sun4i_rgb_get_modes(struct drm_connector *connector) | |||
| 54 | static int sun4i_rgb_mode_valid(struct drm_connector *connector, | 54 | static int sun4i_rgb_mode_valid(struct drm_connector *connector, |
| 55 | struct drm_display_mode *mode) | 55 | struct drm_display_mode *mode) |
| 56 | { | 56 | { |
| 57 | struct sun4i_rgb *rgb = drm_connector_to_sun4i_rgb(connector); | ||
| 58 | struct sun4i_drv *drv = rgb->drv; | ||
| 59 | struct sun4i_tcon *tcon = drv->tcon; | ||
| 57 | u32 hsync = mode->hsync_end - mode->hsync_start; | 60 | u32 hsync = mode->hsync_end - mode->hsync_start; |
| 58 | u32 vsync = mode->vsync_end - mode->vsync_start; | 61 | u32 vsync = mode->vsync_end - mode->vsync_start; |
| 62 | unsigned long rate = mode->clock * 1000; | ||
| 63 | long rounded_rate; | ||
| 59 | 64 | ||
| 60 | DRM_DEBUG_DRIVER("Validating modes...\n"); | 65 | DRM_DEBUG_DRIVER("Validating modes...\n"); |
| 61 | 66 | ||
| @@ -87,6 +92,15 @@ static int sun4i_rgb_mode_valid(struct drm_connector *connector, | |||
| 87 | 92 | ||
| 88 | DRM_DEBUG_DRIVER("Vertical parameters OK\n"); | 93 | DRM_DEBUG_DRIVER("Vertical parameters OK\n"); |
| 89 | 94 | ||
| 95 | rounded_rate = clk_round_rate(tcon->dclk, rate); | ||
| 96 | if (rounded_rate < rate) | ||
| 97 | return MODE_CLOCK_LOW; | ||
| 98 | |||
| 99 | if (rounded_rate > rate) | ||
| 100 | return MODE_CLOCK_HIGH; | ||
| 101 | |||
| 102 | DRM_DEBUG_DRIVER("Clock rate OK\n"); | ||
| 103 | |||
| 90 | return MODE_OK; | 104 | return MODE_OK; |
| 91 | } | 105 | } |
| 92 | 106 | ||
| @@ -203,7 +217,7 @@ int sun4i_rgb_init(struct drm_device *drm) | |||
| 203 | int ret; | 217 | int ret; |
| 204 | 218 | ||
| 205 | /* If we don't have a panel, there's no point in going on */ | 219 | /* If we don't have a panel, there's no point in going on */ |
| 206 | if (!tcon->panel) | 220 | if (IS_ERR(tcon->panel)) |
| 207 | return -ENODEV; | 221 | return -ENODEV; |
| 208 | 222 | ||
| 209 | rgb = devm_kzalloc(drm->dev, sizeof(*rgb), GFP_KERNEL); | 223 | rgb = devm_kzalloc(drm->dev, sizeof(*rgb), GFP_KERNEL); |
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index 9f19b0e08560..652385f09735 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c | |||
| @@ -425,11 +425,11 @@ static struct drm_panel *sun4i_tcon_find_panel(struct device_node *node) | |||
| 425 | 425 | ||
| 426 | remote = of_graph_get_remote_port_parent(end_node); | 426 | remote = of_graph_get_remote_port_parent(end_node); |
| 427 | if (!remote) { | 427 | if (!remote) { |
| 428 | DRM_DEBUG_DRIVER("Enable to parse remote node\n"); | 428 | DRM_DEBUG_DRIVER("Unable to parse remote node\n"); |
| 429 | return ERR_PTR(-EINVAL); | 429 | return ERR_PTR(-EINVAL); |
| 430 | } | 430 | } |
| 431 | 431 | ||
| 432 | return of_drm_find_panel(remote); | 432 | return of_drm_find_panel(remote) ?: ERR_PTR(-EPROBE_DEFER); |
| 433 | } | 433 | } |
| 434 | 434 | ||
| 435 | static int sun4i_tcon_bind(struct device *dev, struct device *master, | 435 | static int sun4i_tcon_bind(struct device *dev, struct device *master, |
| @@ -490,7 +490,11 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master, | |||
| 490 | return 0; | 490 | return 0; |
| 491 | } | 491 | } |
| 492 | 492 | ||
| 493 | return sun4i_rgb_init(drm); | 493 | ret = sun4i_rgb_init(drm); |
| 494 | if (ret < 0) | ||
| 495 | goto err_free_clocks; | ||
| 496 | |||
| 497 | return 0; | ||
| 494 | 498 | ||
| 495 | err_free_clocks: | 499 | err_free_clocks: |
| 496 | sun4i_tcon_free_clocks(tcon); | 500 | sun4i_tcon_free_clocks(tcon); |
| @@ -522,12 +526,13 @@ static int sun4i_tcon_probe(struct platform_device *pdev) | |||
| 522 | * Defer the probe. | 526 | * Defer the probe. |
| 523 | */ | 527 | */ |
| 524 | panel = sun4i_tcon_find_panel(node); | 528 | panel = sun4i_tcon_find_panel(node); |
| 525 | if (IS_ERR(panel)) { | 529 | |
| 526 | /* | 530 | /* |
| 527 | * If we don't have a panel endpoint, just go on | 531 | * If we don't have a panel endpoint, just go on |
| 528 | */ | 532 | */ |
| 529 | if (PTR_ERR(panel) != -ENODEV) | 533 | if (PTR_ERR(panel) == -EPROBE_DEFER) { |
| 530 | return -EPROBE_DEFER; | 534 | DRM_DEBUG_DRIVER("Still waiting for our panel. Deferring...\n"); |
| 535 | return -EPROBE_DEFER; | ||
| 531 | } | 536 | } |
| 532 | 537 | ||
| 533 | return component_add(&pdev->dev, &sun4i_tcon_ops); | 538 | return component_add(&pdev->dev, &sun4i_tcon_ops); |
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 904d0754ad78..0f18b76c7906 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c | |||
| @@ -456,14 +456,6 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc, | |||
| 456 | 456 | ||
| 457 | WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size); | 457 | WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size); |
| 458 | 458 | ||
| 459 | HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), | ||
| 460 | vc4_state->mm.start); | ||
| 461 | |||
| 462 | if (debug_dump_regs) { | ||
| 463 | DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc)); | ||
| 464 | vc4_hvs_dump_state(dev); | ||
| 465 | } | ||
| 466 | |||
| 467 | if (crtc->state->event) { | 459 | if (crtc->state->event) { |
| 468 | unsigned long flags; | 460 | unsigned long flags; |
| 469 | 461 | ||
| @@ -473,8 +465,20 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc, | |||
| 473 | 465 | ||
| 474 | spin_lock_irqsave(&dev->event_lock, flags); | 466 | spin_lock_irqsave(&dev->event_lock, flags); |
| 475 | vc4_crtc->event = crtc->state->event; | 467 | vc4_crtc->event = crtc->state->event; |
| 476 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
| 477 | crtc->state->event = NULL; | 468 | crtc->state->event = NULL; |
| 469 | |||
| 470 | HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), | ||
| 471 | vc4_state->mm.start); | ||
| 472 | |||
| 473 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
| 474 | } else { | ||
| 475 | HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), | ||
| 476 | vc4_state->mm.start); | ||
| 477 | } | ||
| 478 | |||
| 479 | if (debug_dump_regs) { | ||
| 480 | DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc)); | ||
| 481 | vc4_hvs_dump_state(dev); | ||
| 478 | } | 482 | } |
| 479 | } | 483 | } |
| 480 | 484 | ||
| @@ -500,12 +504,17 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc) | |||
| 500 | { | 504 | { |
| 501 | struct drm_crtc *crtc = &vc4_crtc->base; | 505 | struct drm_crtc *crtc = &vc4_crtc->base; |
| 502 | struct drm_device *dev = crtc->dev; | 506 | struct drm_device *dev = crtc->dev; |
| 507 | struct vc4_dev *vc4 = to_vc4_dev(dev); | ||
| 508 | struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); | ||
| 509 | u32 chan = vc4_crtc->channel; | ||
| 503 | unsigned long flags; | 510 | unsigned long flags; |
| 504 | 511 | ||
| 505 | spin_lock_irqsave(&dev->event_lock, flags); | 512 | spin_lock_irqsave(&dev->event_lock, flags); |
| 506 | if (vc4_crtc->event) { | 513 | if (vc4_crtc->event && |
| 514 | (vc4_state->mm.start == HVS_READ(SCALER_DISPLACTX(chan)))) { | ||
| 507 | drm_crtc_send_vblank_event(crtc, vc4_crtc->event); | 515 | drm_crtc_send_vblank_event(crtc, vc4_crtc->event); |
| 508 | vc4_crtc->event = NULL; | 516 | vc4_crtc->event = NULL; |
| 517 | drm_crtc_vblank_put(crtc); | ||
| 509 | } | 518 | } |
| 510 | spin_unlock_irqrestore(&dev->event_lock, flags); | 519 | spin_unlock_irqrestore(&dev->event_lock, flags); |
| 511 | } | 520 | } |
| @@ -556,6 +565,7 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb) | |||
| 556 | spin_unlock_irqrestore(&dev->event_lock, flags); | 565 | spin_unlock_irqrestore(&dev->event_lock, flags); |
| 557 | } | 566 | } |
| 558 | 567 | ||
| 568 | drm_crtc_vblank_put(crtc); | ||
| 559 | drm_framebuffer_unreference(flip_state->fb); | 569 | drm_framebuffer_unreference(flip_state->fb); |
| 560 | kfree(flip_state); | 570 | kfree(flip_state); |
| 561 | 571 | ||
| @@ -598,6 +608,8 @@ static int vc4_async_page_flip(struct drm_crtc *crtc, | |||
| 598 | return ret; | 608 | return ret; |
| 599 | } | 609 | } |
| 600 | 610 | ||
| 611 | WARN_ON(drm_crtc_vblank_get(crtc) != 0); | ||
| 612 | |||
| 601 | /* Immediately update the plane's legacy fb pointer, so that later | 613 | /* Immediately update the plane's legacy fb pointer, so that later |
| 602 | * modeset prep sees the state that will be present when the semaphore | 614 | * modeset prep sees the state that will be present when the semaphore |
| 603 | * is released. | 615 | * is released. |
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index 3446ece21b4a..250ed7e3754c 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c | |||
| @@ -66,12 +66,12 @@ static const struct file_operations vc4_drm_fops = { | |||
| 66 | }; | 66 | }; |
| 67 | 67 | ||
| 68 | static const struct drm_ioctl_desc vc4_drm_ioctls[] = { | 68 | static const struct drm_ioctl_desc vc4_drm_ioctls[] = { |
| 69 | DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, 0), | 69 | DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, DRM_RENDER_ALLOW), |
| 70 | DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, 0), | 70 | DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, DRM_RENDER_ALLOW), |
| 71 | DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, 0), | 71 | DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, DRM_RENDER_ALLOW), |
| 72 | DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, 0), | 72 | DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, DRM_RENDER_ALLOW), |
| 73 | DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, 0), | 73 | DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, DRM_RENDER_ALLOW), |
| 74 | DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, 0), | 74 | DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, DRM_RENDER_ALLOW), |
| 75 | DRM_IOCTL_DEF_DRV(VC4_GET_HANG_STATE, vc4_get_hang_state_ioctl, | 75 | DRM_IOCTL_DEF_DRV(VC4_GET_HANG_STATE, vc4_get_hang_state_ioctl, |
| 76 | DRM_ROOT_ONLY), | 76 | DRM_ROOT_ONLY), |
| 77 | }; | 77 | }; |
| @@ -91,7 +91,7 @@ static struct drm_driver vc4_drm_driver = { | |||
| 91 | 91 | ||
| 92 | .enable_vblank = vc4_enable_vblank, | 92 | .enable_vblank = vc4_enable_vblank, |
| 93 | .disable_vblank = vc4_disable_vblank, | 93 | .disable_vblank = vc4_disable_vblank, |
| 94 | .get_vblank_counter = drm_vblank_count, | 94 | .get_vblank_counter = drm_vblank_no_hw_counter, |
| 95 | 95 | ||
| 96 | #if defined(CONFIG_DEBUG_FS) | 96 | #if defined(CONFIG_DEBUG_FS) |
| 97 | .debugfs_init = vc4_debugfs_init, | 97 | .debugfs_init = vc4_debugfs_init, |
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index cb37751bc99f..861a623bc185 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c | |||
| @@ -117,10 +117,18 @@ static int vc4_atomic_commit(struct drm_device *dev, | |||
| 117 | return -ENOMEM; | 117 | return -ENOMEM; |
| 118 | 118 | ||
| 119 | /* Make sure that any outstanding modesets have finished. */ | 119 | /* Make sure that any outstanding modesets have finished. */ |
| 120 | ret = down_interruptible(&vc4->async_modeset); | 120 | if (nonblock) { |
| 121 | if (ret) { | 121 | ret = down_trylock(&vc4->async_modeset); |
| 122 | kfree(c); | 122 | if (ret) { |
| 123 | return ret; | 123 | kfree(c); |
| 124 | return -EBUSY; | ||
| 125 | } | ||
| 126 | } else { | ||
| 127 | ret = down_interruptible(&vc4->async_modeset); | ||
| 128 | if (ret) { | ||
| 129 | kfree(c); | ||
| 130 | return ret; | ||
| 131 | } | ||
| 124 | } | 132 | } |
| 125 | 133 | ||
| 126 | ret = drm_atomic_helper_prepare_planes(dev, state); | 134 | ret = drm_atomic_helper_prepare_planes(dev, state); |
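The vc4_atomic_commit() change above splits the two commit flavours: a nonblocking commit must not sleep waiting for the previous modeset, so it try-locks and reports -EBUSY, while a blocking commit sleeps interruptibly. A rough userspace analogue with POSIX semaphores (acquire_modeset_lock() is illustrative, and mapping every sem_wait() failure to -EINTR is a simplification):

#include <semaphore.h>
#include <errno.h>

/* Illustrative only: try-lock for nonblocking callers, sleep otherwise. */
static int acquire_modeset_lock(sem_t *sem, int nonblock)
{
	if (nonblock)
		return sem_trywait(sem) ? -EBUSY : 0;

	return sem_wait(sem) ? -EINTR : 0;
}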
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h index 6163b95c5411..f99eece4cc97 100644 --- a/drivers/gpu/drm/vc4/vc4_regs.h +++ b/drivers/gpu/drm/vc4/vc4_regs.h | |||
| @@ -341,6 +341,10 @@ | |||
| 341 | #define SCALER_DISPLACT0 0x00000030 | 341 | #define SCALER_DISPLACT0 0x00000030 |
| 342 | #define SCALER_DISPLACT1 0x00000034 | 342 | #define SCALER_DISPLACT1 0x00000034 |
| 343 | #define SCALER_DISPLACT2 0x00000038 | 343 | #define SCALER_DISPLACT2 0x00000038 |
| 344 | #define SCALER_DISPLACTX(x) (SCALER_DISPLACT0 + \ | ||
| 345 | (x) * (SCALER_DISPLACT1 - \ | ||
| 346 | SCALER_DISPLACT0)) | ||
| 347 | |||
| 344 | #define SCALER_DISPCTRL0 0x00000040 | 348 | #define SCALER_DISPCTRL0 0x00000040 |
| 345 | # define SCALER_DISPCTRLX_ENABLE BIT(31) | 349 | # define SCALER_DISPCTRLX_ENABLE BIT(31) |
| 346 | # define SCALER_DISPCTRLX_RESET BIT(30) | 350 | # define SCALER_DISPCTRLX_RESET BIT(30) |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c index 6de283c8fa3e..f0374f9b56ca 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
| 29 | #include <linux/module.h> | 29 | #include <linux/module.h> |
| 30 | #include <linux/kernel.h> | 30 | #include <linux/kernel.h> |
| 31 | #include <linux/frame.h> | ||
| 31 | #include <asm/hypervisor.h> | 32 | #include <asm/hypervisor.h> |
| 32 | #include "drmP.h" | 33 | #include "drmP.h" |
| 33 | #include "vmwgfx_msg.h" | 34 | #include "vmwgfx_msg.h" |
| @@ -194,7 +195,7 @@ static int vmw_send_msg(struct rpc_channel *channel, const char *msg) | |||
| 194 | 195 | ||
| 195 | return -EINVAL; | 196 | return -EINVAL; |
| 196 | } | 197 | } |
| 197 | 198 | STACK_FRAME_NON_STANDARD(vmw_send_msg); | |
| 198 | 199 | ||
| 199 | 200 | ||
| 200 | /** | 201 | /** |
| @@ -304,6 +305,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg, | |||
| 304 | 305 | ||
| 305 | return 0; | 306 | return 0; |
| 306 | } | 307 | } |
| 308 | STACK_FRAME_NON_STANDARD(vmw_recv_msg); | ||
| 307 | 309 | ||
| 308 | 310 | ||
| 309 | /** | 311 | /** |
diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c index aad8c162a825..0cd4f7216239 100644 --- a/drivers/hid/hid-elo.c +++ b/drivers/hid/hid-elo.c | |||
| @@ -261,7 +261,7 @@ static void elo_remove(struct hid_device *hdev) | |||
| 261 | struct elo_priv *priv = hid_get_drvdata(hdev); | 261 | struct elo_priv *priv = hid_get_drvdata(hdev); |
| 262 | 262 | ||
| 263 | hid_hw_stop(hdev); | 263 | hid_hw_stop(hdev); |
| 264 | flush_workqueue(wq); | 264 | cancel_delayed_work_sync(&priv->work); |
| 265 | kfree(priv); | 265 | kfree(priv); |
| 266 | } | 266 | } |
| 267 | 267 | ||
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index c741f5e50a66..95b7d61d9910 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c | |||
| @@ -1401,6 +1401,11 @@ static const struct hid_device_id mt_devices[] = { | |||
| 1401 | MT_USB_DEVICE(USB_VENDOR_ID_NOVATEK, | 1401 | MT_USB_DEVICE(USB_VENDOR_ID_NOVATEK, |
| 1402 | USB_DEVICE_ID_NOVATEK_PCT) }, | 1402 | USB_DEVICE_ID_NOVATEK_PCT) }, |
| 1403 | 1403 | ||
| 1404 | /* Ntrig Panel */ | ||
| 1405 | { .driver_data = MT_CLS_NSMU, | ||
| 1406 | HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, | ||
| 1407 | USB_VENDOR_ID_NTRIG, 0x1b05) }, | ||
| 1408 | |||
| 1404 | /* PixArt optical touch screen */ | 1409 | /* PixArt optical touch screen */ |
| 1405 | { .driver_data = MT_CLS_INRANGE_CONTACTNUMBER, | 1410 | { .driver_data = MT_CLS_INRANGE_CONTACTNUMBER, |
| 1406 | MT_USB_DEVICE(USB_VENDOR_ID_PIXART, | 1411 | MT_USB_DEVICE(USB_VENDOR_ID_PIXART, |
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index 2f1ddca6f2e0..700145b15088 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c | |||
| @@ -516,13 +516,13 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd, | |||
| 516 | goto inval; | 516 | goto inval; |
| 517 | } else if (uref->usage_index >= field->report_count) | 517 | } else if (uref->usage_index >= field->report_count) |
| 518 | goto inval; | 518 | goto inval; |
| 519 | |||
| 520 | else if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) && | ||
| 521 | (uref_multi->num_values > HID_MAX_MULTI_USAGES || | ||
| 522 | uref->usage_index + uref_multi->num_values > field->report_count)) | ||
| 523 | goto inval; | ||
| 524 | } | 519 | } |
| 525 | 520 | ||
| 521 | if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) && | ||
| 522 | (uref_multi->num_values > HID_MAX_MULTI_USAGES || | ||
| 523 | uref->usage_index + uref_multi->num_values > field->report_count)) | ||
| 524 | goto inval; | ||
| 525 | |||
| 526 | switch (cmd) { | 526 | switch (cmd) { |
| 527 | case HIDIOCGUSAGE: | 527 | case HIDIOCGUSAGE: |
| 528 | uref->value = field->value[uref->usage_index]; | 528 | uref->value = field->value[uref->usage_index]; |
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c index c43318d3416e..2ac87d553e22 100644 --- a/drivers/hwmon/dell-smm-hwmon.c +++ b/drivers/hwmon/dell-smm-hwmon.c | |||
| @@ -35,6 +35,7 @@ | |||
| 35 | #include <linux/uaccess.h> | 35 | #include <linux/uaccess.h> |
| 36 | #include <linux/io.h> | 36 | #include <linux/io.h> |
| 37 | #include <linux/sched.h> | 37 | #include <linux/sched.h> |
| 38 | #include <linux/ctype.h> | ||
| 38 | 39 | ||
| 39 | #include <linux/i8k.h> | 40 | #include <linux/i8k.h> |
| 40 | 41 | ||
| @@ -66,11 +67,13 @@ | |||
| 66 | 67 | ||
| 67 | static DEFINE_MUTEX(i8k_mutex); | 68 | static DEFINE_MUTEX(i8k_mutex); |
| 68 | static char bios_version[4]; | 69 | static char bios_version[4]; |
| 70 | static char bios_machineid[16]; | ||
| 69 | static struct device *i8k_hwmon_dev; | 71 | static struct device *i8k_hwmon_dev; |
| 70 | static u32 i8k_hwmon_flags; | 72 | static u32 i8k_hwmon_flags; |
| 71 | static uint i8k_fan_mult = I8K_FAN_MULT; | 73 | static uint i8k_fan_mult = I8K_FAN_MULT; |
| 72 | static uint i8k_pwm_mult; | 74 | static uint i8k_pwm_mult; |
| 73 | static uint i8k_fan_max = I8K_FAN_HIGH; | 75 | static uint i8k_fan_max = I8K_FAN_HIGH; |
| 76 | static bool disallow_fan_type_call; | ||
| 74 | 77 | ||
| 75 | #define I8K_HWMON_HAVE_TEMP1 (1 << 0) | 78 | #define I8K_HWMON_HAVE_TEMP1 (1 << 0) |
| 76 | #define I8K_HWMON_HAVE_TEMP2 (1 << 1) | 79 | #define I8K_HWMON_HAVE_TEMP2 (1 << 1) |
| @@ -94,13 +97,13 @@ module_param(ignore_dmi, bool, 0); | |||
| 94 | MODULE_PARM_DESC(ignore_dmi, "Continue probing hardware even if DMI data does not match"); | 97 | MODULE_PARM_DESC(ignore_dmi, "Continue probing hardware even if DMI data does not match"); |
| 95 | 98 | ||
| 96 | #if IS_ENABLED(CONFIG_I8K) | 99 | #if IS_ENABLED(CONFIG_I8K) |
| 97 | static bool restricted; | 100 | static bool restricted = true; |
| 98 | module_param(restricted, bool, 0); | 101 | module_param(restricted, bool, 0); |
| 99 | MODULE_PARM_DESC(restricted, "Allow fan control if SYS_ADMIN capability set"); | 102 | MODULE_PARM_DESC(restricted, "Restrict fan control and serial number to CAP_SYS_ADMIN (default: 1)"); |
| 100 | 103 | ||
| 101 | static bool power_status; | 104 | static bool power_status; |
| 102 | module_param(power_status, bool, 0600); | 105 | module_param(power_status, bool, 0600); |
| 103 | MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k"); | 106 | MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k (default: 0)"); |
| 104 | #endif | 107 | #endif |
| 105 | 108 | ||
| 106 | static uint fan_mult; | 109 | static uint fan_mult; |
| @@ -235,14 +238,28 @@ static int i8k_get_fan_speed(int fan) | |||
| 235 | /* | 238 | /* |
| 236 | * Read the fan type. | 239 | * Read the fan type. |
| 237 | */ | 240 | */ |
| 238 | static int i8k_get_fan_type(int fan) | 241 | static int _i8k_get_fan_type(int fan) |
| 239 | { | 242 | { |
| 240 | struct smm_regs regs = { .eax = I8K_SMM_GET_FAN_TYPE, }; | 243 | struct smm_regs regs = { .eax = I8K_SMM_GET_FAN_TYPE, }; |
| 241 | 244 | ||
| 245 | if (disallow_fan_type_call) | ||
| 246 | return -EINVAL; | ||
| 247 | |||
| 242 | regs.ebx = fan & 0xff; | 248 | regs.ebx = fan & 0xff; |
| 243 | return i8k_smm(®s) ? : regs.eax & 0xff; | 249 | return i8k_smm(®s) ? : regs.eax & 0xff; |
| 244 | } | 250 | } |
| 245 | 251 | ||
| 252 | static int i8k_get_fan_type(int fan) | ||
| 253 | { | ||
| 254 | /* I8K_SMM_GET_FAN_TYPE SMM call is expensive, so cache values */ | ||
| 255 | static int types[2] = { INT_MIN, INT_MIN }; | ||
| 256 | |||
| 257 | if (types[fan] == INT_MIN) | ||
| 258 | types[fan] = _i8k_get_fan_type(fan); | ||
| 259 | |||
| 260 | return types[fan]; | ||
| 261 | } | ||
| 262 | |||
| 246 | /* | 263 | /* |
| 247 | * Read the fan nominal rpm for specific fan speed. | 264 | * Read the fan nominal rpm for specific fan speed. |
| 248 | */ | 265 | */ |
| @@ -387,14 +404,20 @@ i8k_ioctl_unlocked(struct file *fp, unsigned int cmd, unsigned long arg) | |||
| 387 | 404 | ||
| 388 | switch (cmd) { | 405 | switch (cmd) { |
| 389 | case I8K_BIOS_VERSION: | 406 | case I8K_BIOS_VERSION: |
| 407 | if (!isdigit(bios_version[0]) || !isdigit(bios_version[1]) || | ||
| 408 | !isdigit(bios_version[2])) | ||
| 409 | return -EINVAL; | ||
| 410 | |||
| 390 | val = (bios_version[0] << 16) | | 411 | val = (bios_version[0] << 16) | |
| 391 | (bios_version[1] << 8) | bios_version[2]; | 412 | (bios_version[1] << 8) | bios_version[2]; |
| 392 | break; | 413 | break; |
| 393 | 414 | ||
| 394 | case I8K_MACHINE_ID: | 415 | case I8K_MACHINE_ID: |
| 395 | memset(buff, 0, 16); | 416 | if (restricted && !capable(CAP_SYS_ADMIN)) |
| 396 | strlcpy(buff, i8k_get_dmi_data(DMI_PRODUCT_SERIAL), | 417 | return -EPERM; |
| 397 | sizeof(buff)); | 418 | |
| 419 | memset(buff, 0, sizeof(buff)); | ||
| 420 | strlcpy(buff, bios_machineid, sizeof(buff)); | ||
| 398 | break; | 421 | break; |
| 399 | 422 | ||
| 400 | case I8K_FN_STATUS: | 423 | case I8K_FN_STATUS: |
| @@ -511,7 +534,7 @@ static int i8k_proc_show(struct seq_file *seq, void *offset) | |||
| 511 | seq_printf(seq, "%s %s %s %d %d %d %d %d %d %d\n", | 534 | seq_printf(seq, "%s %s %s %d %d %d %d %d %d %d\n", |
| 512 | I8K_PROC_FMT, | 535 | I8K_PROC_FMT, |
| 513 | bios_version, | 536 | bios_version, |
| 514 | i8k_get_dmi_data(DMI_PRODUCT_SERIAL), | 537 | (restricted && !capable(CAP_SYS_ADMIN)) ? "-1" : bios_machineid, |
| 515 | cpu_temp, | 538 | cpu_temp, |
| 516 | left_fan, right_fan, left_speed, right_speed, | 539 | left_fan, right_fan, left_speed, right_speed, |
| 517 | ac_power, fn_key); | 540 | ac_power, fn_key); |
| @@ -718,6 +741,9 @@ static struct attribute *i8k_attrs[] = { | |||
| 718 | static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr, | 741 | static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr, |
| 719 | int index) | 742 | int index) |
| 720 | { | 743 | { |
| 744 | if (disallow_fan_type_call && | ||
| 745 | (index == 9 || index == 12)) | ||
| 746 | return 0; | ||
| 721 | if (index >= 0 && index <= 1 && | 747 | if (index >= 0 && index <= 1 && |
| 722 | !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1)) | 748 | !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1)) |
| 723 | return 0; | 749 | return 0; |
| @@ -767,13 +793,17 @@ static int __init i8k_init_hwmon(void) | |||
| 767 | if (err >= 0) | 793 | if (err >= 0) |
| 768 | i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP4; | 794 | i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP4; |
| 769 | 795 | ||
| 770 | /* First fan attributes, if fan type is OK */ | 796 | /* First fan attributes, if fan status or type is OK */ |
| 771 | err = i8k_get_fan_type(0); | 797 | err = i8k_get_fan_status(0); |
| 798 | if (err < 0) | ||
| 799 | err = i8k_get_fan_type(0); | ||
| 772 | if (err >= 0) | 800 | if (err >= 0) |
| 773 | i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN1; | 801 | i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN1; |
| 774 | 802 | ||
| 775 | /* Second fan attributes, if fan type is OK */ | 803 | /* Second fan attributes, if fan status or type is OK */ |
| 776 | err = i8k_get_fan_type(1); | 804 | err = i8k_get_fan_status(1); |
| 805 | if (err < 0) | ||
| 806 | err = i8k_get_fan_type(1); | ||
| 777 | if (err >= 0) | 807 | if (err >= 0) |
| 778 | i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN2; | 808 | i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN2; |
| 779 | 809 | ||
| @@ -929,12 +959,14 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = { | |||
| 929 | 959 | ||
| 930 | MODULE_DEVICE_TABLE(dmi, i8k_dmi_table); | 960 | MODULE_DEVICE_TABLE(dmi, i8k_dmi_table); |
| 931 | 961 | ||
| 932 | static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = { | 962 | /* |
| 963 | * On some machines, once I8K_SMM_GET_FAN_TYPE is issued, the CPU fan speed | ||
| 964 | * randomly goes up and down due to a bug in the Dell SMM or BIOS. Below is a | ||
| 965 | * blacklist of affected Dell machines on which the I8K_SMM_GET_FAN_TYPE call is disallowed. | ||
| 966 | * See bug: https://bugzilla.kernel.org/show_bug.cgi?id=100121 | ||
| 967 | */ | ||
| 968 | static struct dmi_system_id i8k_blacklist_fan_type_dmi_table[] __initdata = { | ||
| 933 | { | 969 | { |
| 934 | /* | ||
| 935 | * CPU fan speed going up and down on Dell Studio XPS 8000 | ||
| 936 | * for unknown reasons. | ||
| 937 | */ | ||
| 938 | .ident = "Dell Studio XPS 8000", | 970 | .ident = "Dell Studio XPS 8000", |
| 939 | .matches = { | 971 | .matches = { |
| 940 | DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | 972 | DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."), |
| @@ -942,16 +974,19 @@ static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = { | |||
| 942 | }, | 974 | }, |
| 943 | }, | 975 | }, |
| 944 | { | 976 | { |
| 945 | /* | ||
| 946 | * CPU fan speed going up and down on Dell Studio XPS 8100 | ||
| 947 | * for unknown reasons. | ||
| 948 | */ | ||
| 949 | .ident = "Dell Studio XPS 8100", | 977 | .ident = "Dell Studio XPS 8100", |
| 950 | .matches = { | 978 | .matches = { |
| 951 | DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | 979 | DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."), |
| 952 | DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8100"), | 980 | DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8100"), |
| 953 | }, | 981 | }, |
| 954 | }, | 982 | }, |
| 983 | { | ||
| 984 | .ident = "Dell Inspiron 580", | ||
| 985 | .matches = { | ||
| 986 | DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
| 987 | DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Inspiron 580 "), | ||
| 988 | }, | ||
| 989 | }, | ||
| 955 | { } | 990 | { } |
| 956 | }; | 991 | }; |
| 957 | 992 | ||
| @@ -966,8 +1001,7 @@ static int __init i8k_probe(void) | |||
| 966 | /* | 1001 | /* |
| 967 | * Get DMI information | 1002 | * Get DMI information |
| 968 | */ | 1003 | */ |
| 969 | if (!dmi_check_system(i8k_dmi_table) || | 1004 | if (!dmi_check_system(i8k_dmi_table)) { |
| 970 | dmi_check_system(i8k_blacklist_dmi_table)) { | ||
| 971 | if (!ignore_dmi && !force) | 1005 | if (!ignore_dmi && !force) |
| 972 | return -ENODEV; | 1006 | return -ENODEV; |
| 973 | 1007 | ||
| @@ -978,8 +1012,13 @@ static int __init i8k_probe(void) | |||
| 978 | i8k_get_dmi_data(DMI_BIOS_VERSION)); | 1012 | i8k_get_dmi_data(DMI_BIOS_VERSION)); |
| 979 | } | 1013 | } |
| 980 | 1014 | ||
| 1015 | if (dmi_check_system(i8k_blacklist_fan_type_dmi_table)) | ||
| 1016 | disallow_fan_type_call = true; | ||
| 1017 | |||
| 981 | strlcpy(bios_version, i8k_get_dmi_data(DMI_BIOS_VERSION), | 1018 | strlcpy(bios_version, i8k_get_dmi_data(DMI_BIOS_VERSION), |
| 982 | sizeof(bios_version)); | 1019 | sizeof(bios_version)); |
| 1020 | strlcpy(bios_machineid, i8k_get_dmi_data(DMI_PRODUCT_SERIAL), | ||
| 1021 | sizeof(bios_machineid)); | ||
| 983 | 1022 | ||
| 984 | /* | 1023 | /* |
| 985 | * Get SMM Dell signature | 1024 | * Get SMM Dell signature |
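Note on the i8k changes above: the serial number (I8K_MACHINE_ID and the /proc/i8k field) is now cached in bios_machineid at probe time and only shown to readers with CAP_SYS_ADMIN when `restricted` is set, which now defaults to true. A minimal sketch of that capability gate; the helper name and parameters are illustrative, not taken from the driver:

	#include <linux/capability.h>
	#include <linux/errno.h>
	#include <linux/string.h>
	#include <linux/types.h>

	/* Sketch only: expose a serial string to privileged readers. */
	static int example_copy_serial(char *dst, size_t len, bool restricted,
				       const char *serial)
	{
		if (restricted && !capable(CAP_SYS_ADMIN))
			return -EPERM;

		strlcpy(dst, serial, len);
		return 0;
	}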
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c index eb97a9241d17..15aa49d082c4 100644 --- a/drivers/hwmon/fam15h_power.c +++ b/drivers/hwmon/fam15h_power.c | |||
| @@ -172,9 +172,9 @@ static void do_read_registers_on_cu(void *_data) | |||
| 172 | */ | 172 | */ |
| 173 | static int read_registers(struct fam15h_power_data *data) | 173 | static int read_registers(struct fam15h_power_data *data) |
| 174 | { | 174 | { |
| 175 | int this_cpu, ret, cpu; | ||
| 176 | int core, this_core; | 175 | int core, this_core; |
| 177 | cpumask_var_t mask; | 176 | cpumask_var_t mask; |
| 177 | int ret, cpu; | ||
| 178 | 178 | ||
| 179 | ret = zalloc_cpumask_var(&mask, GFP_KERNEL); | 179 | ret = zalloc_cpumask_var(&mask, GFP_KERNEL); |
| 180 | if (!ret) | 180 | if (!ret) |
| @@ -183,7 +183,6 @@ static int read_registers(struct fam15h_power_data *data) | |||
| 183 | memset(data->cu_on, 0, sizeof(int) * MAX_CUS); | 183 | memset(data->cu_on, 0, sizeof(int) * MAX_CUS); |
| 184 | 184 | ||
| 185 | get_online_cpus(); | 185 | get_online_cpus(); |
| 186 | this_cpu = smp_processor_id(); | ||
| 187 | 186 | ||
| 188 | /* | 187 | /* |
| 189 | * Choose the first online core of each compute unit, and then | 188 | * Choose the first online core of each compute unit, and then |
| @@ -205,12 +204,9 @@ static int read_registers(struct fam15h_power_data *data) | |||
| 205 | cpumask_set_cpu(cpumask_any(topology_sibling_cpumask(cpu)), mask); | 204 | cpumask_set_cpu(cpumask_any(topology_sibling_cpumask(cpu)), mask); |
| 206 | } | 205 | } |
| 207 | 206 | ||
| 208 | if (cpumask_test_cpu(this_cpu, mask)) | 207 | on_each_cpu_mask(mask, do_read_registers_on_cu, data, true); |
| 209 | do_read_registers_on_cu(data); | ||
| 210 | 208 | ||
| 211 | smp_call_function_many(mask, do_read_registers_on_cu, data, true); | ||
| 212 | put_online_cpus(); | 209 | put_online_cpus(); |
| 213 | |||
| 214 | free_cpumask_var(mask); | 210 | free_cpumask_var(mask); |
| 215 | 211 | ||
| 216 | return 0; | 212 | return 0; |
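For context on the fam15h_power hunks above: on_each_cpu_mask() runs the callback on every CPU in the mask and also on the calling CPU if it belongs to the mask, so the separate smp_processor_id()/direct-call special case becomes unnecessary. A hedged sketch of the call pattern; the callback and the choice of CPU are illustrative, not from the driver:

	#include <linux/cpu.h>
	#include <linux/cpumask.h>
	#include <linux/slab.h>
	#include <linux/smp.h>

	static void example_read_on_cpu(void *info)
	{
		/* Runs on every CPU in the mask, including the caller if set. */
	}

	static int example_read_registers(void)
	{
		cpumask_var_t mask;

		if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
			return -ENOMEM;

		get_online_cpus();
		/* Illustration: pick one online CPU to run the callback on. */
		cpumask_set_cpu(cpumask_first(cpu_online_mask), mask);
		on_each_cpu_mask(mask, example_read_on_cpu, NULL, true);
		put_online_cpus();

		free_cpumask_var(mask);
		return 0;
	}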
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c index c9ff08dbe10c..e30a5939dc0d 100644 --- a/drivers/hwmon/lm90.c +++ b/drivers/hwmon/lm90.c | |||
| @@ -375,7 +375,7 @@ struct lm90_data { | |||
| 375 | int kind; | 375 | int kind; |
| 376 | u32 flags; | 376 | u32 flags; |
| 377 | 377 | ||
| 378 | int update_interval; /* in milliseconds */ | 378 | unsigned int update_interval; /* in milliseconds */ |
| 379 | 379 | ||
| 380 | u8 config_orig; /* Original configuration register value */ | 380 | u8 config_orig; /* Original configuration register value */ |
| 381 | u8 convrate_orig; /* Original conversion rate register value */ | 381 | u8 convrate_orig; /* Original conversion rate register value */ |
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c index 847d1b5f2c13..688be9e060fc 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-etr.c +++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c | |||
| @@ -300,13 +300,10 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata) | |||
| 300 | if (local_read(&drvdata->mode) == CS_MODE_SYSFS) { | 300 | if (local_read(&drvdata->mode) == CS_MODE_SYSFS) { |
| 301 | /* | 301 | /* |
| 302 | * The trace run will continue with the same allocated trace | 302 | * The trace run will continue with the same allocated trace |
| 303 | * buffer. As such zero-out the buffer so that we don't end | 303 | * buffer. The trace buffer is cleared in tmc_etr_enable_hw(), |
| 304 | * up with stale data. | 304 | * so we don't have to explicitly clear it. Also, since the |
| 305 | * | 305 | * tracer is still enabled drvdata::buf can't be NULL. |
| 306 | * Since the tracer is still enabled drvdata::buf | ||
| 307 | * can't be NULL. | ||
| 308 | */ | 306 | */ |
| 309 | memset(drvdata->buf, 0, drvdata->size); | ||
| 310 | tmc_etr_enable_hw(drvdata); | 307 | tmc_etr_enable_hw(drvdata); |
| 311 | } else { | 308 | } else { |
| 312 | /* | 309 | /* |
| @@ -315,7 +312,7 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata) | |||
| 315 | */ | 312 | */ |
| 316 | vaddr = drvdata->vaddr; | 313 | vaddr = drvdata->vaddr; |
| 317 | paddr = drvdata->paddr; | 314 | paddr = drvdata->paddr; |
| 318 | drvdata->buf = NULL; | 315 | drvdata->buf = drvdata->vaddr = NULL; |
| 319 | } | 316 | } |
| 320 | 317 | ||
| 321 | drvdata->reading = false; | 318 | drvdata->reading = false; |
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c index 5443d03a1eec..d08d1ab9bba5 100644 --- a/drivers/hwtracing/coresight/coresight.c +++ b/drivers/hwtracing/coresight/coresight.c | |||
| @@ -385,7 +385,6 @@ static int _coresight_build_path(struct coresight_device *csdev, | |||
| 385 | int i; | 385 | int i; |
| 386 | bool found = false; | 386 | bool found = false; |
| 387 | struct coresight_node *node; | 387 | struct coresight_node *node; |
| 388 | struct coresight_connection *conn; | ||
| 389 | 388 | ||
| 390 | /* An activated sink has been found. Enqueue the element */ | 389 | /* An activated sink has been found. Enqueue the element */ |
| 391 | if ((csdev->type == CORESIGHT_DEV_TYPE_SINK || | 390 | if ((csdev->type == CORESIGHT_DEV_TYPE_SINK || |
| @@ -394,8 +393,9 @@ static int _coresight_build_path(struct coresight_device *csdev, | |||
| 394 | 393 | ||
| 395 | /* Not a sink - recursively explore each port found on this element */ | 394 | /* Not a sink - recursively explore each port found on this element */ |
| 396 | for (i = 0; i < csdev->nr_outport; i++) { | 395 | for (i = 0; i < csdev->nr_outport; i++) { |
| 397 | conn = &csdev->conns[i]; | 396 | struct coresight_device *child_dev = csdev->conns[i].child_dev; |
| 398 | if (_coresight_build_path(conn->child_dev, path) == 0) { | 397 | |
| 398 | if (child_dev && _coresight_build_path(child_dev, path) == 0) { | ||
| 399 | found = true; | 399 | found = true; |
| 400 | break; | 400 | break; |
| 401 | } | 401 | } |
| @@ -425,6 +425,7 @@ out: | |||
| 425 | struct list_head *coresight_build_path(struct coresight_device *csdev) | 425 | struct list_head *coresight_build_path(struct coresight_device *csdev) |
| 426 | { | 426 | { |
| 427 | struct list_head *path; | 427 | struct list_head *path; |
| 428 | int rc; | ||
| 428 | 429 | ||
| 429 | path = kzalloc(sizeof(struct list_head), GFP_KERNEL); | 430 | path = kzalloc(sizeof(struct list_head), GFP_KERNEL); |
| 430 | if (!path) | 431 | if (!path) |
| @@ -432,9 +433,10 @@ struct list_head *coresight_build_path(struct coresight_device *csdev) | |||
| 432 | 433 | ||
| 433 | INIT_LIST_HEAD(path); | 434 | INIT_LIST_HEAD(path); |
| 434 | 435 | ||
| 435 | if (_coresight_build_path(csdev, path)) { | 436 | rc = _coresight_build_path(csdev, path); |
| 437 | if (rc) { | ||
| 436 | kfree(path); | 438 | kfree(path); |
| 437 | path = NULL; | 439 | return ERR_PTR(rc); |
| 438 | } | 440 | } |
| 439 | 441 | ||
| 440 | return path; | 442 | return path; |
| @@ -507,8 +509,9 @@ int coresight_enable(struct coresight_device *csdev) | |||
| 507 | goto out; | 509 | goto out; |
| 508 | 510 | ||
| 509 | path = coresight_build_path(csdev); | 511 | path = coresight_build_path(csdev); |
| 510 | if (!path) { | 512 | if (IS_ERR(path)) { |
| 511 | pr_err("building path(s) failed\n"); | 513 | pr_err("building path(s) failed\n"); |
| 514 | ret = PTR_ERR(path); | ||
| 512 | goto out; | 515 | goto out; |
| 513 | } | 516 | } |
| 514 | 517 | ||
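The coresight change above switches coresight_build_path() from returning NULL on any failure to returning an ERR_PTR-encoded error, so callers can propagate the real reason (for example -ENOMEM versus a failed path walk). A minimal sketch of that kernel convention, with all names illustrative:

	#include <linux/err.h>
	#include <linux/errno.h>
	#include <linux/list.h>
	#include <linux/slab.h>

	static struct list_head *example_build_path(bool walk_failed)
	{
		struct list_head *path = kzalloc(sizeof(*path), GFP_KERNEL);

		if (!path)
			return ERR_PTR(-ENOMEM);

		INIT_LIST_HEAD(path);
		if (walk_failed) {
			kfree(path);
			return ERR_PTR(-ENODEV);
		}
		return path;
	}

	static int example_enable(void)
	{
		struct list_head *path = example_build_path(false);

		if (IS_ERR(path))
			return PTR_ERR(path);	/* caller sees the real error */

		kfree(path);
		return 0;
	}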
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 64b1208bca5e..4a60ad214747 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c | |||
| @@ -245,6 +245,13 @@ struct i801_priv { | |||
| 245 | struct platform_device *mux_pdev; | 245 | struct platform_device *mux_pdev; |
| 246 | #endif | 246 | #endif |
| 247 | struct platform_device *tco_pdev; | 247 | struct platform_device *tco_pdev; |
| 248 | |||
| 249 | /* | ||
| 250 | * If set to true the host controller registers are reserved for | ||
| 251 | * ACPI AML use. Protected by acpi_lock. | ||
| 252 | */ | ||
| 253 | bool acpi_reserved; | ||
| 254 | struct mutex acpi_lock; | ||
| 248 | }; | 255 | }; |
| 249 | 256 | ||
| 250 | #define FEATURE_SMBUS_PEC (1 << 0) | 257 | #define FEATURE_SMBUS_PEC (1 << 0) |
| @@ -718,6 +725,12 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr, | |||
| 718 | int ret = 0, xact = 0; | 725 | int ret = 0, xact = 0; |
| 719 | struct i801_priv *priv = i2c_get_adapdata(adap); | 726 | struct i801_priv *priv = i2c_get_adapdata(adap); |
| 720 | 727 | ||
| 728 | mutex_lock(&priv->acpi_lock); | ||
| 729 | if (priv->acpi_reserved) { | ||
| 730 | mutex_unlock(&priv->acpi_lock); | ||
| 731 | return -EBUSY; | ||
| 732 | } | ||
| 733 | |||
| 721 | pm_runtime_get_sync(&priv->pci_dev->dev); | 734 | pm_runtime_get_sync(&priv->pci_dev->dev); |
| 722 | 735 | ||
| 723 | hwpec = (priv->features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC) | 736 | hwpec = (priv->features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC) |
| @@ -820,6 +833,7 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr, | |||
| 820 | out: | 833 | out: |
| 821 | pm_runtime_mark_last_busy(&priv->pci_dev->dev); | 834 | pm_runtime_mark_last_busy(&priv->pci_dev->dev); |
| 822 | pm_runtime_put_autosuspend(&priv->pci_dev->dev); | 835 | pm_runtime_put_autosuspend(&priv->pci_dev->dev); |
| 836 | mutex_unlock(&priv->acpi_lock); | ||
| 823 | return ret; | 837 | return ret; |
| 824 | } | 838 | } |
| 825 | 839 | ||
| @@ -1257,6 +1271,83 @@ static void i801_add_tco(struct i801_priv *priv) | |||
| 1257 | priv->tco_pdev = pdev; | 1271 | priv->tco_pdev = pdev; |
| 1258 | } | 1272 | } |
| 1259 | 1273 | ||
| 1274 | #ifdef CONFIG_ACPI | ||
| 1275 | static acpi_status | ||
| 1276 | i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits, | ||
| 1277 | u64 *value, void *handler_context, void *region_context) | ||
| 1278 | { | ||
| 1279 | struct i801_priv *priv = handler_context; | ||
| 1280 | struct pci_dev *pdev = priv->pci_dev; | ||
| 1281 | acpi_status status; | ||
| 1282 | |||
| 1283 | /* | ||
| 1284 | * Once BIOS AML code touches the OpRegion we warn and inhibit any | ||
| 1285 | * further access from the driver itself. This device is now owned | ||
| 1286 | * by the system firmware. | ||
| 1287 | */ | ||
| 1288 | mutex_lock(&priv->acpi_lock); | ||
| 1289 | |||
| 1290 | if (!priv->acpi_reserved) { | ||
| 1291 | priv->acpi_reserved = true; | ||
| 1292 | |||
| 1293 | dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n"); | ||
| 1294 | dev_warn(&pdev->dev, "Driver SMBus register access inhibited\n"); | ||
| 1295 | |||
| 1296 | /* | ||
| 1297 | * BIOS is accessing the host controller so prevent it from | ||
| 1298 | * suspending automatically from now on. | ||
| 1299 | */ | ||
| 1300 | pm_runtime_get_sync(&pdev->dev); | ||
| 1301 | } | ||
| 1302 | |||
| 1303 | if ((function & ACPI_IO_MASK) == ACPI_READ) | ||
| 1304 | status = acpi_os_read_port(address, (u32 *)value, bits); | ||
| 1305 | else | ||
| 1306 | status = acpi_os_write_port(address, (u32)*value, bits); | ||
| 1307 | |||
| 1308 | mutex_unlock(&priv->acpi_lock); | ||
| 1309 | |||
| 1310 | return status; | ||
| 1311 | } | ||
| 1312 | |||
| 1313 | static int i801_acpi_probe(struct i801_priv *priv) | ||
| 1314 | { | ||
| 1315 | struct acpi_device *adev; | ||
| 1316 | acpi_status status; | ||
| 1317 | |||
| 1318 | adev = ACPI_COMPANION(&priv->pci_dev->dev); | ||
| 1319 | if (adev) { | ||
| 1320 | status = acpi_install_address_space_handler(adev->handle, | ||
| 1321 | ACPI_ADR_SPACE_SYSTEM_IO, i801_acpi_io_handler, | ||
| 1322 | NULL, priv); | ||
| 1323 | if (ACPI_SUCCESS(status)) | ||
| 1324 | return 0; | ||
| 1325 | } | ||
| 1326 | |||
| 1327 | return acpi_check_resource_conflict(&priv->pci_dev->resource[SMBBAR]); | ||
| 1328 | } | ||
| 1329 | |||
| 1330 | static void i801_acpi_remove(struct i801_priv *priv) | ||
| 1331 | { | ||
| 1332 | struct acpi_device *adev; | ||
| 1333 | |||
| 1334 | adev = ACPI_COMPANION(&priv->pci_dev->dev); | ||
| 1335 | if (!adev) | ||
| 1336 | return; | ||
| 1337 | |||
| 1338 | acpi_remove_address_space_handler(adev->handle, | ||
| 1339 | ACPI_ADR_SPACE_SYSTEM_IO, i801_acpi_io_handler); | ||
| 1340 | |||
| 1341 | mutex_lock(&priv->acpi_lock); | ||
| 1342 | if (priv->acpi_reserved) | ||
| 1343 | pm_runtime_put(&priv->pci_dev->dev); | ||
| 1344 | mutex_unlock(&priv->acpi_lock); | ||
| 1345 | } | ||
| 1346 | #else | ||
| 1347 | static inline int i801_acpi_probe(struct i801_priv *priv) { return 0; } | ||
| 1348 | static inline void i801_acpi_remove(struct i801_priv *priv) { } | ||
| 1349 | #endif | ||
| 1350 | |||
| 1260 | static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) | 1351 | static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) |
| 1261 | { | 1352 | { |
| 1262 | unsigned char temp; | 1353 | unsigned char temp; |
| @@ -1274,6 +1365,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
| 1274 | priv->adapter.dev.parent = &dev->dev; | 1365 | priv->adapter.dev.parent = &dev->dev; |
| 1275 | ACPI_COMPANION_SET(&priv->adapter.dev, ACPI_COMPANION(&dev->dev)); | 1366 | ACPI_COMPANION_SET(&priv->adapter.dev, ACPI_COMPANION(&dev->dev)); |
| 1276 | priv->adapter.retries = 3; | 1367 | priv->adapter.retries = 3; |
| 1368 | mutex_init(&priv->acpi_lock); | ||
| 1277 | 1369 | ||
| 1278 | priv->pci_dev = dev; | 1370 | priv->pci_dev = dev; |
| 1279 | switch (dev->device) { | 1371 | switch (dev->device) { |
| @@ -1336,10 +1428,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
| 1336 | return -ENODEV; | 1428 | return -ENODEV; |
| 1337 | } | 1429 | } |
| 1338 | 1430 | ||
| 1339 | err = acpi_check_resource_conflict(&dev->resource[SMBBAR]); | 1431 | if (i801_acpi_probe(priv)) |
| 1340 | if (err) { | ||
| 1341 | return -ENODEV; | 1432 | return -ENODEV; |
| 1342 | } | ||
| 1343 | 1433 | ||
| 1344 | err = pcim_iomap_regions(dev, 1 << SMBBAR, | 1434 | err = pcim_iomap_regions(dev, 1 << SMBBAR, |
| 1345 | dev_driver_string(&dev->dev)); | 1435 | dev_driver_string(&dev->dev)); |
| @@ -1348,6 +1438,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
| 1348 | "Failed to request SMBus region 0x%lx-0x%Lx\n", | 1438 | "Failed to request SMBus region 0x%lx-0x%Lx\n", |
| 1349 | priv->smba, | 1439 | priv->smba, |
| 1350 | (unsigned long long)pci_resource_end(dev, SMBBAR)); | 1440 | (unsigned long long)pci_resource_end(dev, SMBBAR)); |
| 1441 | i801_acpi_remove(priv); | ||
| 1351 | return err; | 1442 | return err; |
| 1352 | } | 1443 | } |
| 1353 | 1444 | ||
| @@ -1412,6 +1503,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
| 1412 | err = i2c_add_adapter(&priv->adapter); | 1503 | err = i2c_add_adapter(&priv->adapter); |
| 1413 | if (err) { | 1504 | if (err) { |
| 1414 | dev_err(&dev->dev, "Failed to add SMBus adapter\n"); | 1505 | dev_err(&dev->dev, "Failed to add SMBus adapter\n"); |
| 1506 | i801_acpi_remove(priv); | ||
| 1415 | return err; | 1507 | return err; |
| 1416 | } | 1508 | } |
| 1417 | 1509 | ||
| @@ -1438,6 +1530,7 @@ static void i801_remove(struct pci_dev *dev) | |||
| 1438 | 1530 | ||
| 1439 | i801_del_mux(priv); | 1531 | i801_del_mux(priv); |
| 1440 | i2c_del_adapter(&priv->adapter); | 1532 | i2c_del_adapter(&priv->adapter); |
| 1533 | i801_acpi_remove(priv); | ||
| 1441 | pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg); | 1534 | pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg); |
| 1442 | 1535 | ||
| 1443 | platform_device_unregister(priv->tco_pdev); | 1536 | platform_device_unregister(priv->tco_pdev); |
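The i2c-i801 hunks above add an acpi_reserved flag protected by acpi_lock: the transfer path checks the flag under the mutex and bails out with -EBUSY once the ACPI AML handler has claimed the registers, and the whole transaction runs with the mutex held so it cannot race with a concurrent AML access. A compressed sketch of that ordering; the struct and function names here are illustrative stand-ins:

	#include <linux/errno.h>
	#include <linux/mutex.h>

	struct example_host {
		struct mutex acpi_lock;
		bool acpi_reserved;	/* set once firmware claims the registers */
	};

	static int example_xfer(struct example_host *host)
	{
		int ret = 0;

		mutex_lock(&host->acpi_lock);
		if (host->acpi_reserved) {
			mutex_unlock(&host->acpi_lock);
			return -EBUSY;
		}

		/* ... the SMBus transaction runs here, still under the lock ... */

		mutex_unlock(&host->acpi_lock);
		return ret;
	}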
diff --git a/drivers/i2c/busses/i2c-octeon.c b/drivers/i2c/busses/i2c-octeon.c index aa5f01efd826..30ae35146723 100644 --- a/drivers/i2c/busses/i2c-octeon.c +++ b/drivers/i2c/busses/i2c-octeon.c | |||
| @@ -934,8 +934,15 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target, | |||
| 934 | return result; | 934 | return result; |
| 935 | 935 | ||
| 936 | for (i = 0; i < length; i++) { | 936 | for (i = 0; i < length; i++) { |
| 937 | /* for the last byte TWSI_CTL_AAK must not be set */ | 937 | /* |
| 938 | if (i + 1 == length) | 938 | * For the last byte to receive TWSI_CTL_AAK must not be set. |
| 939 | * | ||
| 940 | * A special case is I2C_M_RECV_LEN where we don't know the | ||
| 941 | * additional length yet. If recv_len is set we assume we're | ||
| 942 | * not reading the final byte and therefore need to set | ||
| 943 | * TWSI_CTL_AAK. | ||
| 944 | */ | ||
| 945 | if ((i + 1 == length) && !(recv_len && i == 0)) | ||
| 939 | final_read = true; | 946 | final_read = true; |
| 940 | 947 | ||
| 941 | /* clear iflg to allow next event */ | 948 | /* clear iflg to allow next event */ |
| @@ -950,12 +957,8 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target, | |||
| 950 | 957 | ||
| 951 | data[i] = octeon_i2c_data_read(i2c); | 958 | data[i] = octeon_i2c_data_read(i2c); |
| 952 | if (recv_len && i == 0) { | 959 | if (recv_len && i == 0) { |
| 953 | if (data[i] > I2C_SMBUS_BLOCK_MAX + 1) { | 960 | if (data[i] > I2C_SMBUS_BLOCK_MAX + 1) |
| 954 | dev_err(i2c->dev, | ||
| 955 | "%s: read len > I2C_SMBUS_BLOCK_MAX %d\n", | ||
| 956 | __func__, data[i]); | ||
| 957 | return -EPROTO; | 961 | return -EPROTO; |
| 958 | } | ||
| 959 | length += data[i]; | 962 | length += data[i]; |
| 960 | } | 963 | } |
| 961 | 964 | ||
diff --git a/drivers/i2c/muxes/i2c-mux-reg.c b/drivers/i2c/muxes/i2c-mux-reg.c index 6773cadf7c9f..26e7c5187a58 100644 --- a/drivers/i2c/muxes/i2c-mux-reg.c +++ b/drivers/i2c/muxes/i2c-mux-reg.c | |||
| @@ -260,6 +260,7 @@ static struct platform_driver i2c_mux_reg_driver = { | |||
| 260 | .remove = i2c_mux_reg_remove, | 260 | .remove = i2c_mux_reg_remove, |
| 261 | .driver = { | 261 | .driver = { |
| 262 | .name = "i2c-mux-reg", | 262 | .name = "i2c-mux-reg", |
| 263 | .of_match_table = of_match_ptr(i2c_mux_reg_of_match), | ||
| 263 | }, | 264 | }, |
| 264 | }; | 265 | }; |
| 265 | 266 | ||
diff --git a/drivers/iio/accel/st_accel_buffer.c b/drivers/iio/accel/st_accel_buffer.c index a1e642ee13d6..7fddc137e91e 100644 --- a/drivers/iio/accel/st_accel_buffer.c +++ b/drivers/iio/accel/st_accel_buffer.c | |||
| @@ -91,7 +91,7 @@ static const struct iio_buffer_setup_ops st_accel_buffer_setup_ops = { | |||
| 91 | 91 | ||
| 92 | int st_accel_allocate_ring(struct iio_dev *indio_dev) | 92 | int st_accel_allocate_ring(struct iio_dev *indio_dev) |
| 93 | { | 93 | { |
| 94 | return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time, | 94 | return iio_triggered_buffer_setup(indio_dev, NULL, |
| 95 | &st_sensors_trigger_handler, &st_accel_buffer_setup_ops); | 95 | &st_sensors_trigger_handler, &st_accel_buffer_setup_ops); |
| 96 | } | 96 | } |
| 97 | 97 | ||
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c index dc73f2d85e6d..4d95bfc4786c 100644 --- a/drivers/iio/accel/st_accel_core.c +++ b/drivers/iio/accel/st_accel_core.c | |||
| @@ -741,6 +741,7 @@ static const struct iio_info accel_info = { | |||
| 741 | static const struct iio_trigger_ops st_accel_trigger_ops = { | 741 | static const struct iio_trigger_ops st_accel_trigger_ops = { |
| 742 | .owner = THIS_MODULE, | 742 | .owner = THIS_MODULE, |
| 743 | .set_trigger_state = ST_ACCEL_TRIGGER_SET_STATE, | 743 | .set_trigger_state = ST_ACCEL_TRIGGER_SET_STATE, |
| 744 | .validate_device = st_sensors_validate_device, | ||
| 744 | }; | 745 | }; |
| 745 | #define ST_ACCEL_TRIGGER_OPS (&st_accel_trigger_ops) | 746 | #define ST_ACCEL_TRIGGER_OPS (&st_accel_trigger_ops) |
| 746 | #else | 747 | #else |
diff --git a/drivers/iio/common/st_sensors/st_sensors_buffer.c b/drivers/iio/common/st_sensors/st_sensors_buffer.c index c55898543a47..f1693dbebb8a 100644 --- a/drivers/iio/common/st_sensors/st_sensors_buffer.c +++ b/drivers/iio/common/st_sensors/st_sensors_buffer.c | |||
| @@ -57,31 +57,20 @@ irqreturn_t st_sensors_trigger_handler(int irq, void *p) | |||
| 57 | struct iio_poll_func *pf = p; | 57 | struct iio_poll_func *pf = p; |
| 58 | struct iio_dev *indio_dev = pf->indio_dev; | 58 | struct iio_dev *indio_dev = pf->indio_dev; |
| 59 | struct st_sensor_data *sdata = iio_priv(indio_dev); | 59 | struct st_sensor_data *sdata = iio_priv(indio_dev); |
| 60 | s64 timestamp; | ||
| 60 | 61 | ||
| 61 | /* If we have a status register, check if this IRQ came from us */ | 62 | /* If we do timestamping here, do it before reading the values */ |
| 62 | if (sdata->sensor_settings->drdy_irq.addr_stat_drdy) { | 63 | if (sdata->hw_irq_trigger) |
| 63 | u8 status; | 64 | timestamp = sdata->hw_timestamp; |
| 64 | 65 | else | |
| 65 | len = sdata->tf->read_byte(&sdata->tb, sdata->dev, | 66 | timestamp = iio_get_time_ns(); |
| 66 | sdata->sensor_settings->drdy_irq.addr_stat_drdy, | ||
| 67 | &status); | ||
| 68 | if (len < 0) | ||
| 69 | dev_err(sdata->dev, "could not read channel status\n"); | ||
| 70 | |||
| 71 | /* | ||
| 72 | * If this was not caused by any channels on this sensor, | ||
| 73 | * return IRQ_NONE | ||
| 74 | */ | ||
| 75 | if (!(status & (u8)indio_dev->active_scan_mask[0])) | ||
| 76 | return IRQ_NONE; | ||
| 77 | } | ||
| 78 | 67 | ||
| 79 | len = st_sensors_get_buffer_element(indio_dev, sdata->buffer_data); | 68 | len = st_sensors_get_buffer_element(indio_dev, sdata->buffer_data); |
| 80 | if (len < 0) | 69 | if (len < 0) |
| 81 | goto st_sensors_get_buffer_element_error; | 70 | goto st_sensors_get_buffer_element_error; |
| 82 | 71 | ||
| 83 | iio_push_to_buffers_with_timestamp(indio_dev, sdata->buffer_data, | 72 | iio_push_to_buffers_with_timestamp(indio_dev, sdata->buffer_data, |
| 84 | pf->timestamp); | 73 | timestamp); |
| 85 | 74 | ||
| 86 | st_sensors_get_buffer_element_error: | 75 | st_sensors_get_buffer_element_error: |
| 87 | iio_trigger_notify_done(indio_dev->trig); | 76 | iio_trigger_notify_done(indio_dev->trig); |
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c index dffe00692169..9e59c90f6a8d 100644 --- a/drivers/iio/common/st_sensors/st_sensors_core.c +++ b/drivers/iio/common/st_sensors/st_sensors_core.c | |||
| @@ -363,6 +363,11 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev, | |||
| 363 | if (err < 0) | 363 | if (err < 0) |
| 364 | return err; | 364 | return err; |
| 365 | 365 | ||
| 366 | /* Disable DRDY, this might still be enabled after reboot. */ | ||
| 367 | err = st_sensors_set_dataready_irq(indio_dev, false); | ||
| 368 | if (err < 0) | ||
| 369 | return err; | ||
| 370 | |||
| 366 | if (sdata->current_fullscale) { | 371 | if (sdata->current_fullscale) { |
| 367 | err = st_sensors_set_fullscale(indio_dev, | 372 | err = st_sensors_set_fullscale(indio_dev, |
| 368 | sdata->current_fullscale->num); | 373 | sdata->current_fullscale->num); |
| @@ -424,6 +429,9 @@ int st_sensors_set_dataready_irq(struct iio_dev *indio_dev, bool enable) | |||
| 424 | else | 429 | else |
| 425 | drdy_mask = sdata->sensor_settings->drdy_irq.mask_int2; | 430 | drdy_mask = sdata->sensor_settings->drdy_irq.mask_int2; |
| 426 | 431 | ||
| 432 | /* Flag to the poll function that the hardware trigger is in use */ | ||
| 433 | sdata->hw_irq_trigger = enable; | ||
| 434 | |||
| 427 | /* Enable/Disable the interrupt generator for data ready. */ | 435 | /* Enable/Disable the interrupt generator for data ready. */ |
| 428 | err = st_sensors_write_data_with_mask(indio_dev, | 436 | err = st_sensors_write_data_with_mask(indio_dev, |
| 429 | sdata->sensor_settings->drdy_irq.addr, | 437 | sdata->sensor_settings->drdy_irq.addr, |
diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c index da72279fcf99..296e4ff19ae8 100644 --- a/drivers/iio/common/st_sensors/st_sensors_trigger.c +++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c | |||
| @@ -17,6 +17,73 @@ | |||
| 17 | #include <linux/iio/common/st_sensors.h> | 17 | #include <linux/iio/common/st_sensors.h> |
| 18 | #include "st_sensors_core.h" | 18 | #include "st_sensors_core.h" |
| 19 | 19 | ||
| 20 | /** | ||
| 21 | * st_sensors_irq_handler() - top half of the IRQ-based triggers | ||
| 22 | * @irq: irq number | ||
| 23 | * @p: private handler data | ||
| 24 | */ | ||
| 25 | irqreturn_t st_sensors_irq_handler(int irq, void *p) | ||
| 26 | { | ||
| 27 | struct iio_trigger *trig = p; | ||
| 28 | struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig); | ||
| 29 | struct st_sensor_data *sdata = iio_priv(indio_dev); | ||
| 30 | |||
| 31 | /* Get the time stamp as close in time as possible */ | ||
| 32 | sdata->hw_timestamp = iio_get_time_ns(); | ||
| 33 | return IRQ_WAKE_THREAD; | ||
| 34 | } | ||
| 35 | |||
| 36 | /** | ||
| 37 | * st_sensors_irq_thread() - bottom half of the IRQ-based triggers | ||
| 38 | * @irq: irq number | ||
| 39 | * @p: private handler data | ||
| 40 | */ | ||
| 41 | irqreturn_t st_sensors_irq_thread(int irq, void *p) | ||
| 42 | { | ||
| 43 | struct iio_trigger *trig = p; | ||
| 44 | struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig); | ||
| 45 | struct st_sensor_data *sdata = iio_priv(indio_dev); | ||
| 46 | int ret; | ||
| 47 | |||
| 48 | /* | ||
| 49 | * If this trigger is backed by a hardware interrupt and we have a | ||
| 50 | * status register, check if this IRQ came from us | ||
| 51 | */ | ||
| 52 | if (sdata->sensor_settings->drdy_irq.addr_stat_drdy) { | ||
| 53 | u8 status; | ||
| 54 | |||
| 55 | ret = sdata->tf->read_byte(&sdata->tb, sdata->dev, | ||
| 56 | sdata->sensor_settings->drdy_irq.addr_stat_drdy, | ||
| 57 | &status); | ||
| 58 | if (ret < 0) { | ||
| 59 | dev_err(sdata->dev, "could not read channel status\n"); | ||
| 60 | goto out_poll; | ||
| 61 | } | ||
| 62 | /* | ||
| 63 | * the lower bits of .active_scan_mask[0] are directly mapped | ||
| 64 | * to the channels on the sensor: either bit 0 for | ||
| 65 | * one-dimensional sensors, or e.g. x,y,z for accelerometers, | ||
| 66 | * gyroscopes or magnetometers. No sensor uses more than 3 | ||
| 67 | * channels, so cut the other status bits here. | ||
| 68 | */ | ||
| 69 | status &= 0x07; | ||
| 70 | |||
| 71 | /* | ||
| 72 | * If this was not caused by any channels on this sensor, | ||
| 73 | * return IRQ_NONE | ||
| 74 | */ | ||
| 75 | if (!indio_dev->active_scan_mask) | ||
| 76 | return IRQ_NONE; | ||
| 77 | if (!(status & (u8)indio_dev->active_scan_mask[0])) | ||
| 78 | return IRQ_NONE; | ||
| 79 | } | ||
| 80 | |||
| 81 | out_poll: | ||
| 82 | /* It's our IRQ: proceed to handle the register polling */ | ||
| 83 | iio_trigger_poll_chained(p); | ||
| 84 | return IRQ_HANDLED; | ||
| 85 | } | ||
| 86 | |||
| 20 | int st_sensors_allocate_trigger(struct iio_dev *indio_dev, | 87 | int st_sensors_allocate_trigger(struct iio_dev *indio_dev, |
| 21 | const struct iio_trigger_ops *trigger_ops) | 88 | const struct iio_trigger_ops *trigger_ops) |
| 22 | { | 89 | { |
| @@ -30,6 +97,10 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev, | |||
| 30 | return -ENOMEM; | 97 | return -ENOMEM; |
| 31 | } | 98 | } |
| 32 | 99 | ||
| 100 | iio_trigger_set_drvdata(sdata->trig, indio_dev); | ||
| 101 | sdata->trig->ops = trigger_ops; | ||
| 102 | sdata->trig->dev.parent = sdata->dev; | ||
| 103 | |||
| 33 | irq = sdata->get_irq_data_ready(indio_dev); | 104 | irq = sdata->get_irq_data_ready(indio_dev); |
| 34 | irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq)); | 105 | irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq)); |
| 35 | /* | 106 | /* |
| @@ -77,9 +148,12 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev, | |||
| 77 | sdata->sensor_settings->drdy_irq.addr_stat_drdy) | 148 | sdata->sensor_settings->drdy_irq.addr_stat_drdy) |
| 78 | irq_trig |= IRQF_SHARED; | 149 | irq_trig |= IRQF_SHARED; |
| 79 | 150 | ||
| 80 | err = request_threaded_irq(irq, | 151 | /* Let's create an interrupt thread masking the hard IRQ here */ |
| 81 | iio_trigger_generic_data_rdy_poll, | 152 | irq_trig |= IRQF_ONESHOT; |
| 82 | NULL, | 153 | |
| 154 | err = request_threaded_irq(sdata->get_irq_data_ready(indio_dev), | ||
| 155 | st_sensors_irq_handler, | ||
| 156 | st_sensors_irq_thread, | ||
| 83 | irq_trig, | 157 | irq_trig, |
| 84 | sdata->trig->name, | 158 | sdata->trig->name, |
| 85 | sdata->trig); | 159 | sdata->trig); |
| @@ -88,10 +162,6 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev, | |||
| 88 | goto iio_trigger_free; | 162 | goto iio_trigger_free; |
| 89 | } | 163 | } |
| 90 | 164 | ||
| 91 | iio_trigger_set_drvdata(sdata->trig, indio_dev); | ||
| 92 | sdata->trig->ops = trigger_ops; | ||
| 93 | sdata->trig->dev.parent = sdata->dev; | ||
| 94 | |||
| 95 | err = iio_trigger_register(sdata->trig); | 165 | err = iio_trigger_register(sdata->trig); |
| 96 | if (err < 0) { | 166 | if (err < 0) { |
| 97 | dev_err(&indio_dev->dev, "failed to register iio trigger.\n"); | 167 | dev_err(&indio_dev->dev, "failed to register iio trigger.\n"); |
| @@ -119,6 +189,18 @@ void st_sensors_deallocate_trigger(struct iio_dev *indio_dev) | |||
| 119 | } | 189 | } |
| 120 | EXPORT_SYMBOL(st_sensors_deallocate_trigger); | 190 | EXPORT_SYMBOL(st_sensors_deallocate_trigger); |
| 121 | 191 | ||
| 192 | int st_sensors_validate_device(struct iio_trigger *trig, | ||
| 193 | struct iio_dev *indio_dev) | ||
| 194 | { | ||
| 195 | struct iio_dev *indio = iio_trigger_get_drvdata(trig); | ||
| 196 | |||
| 197 | if (indio != indio_dev) | ||
| 198 | return -EINVAL; | ||
| 199 | |||
| 200 | return 0; | ||
| 201 | } | ||
| 202 | EXPORT_SYMBOL(st_sensors_validate_device); | ||
| 203 | |||
| 122 | MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>"); | 204 | MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>"); |
| 123 | MODULE_DESCRIPTION("STMicroelectronics ST-sensors trigger"); | 205 | MODULE_DESCRIPTION("STMicroelectronics ST-sensors trigger"); |
| 124 | MODULE_LICENSE("GPL v2"); | 206 | MODULE_LICENSE("GPL v2"); |
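The st_sensors trigger rework above splits the data-ready interrupt into a hard handler that only records the timestamp (returning IRQ_WAKE_THREAD) and a threaded handler that performs the slow bus read of the status register before kicking the chained trigger; IRQF_ONESHOT keeps the line masked until the thread finishes. It also adds st_sensors_validate_device() so a sensor can only consume its own trigger. A minimal sketch of the threaded-IRQ shape, with everything except the IRQ flags being illustrative:

	#include <linux/interrupt.h>

	static irqreturn_t example_hard_handler(int irq, void *p)
	{
		/* Hard IRQ context: keep it minimal, e.g. grab a timestamp. */
		return IRQ_WAKE_THREAD;
	}

	static irqreturn_t example_thread(int irq, void *p)
	{
		/* Kernel thread context: slow bus reads are allowed here. */
		return IRQ_HANDLED;
	}

	static int example_request(int irq, void *cookie)
	{
		return request_threaded_irq(irq, example_hard_handler,
					    example_thread,
					    IRQF_TRIGGER_RISING | IRQF_ONESHOT,
					    "example-trigger", cookie);
	}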
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig index e63b957c985f..f7c71da42f15 100644 --- a/drivers/iio/dac/Kconfig +++ b/drivers/iio/dac/Kconfig | |||
| @@ -247,7 +247,7 @@ config MCP4922 | |||
| 247 | 247 | ||
| 248 | config STX104 | 248 | config STX104 |
| 249 | tristate "Apex Embedded Systems STX104 DAC driver" | 249 | tristate "Apex Embedded Systems STX104 DAC driver" |
| 250 | depends on X86 && ISA | 250 | depends on X86 && ISA_BUS_API |
| 251 | help | 251 | help |
| 252 | Say yes here to build support for the 2-channel DAC on the Apex | 252 | Say yes here to build support for the 2-channel DAC on the Apex |
| 253 | Embedded Systems STX104 integrated analog PC/104 card. The base port | 253 | Embedded Systems STX104 integrated analog PC/104 card. The base port |
diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c index 948f600e7059..69bde5909854 100644 --- a/drivers/iio/dac/ad5592r-base.c +++ b/drivers/iio/dac/ad5592r-base.c | |||
| @@ -525,7 +525,7 @@ static int ad5592r_alloc_channels(struct ad5592r_state *st) | |||
| 525 | 525 | ||
| 526 | device_for_each_child_node(st->dev, child) { | 526 | device_for_each_child_node(st->dev, child) { |
| 527 | ret = fwnode_property_read_u32(child, "reg", ®); | 527 | ret = fwnode_property_read_u32(child, "reg", ®); |
| 528 | if (ret || reg > ARRAY_SIZE(st->channel_modes)) | 528 | if (ret || reg >= ARRAY_SIZE(st->channel_modes)) |
| 529 | continue; | 529 | continue; |
| 530 | 530 | ||
| 531 | ret = fwnode_property_read_u32(child, "adi,mode", &tmp); | 531 | ret = fwnode_property_read_u32(child, "adi,mode", &tmp); |
diff --git a/drivers/iio/gyro/st_gyro_buffer.c b/drivers/iio/gyro/st_gyro_buffer.c index d67b17b6a7aa..a5377044e42f 100644 --- a/drivers/iio/gyro/st_gyro_buffer.c +++ b/drivers/iio/gyro/st_gyro_buffer.c | |||
| @@ -91,7 +91,7 @@ static const struct iio_buffer_setup_ops st_gyro_buffer_setup_ops = { | |||
| 91 | 91 | ||
| 92 | int st_gyro_allocate_ring(struct iio_dev *indio_dev) | 92 | int st_gyro_allocate_ring(struct iio_dev *indio_dev) |
| 93 | { | 93 | { |
| 94 | return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time, | 94 | return iio_triggered_buffer_setup(indio_dev, NULL, |
| 95 | &st_sensors_trigger_handler, &st_gyro_buffer_setup_ops); | 95 | &st_sensors_trigger_handler, &st_gyro_buffer_setup_ops); |
| 96 | } | 96 | } |
| 97 | 97 | ||
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c index 52a3c87c375c..a8012955a1f6 100644 --- a/drivers/iio/gyro/st_gyro_core.c +++ b/drivers/iio/gyro/st_gyro_core.c | |||
| @@ -409,6 +409,7 @@ static const struct iio_info gyro_info = { | |||
| 409 | static const struct iio_trigger_ops st_gyro_trigger_ops = { | 409 | static const struct iio_trigger_ops st_gyro_trigger_ops = { |
| 410 | .owner = THIS_MODULE, | 410 | .owner = THIS_MODULE, |
| 411 | .set_trigger_state = ST_GYRO_TRIGGER_SET_STATE, | 411 | .set_trigger_state = ST_GYRO_TRIGGER_SET_STATE, |
| 412 | .validate_device = st_sensors_validate_device, | ||
| 412 | }; | 413 | }; |
| 413 | #define ST_GYRO_TRIGGER_OPS (&st_gyro_trigger_ops) | 414 | #define ST_GYRO_TRIGGER_OPS (&st_gyro_trigger_ops) |
| 414 | #else | 415 | #else |
diff --git a/drivers/iio/humidity/am2315.c b/drivers/iio/humidity/am2315.c index 3be6d209a159..11535911a5c6 100644 --- a/drivers/iio/humidity/am2315.c +++ b/drivers/iio/humidity/am2315.c | |||
| @@ -165,10 +165,8 @@ static irqreturn_t am2315_trigger_handler(int irq, void *p) | |||
| 165 | struct am2315_sensor_data sensor_data; | 165 | struct am2315_sensor_data sensor_data; |
| 166 | 166 | ||
| 167 | ret = am2315_read_data(data, &sensor_data); | 167 | ret = am2315_read_data(data, &sensor_data); |
| 168 | if (ret < 0) { | 168 | if (ret < 0) |
| 169 | mutex_unlock(&data->lock); | ||
| 170 | goto err; | 169 | goto err; |
| 171 | } | ||
| 172 | 170 | ||
| 173 | mutex_lock(&data->lock); | 171 | mutex_lock(&data->lock); |
| 174 | if (*(indio_dev->active_scan_mask) == AM2315_ALL_CHANNEL_MASK) { | 172 | if (*(indio_dev->active_scan_mask) == AM2315_ALL_CHANNEL_MASK) { |
diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c index fa4767613173..a03832a5fc95 100644 --- a/drivers/iio/humidity/hdc100x.c +++ b/drivers/iio/humidity/hdc100x.c | |||
| @@ -55,7 +55,7 @@ static const struct { | |||
| 55 | }, | 55 | }, |
| 56 | { /* IIO_HUMIDITYRELATIVE channel */ | 56 | { /* IIO_HUMIDITYRELATIVE channel */ |
| 57 | .shift = 8, | 57 | .shift = 8, |
| 58 | .mask = 2, | 58 | .mask = 3, |
| 59 | }, | 59 | }, |
| 60 | }; | 60 | }; |
| 61 | 61 | ||
| @@ -164,14 +164,14 @@ static int hdc100x_get_measurement(struct hdc100x_data *data, | |||
| 164 | dev_err(&client->dev, "cannot read high byte measurement"); | 164 | dev_err(&client->dev, "cannot read high byte measurement"); |
| 165 | return ret; | 165 | return ret; |
| 166 | } | 166 | } |
| 167 | val = ret << 6; | 167 | val = ret << 8; |
| 168 | 168 | ||
| 169 | ret = i2c_smbus_read_byte(client); | 169 | ret = i2c_smbus_read_byte(client); |
| 170 | if (ret < 0) { | 170 | if (ret < 0) { |
| 171 | dev_err(&client->dev, "cannot read low byte measurement"); | 171 | dev_err(&client->dev, "cannot read low byte measurement"); |
| 172 | return ret; | 172 | return ret; |
| 173 | } | 173 | } |
| 174 | val |= ret >> 2; | 174 | val |= ret; |
| 175 | 175 | ||
| 176 | return val; | 176 | return val; |
| 177 | } | 177 | } |
| @@ -211,18 +211,18 @@ static int hdc100x_read_raw(struct iio_dev *indio_dev, | |||
| 211 | return IIO_VAL_INT_PLUS_MICRO; | 211 | return IIO_VAL_INT_PLUS_MICRO; |
| 212 | case IIO_CHAN_INFO_SCALE: | 212 | case IIO_CHAN_INFO_SCALE: |
| 213 | if (chan->type == IIO_TEMP) { | 213 | if (chan->type == IIO_TEMP) { |
| 214 | *val = 165; | 214 | *val = 165000; |
| 215 | *val2 = 65536 >> 2; | 215 | *val2 = 65536; |
| 216 | return IIO_VAL_FRACTIONAL; | 216 | return IIO_VAL_FRACTIONAL; |
| 217 | } else { | 217 | } else { |
| 218 | *val = 0; | 218 | *val = 100; |
| 219 | *val2 = 10000; | 219 | *val2 = 65536; |
| 220 | return IIO_VAL_INT_PLUS_MICRO; | 220 | return IIO_VAL_FRACTIONAL; |
| 221 | } | 221 | } |
| 222 | break; | 222 | break; |
| 223 | case IIO_CHAN_INFO_OFFSET: | 223 | case IIO_CHAN_INFO_OFFSET: |
| 224 | *val = -3971; | 224 | *val = -15887; |
| 225 | *val2 = 879096; | 225 | *val2 = 515151; |
| 226 | return IIO_VAL_INT_PLUS_MICRO; | 226 | return IIO_VAL_INT_PLUS_MICRO; |
| 227 | default: | 227 | default: |
| 228 | return -EINVAL; | 228 | return -EINVAL; |
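The hdc100x scale and offset changes above follow from the datasheet conversion formulas: temperature(°C) = raw * 165 / 2^16 - 40 and humidity(%RH) = raw * 100 / 2^16, with the raw register now assembled as a plain 16-bit value (high byte shifted by 8, low byte kept intact). Since IIO computes (raw + offset) * scale, the milli-degree offset is -40000 / (165000 / 65536) ≈ -15887.515151, which matches the new constants. A hedged sketch of the resulting conversion (the helper name is illustrative):

	/* Sketch: convert a raw 16-bit HDC100x temperature sample to m°C. */
	static long example_hdc100x_millicelsius(unsigned int raw)
	{
		return (long)raw * 165000 / 65536 - 40000;
	}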
diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c index 0bf92b06d7d8..b8a290ec984e 100644 --- a/drivers/iio/imu/bmi160/bmi160_core.c +++ b/drivers/iio/imu/bmi160/bmi160_core.c | |||
| @@ -209,11 +209,11 @@ static const struct bmi160_scale_item bmi160_scale_table[] = { | |||
| 209 | }; | 209 | }; |
| 210 | 210 | ||
| 211 | static const struct bmi160_odr bmi160_accel_odr[] = { | 211 | static const struct bmi160_odr bmi160_accel_odr[] = { |
| 212 | {0x01, 0, 78125}, | 212 | {0x01, 0, 781250}, |
| 213 | {0x02, 1, 5625}, | 213 | {0x02, 1, 562500}, |
| 214 | {0x03, 3, 125}, | 214 | {0x03, 3, 125000}, |
| 215 | {0x04, 6, 25}, | 215 | {0x04, 6, 250000}, |
| 216 | {0x05, 12, 5}, | 216 | {0x05, 12, 500000}, |
| 217 | {0x06, 25, 0}, | 217 | {0x06, 25, 0}, |
| 218 | {0x07, 50, 0}, | 218 | {0x07, 50, 0}, |
| 219 | {0x08, 100, 0}, | 219 | {0x08, 100, 0}, |
| @@ -229,7 +229,7 @@ static const struct bmi160_odr bmi160_gyro_odr[] = { | |||
| 229 | {0x08, 100, 0}, | 229 | {0x08, 100, 0}, |
| 230 | {0x09, 200, 0}, | 230 | {0x09, 200, 0}, |
| 231 | {0x0A, 400, 0}, | 231 | {0x0A, 400, 0}, |
| 232 | {0x0B, 8000, 0}, | 232 | {0x0B, 800, 0}, |
| 233 | {0x0C, 1600, 0}, | 233 | {0x0C, 1600, 0}, |
| 234 | {0x0D, 3200, 0}, | 234 | {0x0D, 3200, 0}, |
| 235 | }; | 235 | }; |
| @@ -364,8 +364,8 @@ int bmi160_set_odr(struct bmi160_data *data, enum bmi160_sensor_type t, | |||
| 364 | 364 | ||
| 365 | return regmap_update_bits(data->regmap, | 365 | return regmap_update_bits(data->regmap, |
| 366 | bmi160_regs[t].config, | 366 | bmi160_regs[t].config, |
| 367 | bmi160_odr_table[t].tbl[i].bits, | 367 | bmi160_regs[t].config_odr_mask, |
| 368 | bmi160_regs[t].config_odr_mask); | 368 | bmi160_odr_table[t].tbl[i].bits); |
| 369 | } | 369 | } |
| 370 | 370 | ||
| 371 | static int bmi160_get_odr(struct bmi160_data *data, enum bmi160_sensor_type t, | 371 | static int bmi160_get_odr(struct bmi160_data *data, enum bmi160_sensor_type t, |
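Two distinct fixes sit in the bmi160 hunks above: the fractional part of each ODR entry is in microhertz (IIO_VAL_INT_PLUS_MICRO), so 0.78125 Hz must be encoded as {0, 781250} rather than {0, 78125} (and the gyro entry is 800 Hz, not 8000 Hz); and regmap_update_bits() takes the mask before the value, so the two arguments were swapped. A one-function sketch of the corrected call order:

	#include <linux/regmap.h>

	/* Sketch: regmap_update_bits(map, reg, mask, value) -- mask comes first. */
	static int example_set_odr(struct regmap *map, unsigned int reg,
				   unsigned int odr_mask, unsigned int odr_bits)
	{
		return regmap_update_bits(map, reg, odr_mask, odr_bits);
	}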
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c index ae2806aafb72..0c52dfe64977 100644 --- a/drivers/iio/industrialio-trigger.c +++ b/drivers/iio/industrialio-trigger.c | |||
| @@ -210,22 +210,35 @@ static int iio_trigger_attach_poll_func(struct iio_trigger *trig, | |||
| 210 | 210 | ||
| 211 | /* Prevent the module from being removed whilst attached to a trigger */ | 211 | /* Prevent the module from being removed whilst attached to a trigger */ |
| 212 | __module_get(pf->indio_dev->info->driver_module); | 212 | __module_get(pf->indio_dev->info->driver_module); |
| 213 | |||
| 214 | /* Get irq number */ | ||
| 213 | pf->irq = iio_trigger_get_irq(trig); | 215 | pf->irq = iio_trigger_get_irq(trig); |
| 216 | if (pf->irq < 0) | ||
| 217 | goto out_put_module; | ||
| 218 | |||
| 219 | /* Request irq */ | ||
| 214 | ret = request_threaded_irq(pf->irq, pf->h, pf->thread, | 220 | ret = request_threaded_irq(pf->irq, pf->h, pf->thread, |
| 215 | pf->type, pf->name, | 221 | pf->type, pf->name, |
| 216 | pf); | 222 | pf); |
| 217 | if (ret < 0) { | 223 | if (ret < 0) |
| 218 | module_put(pf->indio_dev->info->driver_module); | 224 | goto out_put_irq; |
| 219 | return ret; | ||
| 220 | } | ||
| 221 | 225 | ||
| 226 | /* Enable trigger in driver */ | ||
| 222 | if (trig->ops && trig->ops->set_trigger_state && notinuse) { | 227 | if (trig->ops && trig->ops->set_trigger_state && notinuse) { |
| 223 | ret = trig->ops->set_trigger_state(trig, true); | 228 | ret = trig->ops->set_trigger_state(trig, true); |
| 224 | if (ret < 0) | 229 | if (ret < 0) |
| 225 | module_put(pf->indio_dev->info->driver_module); | 230 | goto out_free_irq; |
| 226 | } | 231 | } |
| 227 | 232 | ||
| 228 | return ret; | 233 | return ret; |
| 234 | |||
| 235 | out_free_irq: | ||
| 236 | free_irq(pf->irq, pf); | ||
| 237 | out_put_irq: | ||
| 238 | iio_trigger_put_irq(trig, pf->irq); | ||
| 239 | out_put_module: | ||
| 240 | module_put(pf->indio_dev->info->driver_module); | ||
| 241 | return ret; | ||
| 229 | } | 242 | } |
| 230 | 243 | ||
| 231 | static int iio_trigger_detach_poll_func(struct iio_trigger *trig, | 244 | static int iio_trigger_detach_poll_func(struct iio_trigger *trig, |
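The iio_trigger_attach_poll_func() rework above adds the usual goto-based unwind ladder: each acquired resource (module reference, trigger IRQ number, requested IRQ line) is released in reverse order when a later step fails, and the new check also catches a negative IRQ from iio_trigger_get_irq(). A self-contained sketch of the pattern using stand-in stubs (none of these helpers come from the IIO core):

	/* Stand-ins for __module_get()/iio_trigger_get_irq()/request_threaded_irq() */
	static int acquire_module_ref(void) { return 0; }
	static int acquire_irq_number(void) { return 0; }
	static int acquire_irq_line(void)   { return 0; }
	static void release_irq_number(void) { }
	static void release_module_ref(void) { }

	static int example_attach(void)
	{
		int ret;

		ret = acquire_module_ref();
		if (ret)
			return ret;

		ret = acquire_irq_number();
		if (ret)
			goto out_put_module;

		ret = acquire_irq_line();
		if (ret)
			goto out_put_irq;

		return 0;

	out_put_irq:
		release_irq_number();
	out_put_module:
		release_module_ref();
		return ret;
	}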
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c index b4dbb3912977..651d57b8abbf 100644 --- a/drivers/iio/light/apds9960.c +++ b/drivers/iio/light/apds9960.c | |||
| @@ -1011,6 +1011,7 @@ static int apds9960_probe(struct i2c_client *client, | |||
| 1011 | 1011 | ||
| 1012 | iio_device_attach_buffer(indio_dev, buffer); | 1012 | iio_device_attach_buffer(indio_dev, buffer); |
| 1013 | 1013 | ||
| 1014 | indio_dev->dev.parent = &client->dev; | ||
| 1014 | indio_dev->info = &apds9960_info; | 1015 | indio_dev->info = &apds9960_info; |
| 1015 | indio_dev->name = APDS9960_DRV_NAME; | 1016 | indio_dev->name = APDS9960_DRV_NAME; |
| 1016 | indio_dev->channels = apds9960_channels; | 1017 | indio_dev->channels = apds9960_channels; |
diff --git a/drivers/iio/light/bh1780.c b/drivers/iio/light/bh1780.c index 72b364e4aa72..b54dcba05a82 100644 --- a/drivers/iio/light/bh1780.c +++ b/drivers/iio/light/bh1780.c | |||
| @@ -84,7 +84,7 @@ static int bh1780_debugfs_reg_access(struct iio_dev *indio_dev, | |||
| 84 | int ret; | 84 | int ret; |
| 85 | 85 | ||
| 86 | if (!readval) | 86 | if (!readval) |
| 87 | bh1780_write(bh1780, (u8)reg, (u8)writeval); | 87 | return bh1780_write(bh1780, (u8)reg, (u8)writeval); |
| 88 | 88 | ||
| 89 | ret = bh1780_read(bh1780, (u8)reg); | 89 | ret = bh1780_read(bh1780, (u8)reg); |
| 90 | if (ret < 0) | 90 | if (ret < 0) |
| @@ -187,7 +187,7 @@ static int bh1780_probe(struct i2c_client *client, | |||
| 187 | 187 | ||
| 188 | indio_dev->dev.parent = &client->dev; | 188 | indio_dev->dev.parent = &client->dev; |
| 189 | indio_dev->info = &bh1780_info; | 189 | indio_dev->info = &bh1780_info; |
| 190 | indio_dev->name = id->name; | 190 | indio_dev->name = "bh1780"; |
| 191 | indio_dev->channels = bh1780_channels; | 191 | indio_dev->channels = bh1780_channels; |
| 192 | indio_dev->num_channels = ARRAY_SIZE(bh1780_channels); | 192 | indio_dev->num_channels = ARRAY_SIZE(bh1780_channels); |
| 193 | indio_dev->modes = INDIO_DIRECT_MODE; | 193 | indio_dev->modes = INDIO_DIRECT_MODE; |
| @@ -226,7 +226,8 @@ static int bh1780_remove(struct i2c_client *client) | |||
| 226 | static int bh1780_runtime_suspend(struct device *dev) | 226 | static int bh1780_runtime_suspend(struct device *dev) |
| 227 | { | 227 | { |
| 228 | struct i2c_client *client = to_i2c_client(dev); | 228 | struct i2c_client *client = to_i2c_client(dev); |
| 229 | struct bh1780_data *bh1780 = i2c_get_clientdata(client); | 229 | struct iio_dev *indio_dev = i2c_get_clientdata(client); |
| 230 | struct bh1780_data *bh1780 = iio_priv(indio_dev); | ||
| 230 | int ret; | 231 | int ret; |
| 231 | 232 | ||
| 232 | ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_POFF); | 233 | ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_POFF); |
| @@ -241,7 +242,8 @@ static int bh1780_runtime_suspend(struct device *dev) | |||
| 241 | static int bh1780_runtime_resume(struct device *dev) | 242 | static int bh1780_runtime_resume(struct device *dev) |
| 242 | { | 243 | { |
| 243 | struct i2c_client *client = to_i2c_client(dev); | 244 | struct i2c_client *client = to_i2c_client(dev); |
| 244 | struct bh1780_data *bh1780 = i2c_get_clientdata(client); | 245 | struct iio_dev *indio_dev = i2c_get_clientdata(client); |
| 246 | struct bh1780_data *bh1780 = iio_priv(indio_dev); | ||
| 245 | int ret; | 247 | int ret; |
| 246 | 248 | ||
| 247 | ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_PON); | 249 | ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_PON); |
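The bh1780 runtime-PM fix above matters because, for an IIO driver, the i2c clientdata holds the struct iio_dev rather than the driver's private state; the suspend/resume callbacks therefore have to unwrap it with iio_priv() instead of treating the clientdata as the chip struct directly. A short sketch of that lookup chain (the chip struct name is illustrative):

	#include <linux/i2c.h>
	#include <linux/iio/iio.h>

	struct example_chip {
		struct i2c_client *client;
	};

	static struct example_chip *example_chip_from_dev(struct device *dev)
	{
		struct i2c_client *client = to_i2c_client(dev);
		struct iio_dev *indio_dev = i2c_get_clientdata(client);

		return iio_priv(indio_dev);	/* private data lives inside iio_dev */
	}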
diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c index e01e58a9bd14..f17cb2ea18f5 100644 --- a/drivers/iio/light/max44000.c +++ b/drivers/iio/light/max44000.c | |||
| @@ -147,7 +147,6 @@ static const struct iio_chan_spec max44000_channels[] = { | |||
| 147 | { | 147 | { |
| 148 | .type = IIO_PROXIMITY, | 148 | .type = IIO_PROXIMITY, |
| 149 | .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), | 149 | .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), |
| 150 | .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), | ||
| 151 | .scan_index = MAX44000_SCAN_INDEX_PRX, | 150 | .scan_index = MAX44000_SCAN_INDEX_PRX, |
| 152 | .scan_type = { | 151 | .scan_type = { |
| 153 | .sign = 'u', | 152 | .sign = 'u', |
diff --git a/drivers/iio/magnetometer/st_magn_buffer.c b/drivers/iio/magnetometer/st_magn_buffer.c index ecd3bd0a9769..0a9e8fadfa9d 100644 --- a/drivers/iio/magnetometer/st_magn_buffer.c +++ b/drivers/iio/magnetometer/st_magn_buffer.c | |||
| @@ -82,7 +82,7 @@ static const struct iio_buffer_setup_ops st_magn_buffer_setup_ops = { | |||
| 82 | 82 | ||
| 83 | int st_magn_allocate_ring(struct iio_dev *indio_dev) | 83 | int st_magn_allocate_ring(struct iio_dev *indio_dev) |
| 84 | { | 84 | { |
| 85 | return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time, | 85 | return iio_triggered_buffer_setup(indio_dev, NULL, |
| 86 | &st_sensors_trigger_handler, &st_magn_buffer_setup_ops); | 86 | &st_sensors_trigger_handler, &st_magn_buffer_setup_ops); |
| 87 | } | 87 | } |
| 88 | 88 | ||
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c index 62036d2a9956..8250fc322c56 100644 --- a/drivers/iio/magnetometer/st_magn_core.c +++ b/drivers/iio/magnetometer/st_magn_core.c | |||
| @@ -572,6 +572,7 @@ static const struct iio_info magn_info = { | |||
| 572 | static const struct iio_trigger_ops st_magn_trigger_ops = { | 572 | static const struct iio_trigger_ops st_magn_trigger_ops = { |
| 573 | .owner = THIS_MODULE, | 573 | .owner = THIS_MODULE, |
| 574 | .set_trigger_state = ST_MAGN_TRIGGER_SET_STATE, | 574 | .set_trigger_state = ST_MAGN_TRIGGER_SET_STATE, |
| 575 | .validate_device = st_sensors_validate_device, | ||
| 575 | }; | 576 | }; |
| 576 | #define ST_MAGN_TRIGGER_OPS (&st_magn_trigger_ops) | 577 | #define ST_MAGN_TRIGGER_OPS (&st_magn_trigger_ops) |
| 577 | #else | 578 | #else |
diff --git a/drivers/iio/pressure/bmp280.c b/drivers/iio/pressure/bmp280.c index 2f1498e12bb2..724452d61846 100644 --- a/drivers/iio/pressure/bmp280.c +++ b/drivers/iio/pressure/bmp280.c | |||
| @@ -879,8 +879,8 @@ static int bmp280_probe(struct i2c_client *client, | |||
| 879 | if (ret < 0) | 879 | if (ret < 0) |
| 880 | return ret; | 880 | return ret; |
| 881 | if (chip_id != id->driver_data) { | 881 | if (chip_id != id->driver_data) { |
| 882 | dev_err(&client->dev, "bad chip id. expected %x got %x\n", | 882 | dev_err(&client->dev, "bad chip id. expected %lx got %x\n", |
| 883 | BMP280_CHIP_ID, chip_id); | 883 | id->driver_data, chip_id); |
| 884 | return -EINVAL; | 884 | return -EINVAL; |
| 885 | } | 885 | } |
| 886 | 886 | ||
diff --git a/drivers/iio/pressure/st_pressure_buffer.c b/drivers/iio/pressure/st_pressure_buffer.c index 2ff53f222352..99468d0a64e7 100644 --- a/drivers/iio/pressure/st_pressure_buffer.c +++ b/drivers/iio/pressure/st_pressure_buffer.c | |||
| @@ -82,7 +82,7 @@ static const struct iio_buffer_setup_ops st_press_buffer_setup_ops = { | |||
| 82 | 82 | ||
| 83 | int st_press_allocate_ring(struct iio_dev *indio_dev) | 83 | int st_press_allocate_ring(struct iio_dev *indio_dev) |
| 84 | { | 84 | { |
| 85 | return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time, | 85 | return iio_triggered_buffer_setup(indio_dev, NULL, |
| 86 | &st_sensors_trigger_handler, &st_press_buffer_setup_ops); | 86 | &st_sensors_trigger_handler, &st_press_buffer_setup_ops); |
| 87 | } | 87 | } |
| 88 | 88 | ||
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c index 9e9b72a8f18f..92a118c3c4ac 100644 --- a/drivers/iio/pressure/st_pressure_core.c +++ b/drivers/iio/pressure/st_pressure_core.c | |||
| @@ -28,15 +28,21 @@ | |||
| 28 | #include <linux/iio/common/st_sensors.h> | 28 | #include <linux/iio/common/st_sensors.h> |
| 29 | #include "st_pressure.h" | 29 | #include "st_pressure.h" |
| 30 | 30 | ||
| 31 | #define MCELSIUS_PER_CELSIUS 1000 | ||
| 32 | |||
| 33 | /* Default pressure sensitivity */ | ||
| 31 | #define ST_PRESS_LSB_PER_MBAR 4096UL | 34 | #define ST_PRESS_LSB_PER_MBAR 4096UL |
| 32 | #define ST_PRESS_KPASCAL_NANO_SCALE (100000000UL / \ | 35 | #define ST_PRESS_KPASCAL_NANO_SCALE (100000000UL / \ |
| 33 | ST_PRESS_LSB_PER_MBAR) | 36 | ST_PRESS_LSB_PER_MBAR) |
| 37 | |||
| 38 | /* Default temperature sensitivity */ | ||
| 34 | #define ST_PRESS_LSB_PER_CELSIUS 480UL | 39 | #define ST_PRESS_LSB_PER_CELSIUS 480UL |
| 35 | #define ST_PRESS_CELSIUS_NANO_SCALE (1000000000UL / \ | 40 | #define ST_PRESS_MILLI_CELSIUS_OFFSET 42500UL |
| 36 | ST_PRESS_LSB_PER_CELSIUS) | 41 | |
| 37 | #define ST_PRESS_NUMBER_DATA_CHANNELS 1 | 42 | #define ST_PRESS_NUMBER_DATA_CHANNELS 1 |
| 38 | 43 | ||
| 39 | /* FULLSCALE */ | 44 | /* FULLSCALE */ |
| 45 | #define ST_PRESS_FS_AVL_1100MB 1100 | ||
| 40 | #define ST_PRESS_FS_AVL_1260MB 1260 | 46 | #define ST_PRESS_FS_AVL_1260MB 1260 |
| 41 | 47 | ||
| 42 | #define ST_PRESS_1_OUT_XL_ADDR 0x28 | 48 | #define ST_PRESS_1_OUT_XL_ADDR 0x28 |
| @@ -54,9 +60,6 @@ | |||
| 54 | #define ST_PRESS_LPS331AP_PW_MASK 0x80 | 60 | #define ST_PRESS_LPS331AP_PW_MASK 0x80 |
| 55 | #define ST_PRESS_LPS331AP_FS_ADDR 0x23 | 61 | #define ST_PRESS_LPS331AP_FS_ADDR 0x23 |
| 56 | #define ST_PRESS_LPS331AP_FS_MASK 0x30 | 62 | #define ST_PRESS_LPS331AP_FS_MASK 0x30 |
| 57 | #define ST_PRESS_LPS331AP_FS_AVL_1260_VAL 0x00 | ||
| 58 | #define ST_PRESS_LPS331AP_FS_AVL_1260_GAIN ST_PRESS_KPASCAL_NANO_SCALE | ||
| 59 | #define ST_PRESS_LPS331AP_FS_AVL_TEMP_GAIN ST_PRESS_CELSIUS_NANO_SCALE | ||
| 60 | #define ST_PRESS_LPS331AP_BDU_ADDR 0x20 | 63 | #define ST_PRESS_LPS331AP_BDU_ADDR 0x20 |
| 61 | #define ST_PRESS_LPS331AP_BDU_MASK 0x04 | 64 | #define ST_PRESS_LPS331AP_BDU_MASK 0x04 |
| 62 | #define ST_PRESS_LPS331AP_DRDY_IRQ_ADDR 0x22 | 65 | #define ST_PRESS_LPS331AP_DRDY_IRQ_ADDR 0x22 |
| @@ -67,9 +70,14 @@ | |||
| 67 | #define ST_PRESS_LPS331AP_OD_IRQ_ADDR 0x22 | 70 | #define ST_PRESS_LPS331AP_OD_IRQ_ADDR 0x22 |
| 68 | #define ST_PRESS_LPS331AP_OD_IRQ_MASK 0x40 | 71 | #define ST_PRESS_LPS331AP_OD_IRQ_MASK 0x40 |
| 69 | #define ST_PRESS_LPS331AP_MULTIREAD_BIT true | 72 | #define ST_PRESS_LPS331AP_MULTIREAD_BIT true |
| 70 | #define ST_PRESS_LPS331AP_TEMP_OFFSET 42500 | ||
| 71 | 73 | ||
| 72 | /* CUSTOM VALUES FOR LPS001WP SENSOR */ | 74 | /* CUSTOM VALUES FOR LPS001WP SENSOR */ |
| 75 | |||
| 76 | /* LPS001WP pressure resolution */ | ||
| 77 | #define ST_PRESS_LPS001WP_LSB_PER_MBAR 16UL | ||
| 78 | /* LPS001WP temperature resolution */ | ||
| 79 | #define ST_PRESS_LPS001WP_LSB_PER_CELSIUS 64UL | ||
| 80 | |||
| 73 | #define ST_PRESS_LPS001WP_WAI_EXP 0xba | 81 | #define ST_PRESS_LPS001WP_WAI_EXP 0xba |
| 74 | #define ST_PRESS_LPS001WP_ODR_ADDR 0x20 | 82 | #define ST_PRESS_LPS001WP_ODR_ADDR 0x20 |
| 75 | #define ST_PRESS_LPS001WP_ODR_MASK 0x30 | 83 | #define ST_PRESS_LPS001WP_ODR_MASK 0x30 |
| @@ -78,6 +86,8 @@ | |||
| 78 | #define ST_PRESS_LPS001WP_ODR_AVL_13HZ_VAL 0x03 | 86 | #define ST_PRESS_LPS001WP_ODR_AVL_13HZ_VAL 0x03 |
| 79 | #define ST_PRESS_LPS001WP_PW_ADDR 0x20 | 87 | #define ST_PRESS_LPS001WP_PW_ADDR 0x20 |
| 80 | #define ST_PRESS_LPS001WP_PW_MASK 0x40 | 88 | #define ST_PRESS_LPS001WP_PW_MASK 0x40 |
| 89 | #define ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN \ | ||
| 90 | (100000000UL / ST_PRESS_LPS001WP_LSB_PER_MBAR) | ||
| 81 | #define ST_PRESS_LPS001WP_BDU_ADDR 0x20 | 91 | #define ST_PRESS_LPS001WP_BDU_ADDR 0x20 |
| 82 | #define ST_PRESS_LPS001WP_BDU_MASK 0x04 | 92 | #define ST_PRESS_LPS001WP_BDU_MASK 0x04 |
| 83 | #define ST_PRESS_LPS001WP_MULTIREAD_BIT true | 93 | #define ST_PRESS_LPS001WP_MULTIREAD_BIT true |
| @@ -94,11 +104,6 @@ | |||
| 94 | #define ST_PRESS_LPS25H_ODR_AVL_25HZ_VAL 0x04 | 104 | #define ST_PRESS_LPS25H_ODR_AVL_25HZ_VAL 0x04 |
| 95 | #define ST_PRESS_LPS25H_PW_ADDR 0x20 | 105 | #define ST_PRESS_LPS25H_PW_ADDR 0x20 |
| 96 | #define ST_PRESS_LPS25H_PW_MASK 0x80 | 106 | #define ST_PRESS_LPS25H_PW_MASK 0x80 |
| 97 | #define ST_PRESS_LPS25H_FS_ADDR 0x00 | ||
| 98 | #define ST_PRESS_LPS25H_FS_MASK 0x00 | ||
| 99 | #define ST_PRESS_LPS25H_FS_AVL_1260_VAL 0x00 | ||
| 100 | #define ST_PRESS_LPS25H_FS_AVL_1260_GAIN ST_PRESS_KPASCAL_NANO_SCALE | ||
| 101 | #define ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN ST_PRESS_CELSIUS_NANO_SCALE | ||
| 102 | #define ST_PRESS_LPS25H_BDU_ADDR 0x20 | 107 | #define ST_PRESS_LPS25H_BDU_ADDR 0x20 |
| 103 | #define ST_PRESS_LPS25H_BDU_MASK 0x04 | 108 | #define ST_PRESS_LPS25H_BDU_MASK 0x04 |
| 104 | #define ST_PRESS_LPS25H_DRDY_IRQ_ADDR 0x23 | 109 | #define ST_PRESS_LPS25H_DRDY_IRQ_ADDR 0x23 |
| @@ -109,7 +114,6 @@ | |||
| 109 | #define ST_PRESS_LPS25H_OD_IRQ_ADDR 0x22 | 114 | #define ST_PRESS_LPS25H_OD_IRQ_ADDR 0x22 |
| 110 | #define ST_PRESS_LPS25H_OD_IRQ_MASK 0x40 | 115 | #define ST_PRESS_LPS25H_OD_IRQ_MASK 0x40 |
| 111 | #define ST_PRESS_LPS25H_MULTIREAD_BIT true | 116 | #define ST_PRESS_LPS25H_MULTIREAD_BIT true |
| 112 | #define ST_PRESS_LPS25H_TEMP_OFFSET 42500 | ||
| 113 | #define ST_PRESS_LPS25H_OUT_XL_ADDR 0x28 | 117 | #define ST_PRESS_LPS25H_OUT_XL_ADDR 0x28 |
| 114 | #define ST_TEMP_LPS25H_OUT_L_ADDR 0x2b | 118 | #define ST_TEMP_LPS25H_OUT_L_ADDR 0x2b |
| 115 | 119 | ||
| @@ -161,7 +165,9 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = { | |||
| 161 | .storagebits = 16, | 165 | .storagebits = 16, |
| 162 | .endianness = IIO_LE, | 166 | .endianness = IIO_LE, |
| 163 | }, | 167 | }, |
| 164 | .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), | 168 | .info_mask_separate = |
| 169 | BIT(IIO_CHAN_INFO_RAW) | | ||
| 170 | BIT(IIO_CHAN_INFO_SCALE), | ||
| 165 | .modified = 0, | 171 | .modified = 0, |
| 166 | }, | 172 | }, |
| 167 | { | 173 | { |
| @@ -177,7 +183,7 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = { | |||
| 177 | }, | 183 | }, |
| 178 | .info_mask_separate = | 184 | .info_mask_separate = |
| 179 | BIT(IIO_CHAN_INFO_RAW) | | 185 | BIT(IIO_CHAN_INFO_RAW) | |
| 180 | BIT(IIO_CHAN_INFO_OFFSET), | 186 | BIT(IIO_CHAN_INFO_SCALE), |
| 181 | .modified = 0, | 187 | .modified = 0, |
| 182 | }, | 188 | }, |
| 183 | IIO_CHAN_SOFT_TIMESTAMP(1) | 189 | IIO_CHAN_SOFT_TIMESTAMP(1) |
| @@ -212,11 +218,14 @@ static const struct st_sensor_settings st_press_sensors_settings[] = { | |||
| 212 | .addr = ST_PRESS_LPS331AP_FS_ADDR, | 218 | .addr = ST_PRESS_LPS331AP_FS_ADDR, |
| 213 | .mask = ST_PRESS_LPS331AP_FS_MASK, | 219 | .mask = ST_PRESS_LPS331AP_FS_MASK, |
| 214 | .fs_avl = { | 220 | .fs_avl = { |
| 221 | /* | ||
| 222 | * Pressure and temperature sensitivity values | ||
| 223 | * as defined in table 3 of LPS331AP datasheet. | ||
| 224 | */ | ||
| 215 | [0] = { | 225 | [0] = { |
| 216 | .num = ST_PRESS_FS_AVL_1260MB, | 226 | .num = ST_PRESS_FS_AVL_1260MB, |
| 217 | .value = ST_PRESS_LPS331AP_FS_AVL_1260_VAL, | 227 | .gain = ST_PRESS_KPASCAL_NANO_SCALE, |
| 218 | .gain = ST_PRESS_LPS331AP_FS_AVL_1260_GAIN, | 228 | .gain2 = ST_PRESS_LSB_PER_CELSIUS, |
| 219 | .gain2 = ST_PRESS_LPS331AP_FS_AVL_TEMP_GAIN, | ||
| 220 | }, | 229 | }, |
| 221 | }, | 230 | }, |
| 222 | }, | 231 | }, |
| @@ -261,7 +270,17 @@ static const struct st_sensor_settings st_press_sensors_settings[] = { | |||
| 261 | .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE, | 270 | .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE, |
| 262 | }, | 271 | }, |
| 263 | .fs = { | 272 | .fs = { |
| 264 | .addr = 0, | 273 | .fs_avl = { |
| 274 | /* | ||
| 275 | * Pressure and temperature resolution values | ||
| 276 | * as defined in table 3 of LPS001WP datasheet. | ||
| 277 | */ | ||
| 278 | [0] = { | ||
| 279 | .num = ST_PRESS_FS_AVL_1100MB, | ||
| 280 | .gain = ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN, | ||
| 281 | .gain2 = ST_PRESS_LPS001WP_LSB_PER_CELSIUS, | ||
| 282 | }, | ||
| 283 | }, | ||
| 265 | }, | 284 | }, |
| 266 | .bdu = { | 285 | .bdu = { |
| 267 | .addr = ST_PRESS_LPS001WP_BDU_ADDR, | 286 | .addr = ST_PRESS_LPS001WP_BDU_ADDR, |
| @@ -298,14 +317,15 @@ static const struct st_sensor_settings st_press_sensors_settings[] = { | |||
| 298 | .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE, | 317 | .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE, |
| 299 | }, | 318 | }, |
| 300 | .fs = { | 319 | .fs = { |
| 301 | .addr = ST_PRESS_LPS25H_FS_ADDR, | ||
| 302 | .mask = ST_PRESS_LPS25H_FS_MASK, | ||
| 303 | .fs_avl = { | 320 | .fs_avl = { |
| 321 | /* | ||
| 322 | * Pressure and temperature sensitivity values | ||
| 323 | * as defined in table 3 of LPS25H datasheet. | ||
| 324 | */ | ||
| 304 | [0] = { | 325 | [0] = { |
| 305 | .num = ST_PRESS_FS_AVL_1260MB, | 326 | .num = ST_PRESS_FS_AVL_1260MB, |
| 306 | .value = ST_PRESS_LPS25H_FS_AVL_1260_VAL, | 327 | .gain = ST_PRESS_KPASCAL_NANO_SCALE, |
| 307 | .gain = ST_PRESS_LPS25H_FS_AVL_1260_GAIN, | 328 | .gain2 = ST_PRESS_LSB_PER_CELSIUS, |
| 308 | .gain2 = ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN, | ||
| 309 | }, | 329 | }, |
| 310 | }, | 330 | }, |
| 311 | }, | 331 | }, |
| @@ -364,26 +384,26 @@ static int st_press_read_raw(struct iio_dev *indio_dev, | |||
| 364 | 384 | ||
| 365 | return IIO_VAL_INT; | 385 | return IIO_VAL_INT; |
| 366 | case IIO_CHAN_INFO_SCALE: | 386 | case IIO_CHAN_INFO_SCALE: |
| 367 | *val = 0; | ||
| 368 | |||
| 369 | switch (ch->type) { | 387 | switch (ch->type) { |
| 370 | case IIO_PRESSURE: | 388 | case IIO_PRESSURE: |
| 389 | *val = 0; | ||
| 371 | *val2 = press_data->current_fullscale->gain; | 390 | *val2 = press_data->current_fullscale->gain; |
| 372 | break; | 391 | return IIO_VAL_INT_PLUS_NANO; |
| 373 | case IIO_TEMP: | 392 | case IIO_TEMP: |
| 393 | *val = MCELSIUS_PER_CELSIUS; | ||
| 374 | *val2 = press_data->current_fullscale->gain2; | 394 | *val2 = press_data->current_fullscale->gain2; |
| 375 | break; | 395 | return IIO_VAL_FRACTIONAL; |
| 376 | default: | 396 | default: |
| 377 | err = -EINVAL; | 397 | err = -EINVAL; |
| 378 | goto read_error; | 398 | goto read_error; |
| 379 | } | 399 | } |
| 380 | 400 | ||
| 381 | return IIO_VAL_INT_PLUS_NANO; | ||
| 382 | case IIO_CHAN_INFO_OFFSET: | 401 | case IIO_CHAN_INFO_OFFSET: |
| 383 | switch (ch->type) { | 402 | switch (ch->type) { |
| 384 | case IIO_TEMP: | 403 | case IIO_TEMP: |
| 385 | *val = 425; | 404 | *val = ST_PRESS_MILLI_CELSIUS_OFFSET * |
| 386 | *val2 = 10; | 405 | press_data->current_fullscale->gain2; |
| 406 | *val2 = MCELSIUS_PER_CELSIUS; | ||
| 387 | break; | 407 | break; |
| 388 | default: | 408 | default: |
| 389 | err = -EINVAL; | 409 | err = -EINVAL; |
| @@ -425,6 +445,7 @@ static const struct iio_info press_info = { | |||
| 425 | static const struct iio_trigger_ops st_press_trigger_ops = { | 445 | static const struct iio_trigger_ops st_press_trigger_ops = { |
| 426 | .owner = THIS_MODULE, | 446 | .owner = THIS_MODULE, |
| 427 | .set_trigger_state = ST_PRESS_TRIGGER_SET_STATE, | 447 | .set_trigger_state = ST_PRESS_TRIGGER_SET_STATE, |
| 448 | .validate_device = st_sensors_validate_device, | ||
| 428 | }; | 449 | }; |
| 429 | #define ST_PRESS_TRIGGER_OPS (&st_press_trigger_ops) | 450 | #define ST_PRESS_TRIGGER_OPS (&st_press_trigger_ops) |
| 430 | #else | 451 | #else |
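Note on the st_pressure hunks above: the temperature scale is now reported as a fraction (IIO_VAL_FRACTIONAL) and the offset is expressed in raw LSB instead of a hard-coded 42.5 value. A minimal userspace sketch of how a consumer combines the two, assuming the 480 LSB/degC sensitivity of the LPS331AP/LPS25H (values are hypothetical, not read from a real device):

    #include <stdio.h>

    int main(void)
    {
            long raw = 1024;                          /* hypothetical in_temp_raw reading */
            double scale = 1000.0 / 480.0;            /* in_temp_scale: milli-degC per LSB */
            double offset = 42500.0 * 480.0 / 1000.0; /* in_temp_offset, reported in raw LSB */

            /* IIO convention: processed = (raw + offset) * scale */
            printf("temperature: %.1f milli-degC\n", ((double)raw + offset) * scale);
            return 0;
    }

For this example reading the result is about 44633 milli-degC, i.e. 42.5 degC + 1024/480 degC, which matches the datasheet formula the new macros encode.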
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c index f4d29d5dbd5f..e2f926cdcad2 100644 --- a/drivers/iio/proximity/as3935.c +++ b/drivers/iio/proximity/as3935.c | |||
| @@ -64,6 +64,7 @@ struct as3935_state { | |||
| 64 | struct delayed_work work; | 64 | struct delayed_work work; |
| 65 | 65 | ||
| 66 | u32 tune_cap; | 66 | u32 tune_cap; |
| 67 | u8 buffer[16]; /* 8-bit data + 56-bit padding + 64-bit timestamp */ | ||
| 67 | u8 buf[2] ____cacheline_aligned; | 68 | u8 buf[2] ____cacheline_aligned; |
| 68 | }; | 69 | }; |
| 69 | 70 | ||
| @@ -72,7 +73,8 @@ static const struct iio_chan_spec as3935_channels[] = { | |||
| 72 | .type = IIO_PROXIMITY, | 73 | .type = IIO_PROXIMITY, |
| 73 | .info_mask_separate = | 74 | .info_mask_separate = |
| 74 | BIT(IIO_CHAN_INFO_RAW) | | 75 | BIT(IIO_CHAN_INFO_RAW) | |
| 75 | BIT(IIO_CHAN_INFO_PROCESSED), | 76 | BIT(IIO_CHAN_INFO_PROCESSED) | |
| 77 | BIT(IIO_CHAN_INFO_SCALE), | ||
| 76 | .scan_index = 0, | 78 | .scan_index = 0, |
| 77 | .scan_type = { | 79 | .scan_type = { |
| 78 | .sign = 'u', | 80 | .sign = 'u', |
| @@ -181,7 +183,12 @@ static int as3935_read_raw(struct iio_dev *indio_dev, | |||
| 181 | /* storm out of range */ | 183 | /* storm out of range */ |
| 182 | if (*val == AS3935_DATA_MASK) | 184 | if (*val == AS3935_DATA_MASK) |
| 183 | return -EINVAL; | 185 | return -EINVAL; |
| 184 | *val *= 1000; | 186 | |
| 187 | if (m == IIO_CHAN_INFO_PROCESSED) | ||
| 188 | *val *= 1000; | ||
| 189 | break; | ||
| 190 | case IIO_CHAN_INFO_SCALE: | ||
| 191 | *val = 1000; | ||
| 185 | break; | 192 | break; |
| 186 | default: | 193 | default: |
| 187 | return -EINVAL; | 194 | return -EINVAL; |
| @@ -206,10 +213,10 @@ static irqreturn_t as3935_trigger_handler(int irq, void *private) | |||
| 206 | ret = as3935_read(st, AS3935_DATA, &val); | 213 | ret = as3935_read(st, AS3935_DATA, &val); |
| 207 | if (ret) | 214 | if (ret) |
| 208 | goto err_read; | 215 | goto err_read; |
| 209 | val &= AS3935_DATA_MASK; | ||
| 210 | val *= 1000; | ||
| 211 | 216 | ||
| 212 | iio_push_to_buffers_with_timestamp(indio_dev, &val, pf->timestamp); | 217 | st->buffer[0] = val & AS3935_DATA_MASK; |
| 218 | iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer, | ||
| 219 | pf->timestamp); | ||
| 213 | err_read: | 220 | err_read: |
| 214 | iio_trigger_notify_done(indio_dev->trig); | 221 | iio_trigger_notify_done(indio_dev->trig); |
| 215 | 222 | ||
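The new 16-byte buffer in as3935_state exists because iio_push_to_buffers_with_timestamp() expects a buffer large enough for all scan elements plus an 8-byte aligned 64-bit timestamp; pushing a bare u32 from the stack did not provide that. An illustrative layout of what the flat u8 buffer[16] is sized for (a sketch, not the driver's actual declaration):

    #include <linux/types.h>

    struct as3935_scan_sketch {
            u8  distance;   /* AS3935_DATA & AS3935_DATA_MASK */
            u8  pad[7];     /* 56 bits of padding up to the aligned timestamp slot */
            s64 timestamp;  /* written by iio_push_to_buffers_with_timestamp() */
    };                      /* sizeof() == 16, matching st->buffer */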
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index c2e257d97eff..1a2984c28b95 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c | |||
| @@ -178,6 +178,7 @@ static int write_gid(struct ib_device *ib_dev, u8 port, | |||
| 178 | { | 178 | { |
| 179 | int ret = 0; | 179 | int ret = 0; |
| 180 | struct net_device *old_net_dev; | 180 | struct net_device *old_net_dev; |
| 181 | enum ib_gid_type old_gid_type; | ||
| 181 | 182 | ||
| 182 | /* in rdma_cap_roce_gid_table, this function should be protected by a | 183 | * sleep-able lock. |
| 183 | * sleep-able lock. | 184 | * sleep-able lock. |
| @@ -199,6 +200,7 @@ static int write_gid(struct ib_device *ib_dev, u8 port, | |||
| 199 | } | 200 | } |
| 200 | 201 | ||
| 201 | old_net_dev = table->data_vec[ix].attr.ndev; | 202 | old_net_dev = table->data_vec[ix].attr.ndev; |
| 203 | old_gid_type = table->data_vec[ix].attr.gid_type; | ||
| 202 | if (old_net_dev && old_net_dev != attr->ndev) | 204 | if (old_net_dev && old_net_dev != attr->ndev) |
| 203 | dev_put(old_net_dev); | 205 | dev_put(old_net_dev); |
| 204 | /* if modify_gid failed, just delete the old gid */ | 206 | /* if modify_gid failed, just delete the old gid */ |
| @@ -207,10 +209,14 @@ static int write_gid(struct ib_device *ib_dev, u8 port, | |||
| 207 | attr = &zattr; | 209 | attr = &zattr; |
| 208 | table->data_vec[ix].context = NULL; | 210 | table->data_vec[ix].context = NULL; |
| 209 | } | 211 | } |
| 210 | if (default_gid) | 212 | |
| 211 | table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT; | ||
| 212 | memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid)); | 213 | memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid)); |
| 213 | memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr)); | 214 | memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr)); |
| 215 | if (default_gid) { | ||
| 216 | table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT; | ||
| 217 | if (action == GID_TABLE_WRITE_ACTION_DEL) | ||
| 218 | table->data_vec[ix].attr.gid_type = old_gid_type; | ||
| 219 | } | ||
| 214 | if (table->data_vec[ix].attr.ndev && | 220 | if (table->data_vec[ix].attr.ndev && |
| 215 | table->data_vec[ix].attr.ndev != old_net_dev) | 221 | table->data_vec[ix].attr.ndev != old_net_dev) |
| 216 | dev_hold(table->data_vec[ix].attr.ndev); | 222 | dev_hold(table->data_vec[ix].attr.ndev); |
| @@ -405,7 +411,9 @@ int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port, | |||
| 405 | 411 | ||
| 406 | for (ix = 0; ix < table->sz; ix++) | 412 | for (ix = 0; ix < table->sz; ix++) |
| 407 | if (table->data_vec[ix].attr.ndev == ndev) | 413 | if (table->data_vec[ix].attr.ndev == ndev) |
| 408 | if (!del_gid(ib_dev, port, table, ix, false)) | 414 | if (!del_gid(ib_dev, port, table, ix, |
| 415 | !!(table->data_vec[ix].props & | ||
| 416 | GID_TABLE_ENTRY_DEFAULT))) | ||
| 409 | deleted = true; | 417 | deleted = true; |
| 410 | 418 | ||
| 411 | write_unlock_irq(&table->rwlock); | 419 | write_unlock_irq(&table->rwlock); |
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 1d92e091e22e..c99525512b34 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c | |||
| @@ -3452,14 +3452,14 @@ static int cm_establish(struct ib_cm_id *cm_id) | |||
| 3452 | work->cm_event.event = IB_CM_USER_ESTABLISHED; | 3452 | work->cm_event.event = IB_CM_USER_ESTABLISHED; |
| 3453 | 3453 | ||
| 3454 | /* Check if the device started its remove_one */ | 3454 | /* Check if the device started its remove_one */ |
| 3455 | spin_lock_irq(&cm.lock); | 3455 | spin_lock_irqsave(&cm.lock, flags); |
| 3456 | if (!cm_dev->going_down) { | 3456 | if (!cm_dev->going_down) { |
| 3457 | queue_delayed_work(cm.wq, &work->work, 0); | 3457 | queue_delayed_work(cm.wq, &work->work, 0); |
| 3458 | } else { | 3458 | } else { |
| 3459 | kfree(work); | 3459 | kfree(work); |
| 3460 | ret = -ENODEV; | 3460 | ret = -ENODEV; |
| 3461 | } | 3461 | } |
| 3462 | spin_unlock_irq(&cm.lock); | 3462 | spin_unlock_irqrestore(&cm.lock, flags); |
| 3463 | 3463 | ||
| 3464 | out: | 3464 | out: |
| 3465 | return ret; | 3465 | return ret; |
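The cm.c change swaps spin_lock_irq() for spin_lock_irqsave(), presumably because cm_establish() can be reached from a context that already has interrupts disabled; the _irq variant would unconditionally re-enable them on unlock. A generic sketch of the pattern (lock and function names are made up, not from the patch):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);   /* hypothetical lock */

    static void example_update(int *counter)
    {
            unsigned long flags;

            /* preserves the caller's interrupt state across the section */
            spin_lock_irqsave(&example_lock, flags);
            (*counter)++;                   /* critical section */
            spin_unlock_irqrestore(&example_lock, flags);
    }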
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index f0c91ba3178a..ad1b1adcf6f0 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c | |||
| @@ -708,17 +708,6 @@ static void cma_deref_id(struct rdma_id_private *id_priv) | |||
| 708 | complete(&id_priv->comp); | 708 | complete(&id_priv->comp); |
| 709 | } | 709 | } |
| 710 | 710 | ||
| 711 | static int cma_disable_callback(struct rdma_id_private *id_priv, | ||
| 712 | enum rdma_cm_state state) | ||
| 713 | { | ||
| 714 | mutex_lock(&id_priv->handler_mutex); | ||
| 715 | if (id_priv->state != state) { | ||
| 716 | mutex_unlock(&id_priv->handler_mutex); | ||
| 717 | return -EINVAL; | ||
| 718 | } | ||
| 719 | return 0; | ||
| 720 | } | ||
| 721 | |||
| 722 | struct rdma_cm_id *rdma_create_id(struct net *net, | 711 | struct rdma_cm_id *rdma_create_id(struct net *net, |
| 723 | rdma_cm_event_handler event_handler, | 712 | rdma_cm_event_handler event_handler, |
| 724 | void *context, enum rdma_port_space ps, | 713 | void *context, enum rdma_port_space ps, |
| @@ -1671,11 +1660,12 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) | |||
| 1671 | struct rdma_cm_event event; | 1660 | struct rdma_cm_event event; |
| 1672 | int ret = 0; | 1661 | int ret = 0; |
| 1673 | 1662 | ||
| 1663 | mutex_lock(&id_priv->handler_mutex); | ||
| 1674 | if ((ib_event->event != IB_CM_TIMEWAIT_EXIT && | 1664 | if ((ib_event->event != IB_CM_TIMEWAIT_EXIT && |
| 1675 | cma_disable_callback(id_priv, RDMA_CM_CONNECT)) || | 1665 | id_priv->state != RDMA_CM_CONNECT) || |
| 1676 | (ib_event->event == IB_CM_TIMEWAIT_EXIT && | 1666 | (ib_event->event == IB_CM_TIMEWAIT_EXIT && |
| 1677 | cma_disable_callback(id_priv, RDMA_CM_DISCONNECT))) | 1667 | id_priv->state != RDMA_CM_DISCONNECT)) |
| 1678 | return 0; | 1668 | goto out; |
| 1679 | 1669 | ||
| 1680 | memset(&event, 0, sizeof event); | 1670 | memset(&event, 0, sizeof event); |
| 1681 | switch (ib_event->event) { | 1671 | switch (ib_event->event) { |
| @@ -1870,7 +1860,7 @@ static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_e | |||
| 1870 | 1860 | ||
| 1871 | static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) | 1861 | static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) |
| 1872 | { | 1862 | { |
| 1873 | struct rdma_id_private *listen_id, *conn_id; | 1863 | struct rdma_id_private *listen_id, *conn_id = NULL; |
| 1874 | struct rdma_cm_event event; | 1864 | struct rdma_cm_event event; |
| 1875 | struct net_device *net_dev; | 1865 | struct net_device *net_dev; |
| 1876 | int offset, ret; | 1866 | int offset, ret; |
| @@ -1884,9 +1874,10 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) | |||
| 1884 | goto net_dev_put; | 1874 | goto net_dev_put; |
| 1885 | } | 1875 | } |
| 1886 | 1876 | ||
| 1887 | if (cma_disable_callback(listen_id, RDMA_CM_LISTEN)) { | 1877 | mutex_lock(&listen_id->handler_mutex); |
| 1878 | if (listen_id->state != RDMA_CM_LISTEN) { | ||
| 1888 | ret = -ECONNABORTED; | 1879 | ret = -ECONNABORTED; |
| 1889 | goto net_dev_put; | 1880 | goto err1; |
| 1890 | } | 1881 | } |
| 1891 | 1882 | ||
| 1892 | memset(&event, 0, sizeof event); | 1883 | memset(&event, 0, sizeof event); |
| @@ -1976,8 +1967,9 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event) | |||
| 1976 | struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; | 1967 | struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; |
| 1977 | struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; | 1968 | struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; |
| 1978 | 1969 | ||
| 1979 | if (cma_disable_callback(id_priv, RDMA_CM_CONNECT)) | 1970 | mutex_lock(&id_priv->handler_mutex); |
| 1980 | return 0; | 1971 | if (id_priv->state != RDMA_CM_CONNECT) |
| 1972 | goto out; | ||
| 1981 | 1973 | ||
| 1982 | memset(&event, 0, sizeof event); | 1974 | memset(&event, 0, sizeof event); |
| 1983 | switch (iw_event->event) { | 1975 | switch (iw_event->event) { |
| @@ -2029,6 +2021,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event) | |||
| 2029 | return ret; | 2021 | return ret; |
| 2030 | } | 2022 | } |
| 2031 | 2023 | ||
| 2024 | out: | ||
| 2032 | mutex_unlock(&id_priv->handler_mutex); | 2025 | mutex_unlock(&id_priv->handler_mutex); |
| 2033 | return ret; | 2026 | return ret; |
| 2034 | } | 2027 | } |
| @@ -2039,13 +2032,15 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id, | |||
| 2039 | struct rdma_cm_id *new_cm_id; | 2032 | struct rdma_cm_id *new_cm_id; |
| 2040 | struct rdma_id_private *listen_id, *conn_id; | 2033 | struct rdma_id_private *listen_id, *conn_id; |
| 2041 | struct rdma_cm_event event; | 2034 | struct rdma_cm_event event; |
| 2042 | int ret; | 2035 | int ret = -ECONNABORTED; |
| 2043 | struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; | 2036 | struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; |
| 2044 | struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; | 2037 | struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; |
| 2045 | 2038 | ||
| 2046 | listen_id = cm_id->context; | 2039 | listen_id = cm_id->context; |
| 2047 | if (cma_disable_callback(listen_id, RDMA_CM_LISTEN)) | 2040 | |
| 2048 | return -ECONNABORTED; | 2041 | mutex_lock(&listen_id->handler_mutex); |
| 2042 | if (listen_id->state != RDMA_CM_LISTEN) | ||
| 2043 | goto out; | ||
| 2049 | 2044 | ||
| 2050 | /* Create a new RDMA id for the new IW CM ID */ | 2045 | /* Create a new RDMA id for the new IW CM ID */ |
| 2051 | new_cm_id = rdma_create_id(listen_id->id.route.addr.dev_addr.net, | 2046 | new_cm_id = rdma_create_id(listen_id->id.route.addr.dev_addr.net, |
| @@ -3216,8 +3211,9 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id, | |||
| 3216 | struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd; | 3211 | struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd; |
| 3217 | int ret = 0; | 3212 | int ret = 0; |
| 3218 | 3213 | ||
| 3219 | if (cma_disable_callback(id_priv, RDMA_CM_CONNECT)) | 3214 | mutex_lock(&id_priv->handler_mutex); |
| 3220 | return 0; | 3215 | if (id_priv->state != RDMA_CM_CONNECT) |
| 3216 | goto out; | ||
| 3221 | 3217 | ||
| 3222 | memset(&event, 0, sizeof event); | 3218 | memset(&event, 0, sizeof event); |
| 3223 | switch (ib_event->event) { | 3219 | switch (ib_event->event) { |
| @@ -3673,12 +3669,13 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast) | |||
| 3673 | struct rdma_id_private *id_priv; | 3669 | struct rdma_id_private *id_priv; |
| 3674 | struct cma_multicast *mc = multicast->context; | 3670 | struct cma_multicast *mc = multicast->context; |
| 3675 | struct rdma_cm_event event; | 3671 | struct rdma_cm_event event; |
| 3676 | int ret; | 3672 | int ret = 0; |
| 3677 | 3673 | ||
| 3678 | id_priv = mc->id_priv; | 3674 | id_priv = mc->id_priv; |
| 3679 | if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) && | 3675 | mutex_lock(&id_priv->handler_mutex); |
| 3680 | cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED)) | 3676 | if (id_priv->state != RDMA_CM_ADDR_BOUND && |
| 3681 | return 0; | 3677 | id_priv->state != RDMA_CM_ADDR_RESOLVED) |
| 3678 | goto out; | ||
| 3682 | 3679 | ||
| 3683 | if (!status) | 3680 | if (!status) |
| 3684 | status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey)); | 3681 | status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey)); |
| @@ -3720,6 +3717,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast) | |||
| 3720 | return 0; | 3717 | return 0; |
| 3721 | } | 3718 | } |
| 3722 | 3719 | ||
| 3720 | out: | ||
| 3723 | mutex_unlock(&id_priv->handler_mutex); | 3721 | mutex_unlock(&id_priv->handler_mutex); |
| 3724 | return 0; | 3722 | return 0; |
| 3725 | } | 3723 | } |
| @@ -3878,12 +3876,12 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv, | |||
| 3878 | gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num - | 3876 | gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num - |
| 3879 | rdma_start_port(id_priv->cma_dev->device)]; | 3877 | rdma_start_port(id_priv->cma_dev->device)]; |
| 3880 | if (addr->sa_family == AF_INET) { | 3878 | if (addr->sa_family == AF_INET) { |
| 3881 | if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) | 3879 | if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) { |
| 3880 | mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT; | ||
| 3882 | err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid, | 3881 | err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid, |
| 3883 | true); | 3882 | true); |
| 3884 | if (!err) { | 3883 | if (!err) |
| 3885 | mc->igmp_joined = true; | 3884 | mc->igmp_joined = true; |
| 3886 | mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT; | ||
| 3887 | } | 3885 | } |
| 3888 | } else { | 3886 | } else { |
| 3889 | if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) | 3887 | if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) |
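The cma.c hunks all apply one pattern: instead of cma_disable_callback() (which returned with the mutex held only on success), each handler now takes handler_mutex unconditionally, checks the state under the lock, and bails out through a shared unlock path. A condensed sketch of that pattern, using the names from the hunks above:

    mutex_lock(&id_priv->handler_mutex);
    if (id_priv->state != RDMA_CM_CONNECT)
            goto out;               /* wrong state: fall through to the unlock */

    /* ... handle the event while the state cannot change underneath us ... */

    out:
            mutex_unlock(&id_priv->handler_mutex);
            return ret;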
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 5516fb070344..5c155fa91eec 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c | |||
| @@ -661,6 +661,9 @@ int ib_query_port(struct ib_device *device, | |||
| 661 | if (err || port_attr->subnet_prefix) | 661 | if (err || port_attr->subnet_prefix) |
| 662 | return err; | 662 | return err; |
| 663 | 663 | ||
| 664 | if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND) | ||
| 665 | return 0; | ||
| 666 | |||
| 664 | err = ib_query_gid(device, port_num, 0, &gid, NULL); | 667 | err = ib_query_gid(device, port_num, 0, &gid, NULL); |
| 665 | if (err) | 668 | if (err) |
| 666 | return err; | 669 | return err; |
| @@ -1024,7 +1027,8 @@ static int __init ib_core_init(void) | |||
| 1024 | goto err_mad; | 1027 | goto err_mad; |
| 1025 | } | 1028 | } |
| 1026 | 1029 | ||
| 1027 | if (ib_add_ibnl_clients()) { | 1030 | ret = ib_add_ibnl_clients(); |
| 1031 | if (ret) { | ||
| 1028 | pr_warn("Couldn't register ibnl clients\n"); | 1032 | pr_warn("Couldn't register ibnl clients\n"); |
| 1029 | goto err_sa; | 1033 | goto err_sa; |
| 1030 | } | 1034 | } |
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c index 43e3fa27102b..1c41b95cefec 100644 --- a/drivers/infiniband/core/iwpm_msg.c +++ b/drivers/infiniband/core/iwpm_msg.c | |||
| @@ -506,7 +506,7 @@ int iwpm_add_and_query_mapping_cb(struct sk_buff *skb, | |||
| 506 | if (!nlmsg_request) { | 506 | if (!nlmsg_request) { |
| 507 | pr_info("%s: Could not find a matching request (seq = %u)\n", | 507 | pr_info("%s: Could not find a matching request (seq = %u)\n", |
| 508 | __func__, msg_seq); | 508 | __func__, msg_seq); |
| 509 | return -EINVAL; | 509 | return -EINVAL; |
| 510 | } | 510 | } |
| 511 | pm_msg = nlmsg_request->req_buffer; | 511 | pm_msg = nlmsg_request->req_buffer; |
| 512 | local_sockaddr = (struct sockaddr_storage *) | 512 | local_sockaddr = (struct sockaddr_storage *) |
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 82fb511112da..2d49228f28b2 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c | |||
| @@ -1638,9 +1638,9 @@ static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv) | |||
| 1638 | /* Now, check to see if there are any methods still in use */ | 1638 | /* Now, check to see if there are any methods still in use */ |
| 1639 | if (!check_method_table(method)) { | 1639 | if (!check_method_table(method)) { |
| 1640 | /* If not, release management method table */ | 1640 | /* If not, release management method table */ |
| 1641 | kfree(method); | 1641 | kfree(method); |
| 1642 | class->method_table[mgmt_class] = NULL; | 1642 | class->method_table[mgmt_class] = NULL; |
| 1643 | /* Any management classes left ? */ | 1643 | /* Any management classes left ? */ |
| 1644 | if (!check_class_table(class)) { | 1644 | if (!check_class_table(class)) { |
| 1645 | /* If not, release management class table */ | 1645 | /* If not, release management class table */ |
| 1646 | kfree(class); | 1646 | kfree(class); |
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index 5e573bb18660..a5793c8f1590 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c | |||
| @@ -889,9 +889,9 @@ static struct attribute *alloc_hsa_lifespan(char *name, u8 port_num) | |||
| 889 | static void setup_hw_stats(struct ib_device *device, struct ib_port *port, | 889 | static void setup_hw_stats(struct ib_device *device, struct ib_port *port, |
| 890 | u8 port_num) | 890 | u8 port_num) |
| 891 | { | 891 | { |
| 892 | struct attribute_group *hsag = NULL; | 892 | struct attribute_group *hsag; |
| 893 | struct rdma_hw_stats *stats; | 893 | struct rdma_hw_stats *stats; |
| 894 | int i = 0, ret; | 894 | int i, ret; |
| 895 | 895 | ||
| 896 | stats = device->alloc_hw_stats(device, port_num); | 896 | stats = device->alloc_hw_stats(device, port_num); |
| 897 | 897 | ||
| @@ -899,19 +899,22 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port, | |||
| 899 | return; | 899 | return; |
| 900 | 900 | ||
| 901 | if (!stats->names || stats->num_counters <= 0) | 901 | if (!stats->names || stats->num_counters <= 0) |
| 902 | goto err; | 902 | goto err_free_stats; |
| 903 | 903 | ||
| 904 | /* | ||
| 905 | * Two extra attribue elements here, one for the lifespan entry and | ||
| 906 | * one to NULL terminate the list for the sysfs core code | ||
| 907 | */ | ||
| 904 | hsag = kzalloc(sizeof(*hsag) + | 908 | hsag = kzalloc(sizeof(*hsag) + |
| 905 | // 1 extra for the lifespan config entry | 909 | sizeof(void *) * (stats->num_counters + 2), |
| 906 | sizeof(void *) * (stats->num_counters + 1), | ||
| 907 | GFP_KERNEL); | 910 | GFP_KERNEL); |
| 908 | if (!hsag) | 911 | if (!hsag) |
| 909 | return; | 912 | goto err_free_stats; |
| 910 | 913 | ||
| 911 | ret = device->get_hw_stats(device, stats, port_num, | 914 | ret = device->get_hw_stats(device, stats, port_num, |
| 912 | stats->num_counters); | 915 | stats->num_counters); |
| 913 | if (ret != stats->num_counters) | 916 | if (ret != stats->num_counters) |
| 914 | goto err; | 917 | goto err_free_hsag; |
| 915 | 918 | ||
| 916 | stats->timestamp = jiffies; | 919 | stats->timestamp = jiffies; |
| 917 | 920 | ||
| @@ -922,10 +925,13 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port, | |||
| 922 | hsag->attrs[i] = alloc_hsa(i, port_num, stats->names[i]); | 925 | hsag->attrs[i] = alloc_hsa(i, port_num, stats->names[i]); |
| 923 | if (!hsag->attrs[i]) | 926 | if (!hsag->attrs[i]) |
| 924 | goto err; | 927 | goto err; |
| 928 | sysfs_attr_init(hsag->attrs[i]); | ||
| 925 | } | 929 | } |
| 926 | 930 | ||
| 927 | /* treat an error here as non-fatal */ | 931 | /* treat an error here as non-fatal */ |
| 928 | hsag->attrs[i] = alloc_hsa_lifespan("lifespan", port_num); | 932 | hsag->attrs[i] = alloc_hsa_lifespan("lifespan", port_num); |
| 933 | if (hsag->attrs[i]) | ||
| 934 | sysfs_attr_init(hsag->attrs[i]); | ||
| 929 | 935 | ||
| 930 | if (port) { | 936 | if (port) { |
| 931 | struct kobject *kobj = &port->kobj; | 937 | struct kobject *kobj = &port->kobj; |
| @@ -946,10 +952,12 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port, | |||
| 946 | return; | 952 | return; |
| 947 | 953 | ||
| 948 | err: | 954 | err: |
| 949 | kfree(stats); | ||
| 950 | for (; i >= 0; i--) | 955 | for (; i >= 0; i--) |
| 951 | kfree(hsag->attrs[i]); | 956 | kfree(hsag->attrs[i]); |
| 957 | err_free_hsag: | ||
| 952 | kfree(hsag); | 958 | kfree(hsag); |
| 959 | err_free_stats: | ||
| 960 | kfree(stats); | ||
| 953 | return; | 961 | return; |
| 954 | } | 962 | } |
| 955 | 963 | ||
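Two fixes are folded into setup_hw_stats() above: the attribute array now reserves room for both the lifespan entry and the NULL terminator the sysfs core walks to, and the error paths free stats and hsag under separate labels so neither is leaked or freed twice. A generic illustration of why the terminator slot matters (the attribute names here are made up):

    #include <linux/sysfs.h>

    static struct attribute counter0_attr = { .name = "counter0", .mode = 0444 };
    static struct attribute lifespan_attr = { .name = "lifespan", .mode = 0644 };

    static struct attribute *example_hw_attrs[] = {
            &counter0_attr,         /* one slot per hardware counter */
            &lifespan_attr,         /* the extra "lifespan" entry */
            NULL,                   /* terminator the old allocation had no room for */
    };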
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 1a8babb8ee3c..825021d1008b 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c | |||
| @@ -1747,7 +1747,7 @@ static int create_qp(struct ib_uverbs_file *file, | |||
| 1747 | struct ib_srq *srq = NULL; | 1747 | struct ib_srq *srq = NULL; |
| 1748 | struct ib_qp *qp; | 1748 | struct ib_qp *qp; |
| 1749 | char *buf; | 1749 | char *buf; |
| 1750 | struct ib_qp_init_attr attr; | 1750 | struct ib_qp_init_attr attr = {}; |
| 1751 | struct ib_uverbs_ex_create_qp_resp resp; | 1751 | struct ib_uverbs_ex_create_qp_resp resp; |
| 1752 | int ret; | 1752 | int ret; |
| 1753 | 1753 | ||
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 1d7d4cf442e3..6298f54b4137 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c | |||
| @@ -511,12 +511,16 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, | |||
| 511 | ah_attr->grh.dgid = sgid; | 511 | ah_attr->grh.dgid = sgid; |
| 512 | 512 | ||
| 513 | if (!rdma_cap_eth_ah(device, port_num)) { | 513 | if (!rdma_cap_eth_ah(device, port_num)) { |
| 514 | ret = ib_find_cached_gid_by_port(device, &dgid, | 514 | if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) { |
| 515 | IB_GID_TYPE_IB, | 515 | ret = ib_find_cached_gid_by_port(device, &dgid, |
| 516 | port_num, NULL, | 516 | IB_GID_TYPE_IB, |
| 517 | &gid_index); | 517 | port_num, NULL, |
| 518 | if (ret) | 518 | &gid_index); |
| 519 | return ret; | 519 | if (ret) |
| 520 | return ret; | ||
| 521 | } else { | ||
| 522 | gid_index = 0; | ||
| 523 | } | ||
| 520 | } | 524 | } |
| 521 | 525 | ||
| 522 | ah_attr->grh.sgid_index = (u8) gid_index; | 526 | ah_attr->grh.sgid_index = (u8) gid_index; |
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c index 6e7050ab9e16..14d7eeb09be6 100644 --- a/drivers/infiniband/hw/hfi1/affinity.c +++ b/drivers/infiniband/hw/hfi1/affinity.c | |||
| @@ -300,16 +300,15 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node) | |||
| 300 | const struct cpumask *node_mask, | 300 | const struct cpumask *node_mask, |
| 301 | *proc_mask = tsk_cpus_allowed(current); | 301 | *proc_mask = tsk_cpus_allowed(current); |
| 302 | struct cpu_mask_set *set = &dd->affinity->proc; | 302 | struct cpu_mask_set *set = &dd->affinity->proc; |
| 303 | char buf[1024]; | ||
| 304 | 303 | ||
| 305 | /* | 304 | /* |
| 306 | * check whether process/context affinity has already | 305 | * check whether process/context affinity has already |
| 307 | * been set | 306 | * been set |
| 308 | */ | 307 | */ |
| 309 | if (cpumask_weight(proc_mask) == 1) { | 308 | if (cpumask_weight(proc_mask) == 1) { |
| 310 | scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask)); | 309 | hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl", |
| 311 | hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %s", | 310 | current->pid, current->comm, |
| 312 | current->pid, current->comm, buf); | 311 | cpumask_pr_args(proc_mask)); |
| 313 | /* | 312 | /* |
| 314 | * Mark the pre-set CPU as used. This is atomic so we don't | 313 | * Mark the pre-set CPU as used. This is atomic so we don't |
| 315 | * need the lock | 314 | * need the lock |
| @@ -318,9 +317,9 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node) | |||
| 318 | cpumask_set_cpu(cpu, &set->used); | 317 | cpumask_set_cpu(cpu, &set->used); |
| 319 | goto done; | 318 | goto done; |
| 320 | } else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) { | 319 | } else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) { |
| 321 | scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask)); | 320 | hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl", |
| 322 | hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %s", | 321 | current->pid, current->comm, |
| 323 | current->pid, current->comm, buf); | 322 | cpumask_pr_args(proc_mask)); |
| 324 | goto done; | 323 | goto done; |
| 325 | } | 324 | } |
| 326 | 325 | ||
| @@ -356,8 +355,8 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node) | |||
| 356 | cpumask_or(intrs, intrs, (dd->affinity->rcv_intr.gen ? | 355 | cpumask_or(intrs, intrs, (dd->affinity->rcv_intr.gen ? |
| 357 | &dd->affinity->rcv_intr.mask : | 356 | &dd->affinity->rcv_intr.mask : |
| 358 | &dd->affinity->rcv_intr.used)); | 357 | &dd->affinity->rcv_intr.used)); |
| 359 | scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(intrs)); | 358 | hfi1_cdbg(PROC, "CPUs used by interrupts: %*pbl", |
| 360 | hfi1_cdbg(PROC, "CPUs used by interrupts: %s", buf); | 359 | cpumask_pr_args(intrs)); |
| 361 | 360 | ||
| 362 | /* | 361 | /* |
| 363 | * If we don't have a NUMA node requested, preference is towards | 362 | * If we don't have a NUMA node requested, preference is towards |
| @@ -366,18 +365,16 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node) | |||
| 366 | if (node == -1) | 365 | if (node == -1) |
| 367 | node = dd->node; | 366 | node = dd->node; |
| 368 | node_mask = cpumask_of_node(node); | 367 | node_mask = cpumask_of_node(node); |
| 369 | scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(node_mask)); | 368 | hfi1_cdbg(PROC, "device on NUMA %u, CPUs %*pbl", node, |
| 370 | hfi1_cdbg(PROC, "device on NUMA %u, CPUs %s", node, buf); | 369 | cpumask_pr_args(node_mask)); |
| 371 | 370 | ||
| 372 | /* diff will hold all unused cpus */ | 371 | /* diff will hold all unused cpus */ |
| 373 | cpumask_andnot(diff, &set->mask, &set->used); | 372 | cpumask_andnot(diff, &set->mask, &set->used); |
| 374 | scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(diff)); | 373 | hfi1_cdbg(PROC, "unused CPUs (all) %*pbl", cpumask_pr_args(diff)); |
| 375 | hfi1_cdbg(PROC, "unused CPUs (all) %s", buf); | ||
| 376 | 374 | ||
| 377 | /* get cpumask of available CPUs on preferred NUMA */ | 375 | /* get cpumask of available CPUs on preferred NUMA */ |
| 378 | cpumask_and(mask, diff, node_mask); | 376 | cpumask_and(mask, diff, node_mask); |
| 379 | scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask)); | 377 | hfi1_cdbg(PROC, "available cpus on NUMA %*pbl", cpumask_pr_args(mask)); |
| 380 | hfi1_cdbg(PROC, "available cpus on NUMA %s", buf); | ||
| 381 | 378 | ||
| 382 | /* | 379 | /* |
| 383 | * At first, we don't want to place processes on the same | 380 | * At first, we don't want to place processes on the same |
| @@ -395,8 +392,8 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node) | |||
| 395 | cpumask_andnot(diff, &set->mask, &set->used); | 392 | cpumask_andnot(diff, &set->mask, &set->used); |
| 396 | cpumask_andnot(mask, diff, node_mask); | 393 | cpumask_andnot(mask, diff, node_mask); |
| 397 | } | 394 | } |
| 398 | scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask)); | 395 | hfi1_cdbg(PROC, "possible CPUs for process %*pbl", |
| 399 | hfi1_cdbg(PROC, "possible CPUs for process %s", buf); | 396 | cpumask_pr_args(mask)); |
| 400 | 397 | ||
| 401 | cpu = cpumask_first(mask); | 398 | cpu = cpumask_first(mask); |
| 402 | if (cpu >= nr_cpu_ids) /* empty */ | 399 | if (cpu >= nr_cpu_ids) /* empty */ |
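The affinity.c change drops the 1 KB stack buffer entirely: the %*pbl printk format specifier, fed by cpumask_pr_args(), prints a cpumask as a bit list with no intermediate formatting step. A one-line illustration outside this driver:

    /* prints e.g. "online CPUs: 0-3,6" without any staging buffer */
    pr_info("online CPUs: %*pbl\n", cpumask_pr_args(cpu_online_mask));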
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 3b876da745a1..f5de85178055 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c | |||
| @@ -1037,7 +1037,7 @@ static void dc_shutdown(struct hfi1_devdata *); | |||
| 1037 | static void dc_start(struct hfi1_devdata *); | 1037 | static void dc_start(struct hfi1_devdata *); |
| 1038 | static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp, | 1038 | static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp, |
| 1039 | unsigned int *np); | 1039 | unsigned int *np); |
| 1040 | static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd); | 1040 | static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd); |
| 1041 | 1041 | ||
| 1042 | /* | 1042 | /* |
| 1043 | * Error interrupt table entry. This is used as input to the interrupt | 1043 | * Error interrupt table entry. This is used as input to the interrupt |
| @@ -6962,8 +6962,6 @@ void handle_link_down(struct work_struct *work) | |||
| 6962 | } | 6962 | } |
| 6963 | 6963 | ||
| 6964 | reset_neighbor_info(ppd); | 6964 | reset_neighbor_info(ppd); |
| 6965 | if (ppd->mgmt_allowed) | ||
| 6966 | remove_full_mgmt_pkey(ppd); | ||
| 6967 | 6965 | ||
| 6968 | /* disable the port */ | 6966 | /* disable the port */ |
| 6969 | clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); | 6967 | clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); |
| @@ -7070,12 +7068,16 @@ static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd) | |||
| 7070 | __func__, ppd->pkeys[2], FULL_MGMT_P_KEY); | 7068 | __func__, ppd->pkeys[2], FULL_MGMT_P_KEY); |
| 7071 | ppd->pkeys[2] = FULL_MGMT_P_KEY; | 7069 | ppd->pkeys[2] = FULL_MGMT_P_KEY; |
| 7072 | (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); | 7070 | (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); |
| 7071 | hfi1_event_pkey_change(ppd->dd, ppd->port); | ||
| 7073 | } | 7072 | } |
| 7074 | 7073 | ||
| 7075 | static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd) | 7074 | static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd) |
| 7076 | { | 7075 | { |
| 7077 | ppd->pkeys[2] = 0; | 7076 | if (ppd->pkeys[2] != 0) { |
| 7078 | (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); | 7077 | ppd->pkeys[2] = 0; |
| 7078 | (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); | ||
| 7079 | hfi1_event_pkey_change(ppd->dd, ppd->port); | ||
| 7080 | } | ||
| 7079 | } | 7081 | } |
| 7080 | 7082 | ||
| 7081 | /* | 7083 | /* |
| @@ -7832,8 +7834,8 @@ static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg) | |||
| 7832 | * save first 2 flits in the packet that caused | 7834 | * save first 2 flits in the packet that caused |
| 7833 | * the error | 7835 | * the error |
| 7834 | */ | 7836 | */ |
| 7835 | dd->err_info_rcvport.packet_flit1 = hdr0; | 7837 | dd->err_info_rcvport.packet_flit1 = hdr0; |
| 7836 | dd->err_info_rcvport.packet_flit2 = hdr1; | 7838 | dd->err_info_rcvport.packet_flit2 = hdr1; |
| 7837 | } | 7839 | } |
| 7838 | switch (info) { | 7840 | switch (info) { |
| 7839 | case 1: | 7841 | case 1: |
| @@ -9168,6 +9170,13 @@ int start_link(struct hfi1_pportdata *ppd) | |||
| 9168 | return 0; | 9170 | return 0; |
| 9169 | } | 9171 | } |
| 9170 | 9172 | ||
| 9173 | /* | ||
| 9174 | * FULL_MGMT_P_KEY is cleared from the pkey table, so that the | ||
| 9175 | * pkey table can be configured properly if the HFI unit is connected | ||
| 9176 | * to switch port with MgmtAllowed=NO | ||
| 9177 | */ | ||
| 9178 | clear_full_mgmt_pkey(ppd); | ||
| 9179 | |||
| 9171 | return set_link_state(ppd, HLS_DN_POLL); | 9180 | return set_link_state(ppd, HLS_DN_POLL); |
| 9172 | } | 9181 | } |
| 9173 | 9182 | ||
| @@ -9777,7 +9786,7 @@ static void set_send_length(struct hfi1_pportdata *ppd) | |||
| 9777 | u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2) | 9786 | u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2) |
| 9778 | & SEND_LEN_CHECK1_LEN_VL15_MASK) << | 9787 | & SEND_LEN_CHECK1_LEN_VL15_MASK) << |
| 9779 | SEND_LEN_CHECK1_LEN_VL15_SHIFT; | 9788 | SEND_LEN_CHECK1_LEN_VL15_SHIFT; |
| 9780 | int i; | 9789 | int i, j; |
| 9781 | u32 thres; | 9790 | u32 thres; |
| 9782 | 9791 | ||
| 9783 | for (i = 0; i < ppd->vls_supported; i++) { | 9792 | for (i = 0; i < ppd->vls_supported; i++) { |
| @@ -9801,7 +9810,10 @@ static void set_send_length(struct hfi1_pportdata *ppd) | |||
| 9801 | sc_mtu_to_threshold(dd->vld[i].sc, | 9810 | sc_mtu_to_threshold(dd->vld[i].sc, |
| 9802 | dd->vld[i].mtu, | 9811 | dd->vld[i].mtu, |
| 9803 | dd->rcd[0]->rcvhdrqentsize)); | 9812 | dd->rcd[0]->rcvhdrqentsize)); |
| 9804 | sc_set_cr_threshold(dd->vld[i].sc, thres); | 9813 | for (j = 0; j < INIT_SC_PER_VL; j++) |
| 9814 | sc_set_cr_threshold( | ||
| 9815 | pio_select_send_context_vl(dd, j, i), | ||
| 9816 | thres); | ||
| 9805 | } | 9817 | } |
| 9806 | thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50), | 9818 | thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50), |
| 9807 | sc_mtu_to_threshold(dd->vld[15].sc, | 9819 | sc_mtu_to_threshold(dd->vld[15].sc, |
| @@ -11906,7 +11918,7 @@ static void update_synth_timer(unsigned long opaque) | |||
| 11906 | hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit); | 11918 | hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit); |
| 11907 | } | 11919 | } |
| 11908 | 11920 | ||
| 11909 | mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME); | 11921 | mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME); |
| 11910 | } | 11922 | } |
| 11911 | 11923 | ||
| 11912 | #define C_MAX_NAME 13 /* 12 chars + one for /0 */ | 11924 | #define C_MAX_NAME 13 /* 12 chars + one for /0 */ |
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index 7a5b0e676cc7..c702a009608f 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c | |||
| @@ -203,6 +203,9 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd, | |||
| 203 | 203 | ||
| 204 | switch (cmd) { | 204 | switch (cmd) { |
| 205 | case HFI1_IOCTL_ASSIGN_CTXT: | 205 | case HFI1_IOCTL_ASSIGN_CTXT: |
| 206 | if (uctxt) | ||
| 207 | return -EINVAL; | ||
| 208 | |||
| 206 | if (copy_from_user(&uinfo, | 209 | if (copy_from_user(&uinfo, |
| 207 | (struct hfi1_user_info __user *)arg, | 210 | (struct hfi1_user_info __user *)arg, |
| 208 | sizeof(uinfo))) | 211 | sizeof(uinfo))) |
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c index 5cc492e5776d..eed971ccd2a1 100644 --- a/drivers/infiniband/hw/hfi1/init.c +++ b/drivers/infiniband/hw/hfi1/init.c | |||
| @@ -1337,7 +1337,7 @@ static void cleanup_device_data(struct hfi1_devdata *dd) | |||
| 1337 | dma_free_coherent(&dd->pcidev->dev, sizeof(u64), | 1337 | dma_free_coherent(&dd->pcidev->dev, sizeof(u64), |
| 1338 | (void *)dd->rcvhdrtail_dummy_kvaddr, | 1338 | (void *)dd->rcvhdrtail_dummy_kvaddr, |
| 1339 | dd->rcvhdrtail_dummy_physaddr); | 1339 | dd->rcvhdrtail_dummy_physaddr); |
| 1340 | dd->rcvhdrtail_dummy_kvaddr = NULL; | 1340 | dd->rcvhdrtail_dummy_kvaddr = NULL; |
| 1341 | } | 1341 | } |
| 1342 | 1342 | ||
| 1343 | for (ctxt = 0; tmp && ctxt < dd->num_rcv_contexts; ctxt++) { | 1343 | for (ctxt = 0; tmp && ctxt < dd->num_rcv_contexts; ctxt++) { |
| @@ -1383,7 +1383,7 @@ static void postinit_cleanup(struct hfi1_devdata *dd) | |||
| 1383 | static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | 1383 | static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
| 1384 | { | 1384 | { |
| 1385 | int ret = 0, j, pidx, initfail; | 1385 | int ret = 0, j, pidx, initfail; |
| 1386 | struct hfi1_devdata *dd = NULL; | 1386 | struct hfi1_devdata *dd = ERR_PTR(-EINVAL); |
| 1387 | struct hfi1_pportdata *ppd; | 1387 | struct hfi1_pportdata *ppd; |
| 1388 | 1388 | ||
| 1389 | /* First, lock the non-writable module parameters */ | 1389 | /* First, lock the non-writable module parameters */ |
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c index 219029576ba0..fca07a1d6c28 100644 --- a/drivers/infiniband/hw/hfi1/mad.c +++ b/drivers/infiniband/hw/hfi1/mad.c | |||
| @@ -78,6 +78,16 @@ static inline void clear_opa_smp_data(struct opa_smp *smp) | |||
| 78 | memset(data, 0, size); | 78 | memset(data, 0, size); |
| 79 | } | 79 | } |
| 80 | 80 | ||
| 81 | void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port) | ||
| 82 | { | ||
| 83 | struct ib_event event; | ||
| 84 | |||
| 85 | event.event = IB_EVENT_PKEY_CHANGE; | ||
| 86 | event.device = &dd->verbs_dev.rdi.ibdev; | ||
| 87 | event.element.port_num = port; | ||
| 88 | ib_dispatch_event(&event); | ||
| 89 | } | ||
| 90 | |||
| 81 | static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len) | 91 | static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len) |
| 82 | { | 92 | { |
| 83 | struct ib_mad_send_buf *send_buf; | 93 | struct ib_mad_send_buf *send_buf; |
| @@ -1418,15 +1428,10 @@ static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys) | |||
| 1418 | } | 1428 | } |
| 1419 | 1429 | ||
| 1420 | if (changed) { | 1430 | if (changed) { |
| 1421 | struct ib_event event; | ||
| 1422 | |||
| 1423 | (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); | 1431 | (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); |
| 1424 | 1432 | hfi1_event_pkey_change(dd, port); | |
| 1425 | event.event = IB_EVENT_PKEY_CHANGE; | ||
| 1426 | event.device = &dd->verbs_dev.rdi.ibdev; | ||
| 1427 | event.element.port_num = port; | ||
| 1428 | ib_dispatch_event(&event); | ||
| 1429 | } | 1433 | } |
| 1434 | |||
| 1430 | return 0; | 1435 | return 0; |
| 1431 | } | 1436 | } |
| 1432 | 1437 | ||
diff --git a/drivers/infiniband/hw/hfi1/mad.h b/drivers/infiniband/hw/hfi1/mad.h index 55ee08675333..8b734aaae88a 100644 --- a/drivers/infiniband/hw/hfi1/mad.h +++ b/drivers/infiniband/hw/hfi1/mad.h | |||
| @@ -434,4 +434,6 @@ struct sc2vlnt { | |||
| 434 | COUNTER_MASK(1, 3) | \ | 434 | COUNTER_MASK(1, 3) | \ |
| 435 | COUNTER_MASK(1, 4)) | 435 | COUNTER_MASK(1, 4)) |
| 436 | 436 | ||
| 437 | void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port); | ||
| 438 | |||
| 437 | #endif /* _HFI1_MAD_H */ | 439 | #endif /* _HFI1_MAD_H */ |
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c index d5edb1afbb8f..d4022450b73f 100644 --- a/drivers/infiniband/hw/hfi1/pio.c +++ b/drivers/infiniband/hw/hfi1/pio.c | |||
| @@ -995,7 +995,7 @@ static void sc_wait_for_packet_egress(struct send_context *sc, int pause) | |||
| 995 | /* counter is reset if occupancy count changes */ | 995 | /* counter is reset if occupancy count changes */ |
| 996 | if (reg != reg_prev) | 996 | if (reg != reg_prev) |
| 997 | loop = 0; | 997 | loop = 0; |
| 998 | if (loop > 500) { | 998 | if (loop > 50000) { |
| 999 | /* timed out - bounce the link */ | 999 | /* timed out - bounce the link */ |
| 1000 | dd_dev_err(dd, | 1000 | dd_dev_err(dd, |
| 1001 | "%s: context %u(%u) timeout waiting for packets to egress, remaining count %u, bouncing link\n", | 1001 | "%s: context %u(%u) timeout waiting for packets to egress, remaining count %u, bouncing link\n", |
| @@ -1798,6 +1798,21 @@ static void pio_map_rcu_callback(struct rcu_head *list) | |||
| 1798 | } | 1798 | } |
| 1799 | 1799 | ||
| 1800 | /* | 1800 | /* |
| 1801 | * Set credit return threshold for the kernel send context | ||
| 1802 | */ | ||
| 1803 | static void set_threshold(struct hfi1_devdata *dd, int scontext, int i) | ||
| 1804 | { | ||
| 1805 | u32 thres; | ||
| 1806 | |||
| 1807 | thres = min(sc_percent_to_threshold(dd->kernel_send_context[scontext], | ||
| 1808 | 50), | ||
| 1809 | sc_mtu_to_threshold(dd->kernel_send_context[scontext], | ||
| 1810 | dd->vld[i].mtu, | ||
| 1811 | dd->rcd[0]->rcvhdrqentsize)); | ||
| 1812 | sc_set_cr_threshold(dd->kernel_send_context[scontext], thres); | ||
| 1813 | } | ||
| 1814 | |||
| 1815 | /* | ||
| 1801 | * pio_map_init - called when #vls change | 1816 | * pio_map_init - called when #vls change |
| 1802 | * @dd: hfi1_devdata | 1817 | * @dd: hfi1_devdata |
| 1803 | * @port: port number | 1818 | * @port: port number |
| @@ -1872,11 +1887,16 @@ int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts) | |||
| 1872 | if (!newmap->map[i]) | 1887 | if (!newmap->map[i]) |
| 1873 | goto bail; | 1888 | goto bail; |
| 1874 | newmap->map[i]->mask = (1 << ilog2(sz)) - 1; | 1889 | newmap->map[i]->mask = (1 << ilog2(sz)) - 1; |
| 1875 | /* assign send contexts */ | 1890 | /* |
| 1891 | * assign send contexts and | ||
| 1892 | * adjust credit return threshold | ||
| 1893 | */ | ||
| 1876 | for (j = 0; j < sz; j++) { | 1894 | for (j = 0; j < sz; j++) { |
| 1877 | if (dd->kernel_send_context[scontext]) | 1895 | if (dd->kernel_send_context[scontext]) { |
| 1878 | newmap->map[i]->ksc[j] = | 1896 | newmap->map[i]->ksc[j] = |
| 1879 | dd->kernel_send_context[scontext]; | 1897 | dd->kernel_send_context[scontext]; |
| 1898 | set_threshold(dd, scontext, i); | ||
| 1899 | } | ||
| 1880 | if (++scontext >= first_scontext + | 1900 | if (++scontext >= first_scontext + |
| 1881 | vl_scontexts[i]) | 1901 | vl_scontexts[i]) |
| 1882 | /* wrap back to first send context */ | 1902 | /* wrap back to first send context */ |
diff --git a/drivers/infiniband/hw/hfi1/qsfp.c b/drivers/infiniband/hw/hfi1/qsfp.c index 2441669f0817..9fb561682c66 100644 --- a/drivers/infiniband/hw/hfi1/qsfp.c +++ b/drivers/infiniband/hw/hfi1/qsfp.c | |||
| @@ -579,7 +579,8 @@ int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len) | |||
| 579 | 579 | ||
| 580 | if (ppd->qsfp_info.cache_valid) { | 580 | if (ppd->qsfp_info.cache_valid) { |
| 581 | if (QSFP_IS_CU(cache[QSFP_MOD_TECH_OFFS])) | 581 | if (QSFP_IS_CU(cache[QSFP_MOD_TECH_OFFS])) |
| 582 | sprintf(lenstr, "%dM ", cache[QSFP_MOD_LEN_OFFS]); | 582 | snprintf(lenstr, sizeof(lenstr), "%dM ", |
| 583 | cache[QSFP_MOD_LEN_OFFS]); | ||
| 583 | 584 | ||
| 584 | power_byte = cache[QSFP_MOD_PWR_OFFS]; | 585 | power_byte = cache[QSFP_MOD_PWR_OFFS]; |
| 585 | sofar += scnprintf(buf + sofar, len - sofar, "PWR:%.3sW\n", | 586 | sofar += scnprintf(buf + sofar, len - sofar, "PWR:%.3sW\n", |
diff --git a/drivers/infiniband/hw/hfi1/trace.c b/drivers/infiniband/hw/hfi1/trace.c index 79b2952c0dfb..4cfb13771897 100644 --- a/drivers/infiniband/hw/hfi1/trace.c +++ b/drivers/infiniband/hw/hfi1/trace.c | |||
| @@ -214,19 +214,6 @@ const char *print_u32_array( | |||
| 214 | return ret; | 214 | return ret; |
| 215 | } | 215 | } |
| 216 | 216 | ||
| 217 | const char *print_u64_array( | ||
| 218 | struct trace_seq *p, | ||
| 219 | u64 *arr, int len) | ||
| 220 | { | ||
| 221 | int i; | ||
| 222 | const char *ret = trace_seq_buffer_ptr(p); | ||
| 223 | |||
| 224 | for (i = 0; i < len; i++) | ||
| 225 | trace_seq_printf(p, "%s0x%016llx", i == 0 ? "" : " ", arr[i]); | ||
| 226 | trace_seq_putc(p, 0); | ||
| 227 | return ret; | ||
| 228 | } | ||
| 229 | |||
| 230 | __hfi1_trace_fn(PKT); | 217 | __hfi1_trace_fn(PKT); |
| 231 | __hfi1_trace_fn(PROC); | 218 | __hfi1_trace_fn(PROC); |
| 232 | __hfi1_trace_fn(SDMA); | 219 | __hfi1_trace_fn(SDMA); |
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c index 29f4795f866c..47ffd273ecbd 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.c +++ b/drivers/infiniband/hw/hfi1/user_sdma.c | |||
| @@ -183,7 +183,7 @@ struct user_sdma_iovec { | |||
| 183 | struct sdma_mmu_node *node; | 183 | struct sdma_mmu_node *node; |
| 184 | }; | 184 | }; |
| 185 | 185 | ||
| 186 | #define SDMA_CACHE_NODE_EVICT BIT(0) | 186 | #define SDMA_CACHE_NODE_EVICT 0 |
| 187 | 187 | ||
| 188 | struct sdma_mmu_node { | 188 | struct sdma_mmu_node { |
| 189 | struct mmu_rb_node rb; | 189 | struct mmu_rb_node rb; |
| @@ -1355,11 +1355,11 @@ static int set_txreq_header(struct user_sdma_request *req, | |||
| 1355 | */ | 1355 | */ |
| 1356 | SDMA_DBG(req, "TID offset %ubytes %uunits om%u", | 1356 | SDMA_DBG(req, "TID offset %ubytes %uunits om%u", |
| 1357 | req->tidoffset, req->tidoffset / req->omfactor, | 1357 | req->tidoffset, req->tidoffset / req->omfactor, |
| 1358 | !!(req->omfactor - KDETH_OM_SMALL)); | 1358 | req->omfactor != KDETH_OM_SMALL); |
| 1359 | KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET, | 1359 | KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET, |
| 1360 | req->tidoffset / req->omfactor); | 1360 | req->tidoffset / req->omfactor); |
| 1361 | KDETH_SET(hdr->kdeth.ver_tid_offset, OM, | 1361 | KDETH_SET(hdr->kdeth.ver_tid_offset, OM, |
| 1362 | !!(req->omfactor - KDETH_OM_SMALL)); | 1362 | req->omfactor != KDETH_OM_SMALL); |
| 1363 | } | 1363 | } |
| 1364 | done: | 1364 | done: |
| 1365 | trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt, | 1365 | trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt, |
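Two small correctness cleanups in user_sdma.c: SDMA_CACHE_NODE_EVICT becomes a plain bit number (presumably because it is consumed by the set_bit()/test_bit() family, which takes a bit index rather than a BIT() mask), and the !!(a - b) idiom is spelled as the clearer a != b. A generic sketch of the bit-number convention, with a made-up flag name:

    #include <linux/bitops.h>

    #define EXAMPLE_NODE_EVICT      0       /* bit number, not BIT(0) */

    static bool example_mark_evicted(unsigned long *flags)
    {
            set_bit(EXAMPLE_NODE_EVICT, flags);     /* bitops take a bit index */
            return test_bit(EXAMPLE_NODE_EVICT, flags);
    }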
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.c b/drivers/infiniband/hw/hfi1/verbs_txreq.c index bc95c4112c61..d8fb056526f8 100644 --- a/drivers/infiniband/hw/hfi1/verbs_txreq.c +++ b/drivers/infiniband/hw/hfi1/verbs_txreq.c | |||
| @@ -92,11 +92,10 @@ void hfi1_put_txreq(struct verbs_txreq *tx) | |||
| 92 | 92 | ||
| 93 | struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev, | 93 | struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev, |
| 94 | struct rvt_qp *qp) | 94 | struct rvt_qp *qp) |
| 95 | __must_hold(&qp->s_lock) | ||
| 95 | { | 96 | { |
| 96 | struct verbs_txreq *tx = ERR_PTR(-EBUSY); | 97 | struct verbs_txreq *tx = ERR_PTR(-EBUSY); |
| 97 | unsigned long flags; | ||
| 98 | 98 | ||
| 99 | spin_lock_irqsave(&qp->s_lock, flags); | ||
| 100 | write_seqlock(&dev->iowait_lock); | 99 | write_seqlock(&dev->iowait_lock); |
| 101 | if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { | 100 | if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { |
| 102 | struct hfi1_qp_priv *priv; | 101 | struct hfi1_qp_priv *priv; |
| @@ -116,7 +115,6 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev, | |||
| 116 | } | 115 | } |
| 117 | out: | 116 | out: |
| 118 | write_sequnlock(&dev->iowait_lock); | 117 | write_sequnlock(&dev->iowait_lock); |
| 119 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
| 120 | return tx; | 118 | return tx; |
| 121 | } | 119 | } |
| 122 | 120 | ||
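The verbs_txreq change addresses a slow-path deadlock: __get_txreq() used to take qp->s_lock itself even though its caller already held it; now the caller keeps the lock and the __must_hold() annotation documents that for sparse lock-context checking. A hedged sketch of the annotation on a made-up structure (struct foo and its lock are illustrative only):

    #include <linux/spinlock.h>
    #include <linux/lockdep.h>

    struct foo {
            spinlock_t lock;
            int count;
    };

    /* __must_hold() tells sparse (-C=lock) the lock is held on entry and
     * still held on exit, matching the new __get_txreq() convention.
     */
    static void foo_bump(struct foo *f)
            __must_hold(&f->lock)
    {
            lockdep_assert_held(&f->lock);  /* runtime check to match the annotation */
            f->count++;
    }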
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h index 1cf69b2fe4a5..a1d6e0807f97 100644 --- a/drivers/infiniband/hw/hfi1/verbs_txreq.h +++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h | |||
| @@ -73,6 +73,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev, | |||
| 73 | 73 | ||
| 74 | static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev, | 74 | static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev, |
| 75 | struct rvt_qp *qp) | 75 | struct rvt_qp *qp) |
| 76 | __must_hold(&qp->slock) | ||
| 76 | { | 77 | { |
| 77 | struct verbs_txreq *tx; | 78 | struct verbs_txreq *tx; |
| 78 | struct hfi1_qp_priv *priv = qp->priv; | 79 | struct hfi1_qp_priv *priv = qp->priv; |
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h index 8b9532034558..b738acdb9b02 100644 --- a/drivers/infiniband/hw/i40iw/i40iw.h +++ b/drivers/infiniband/hw/i40iw/i40iw.h | |||
| @@ -113,6 +113,8 @@ | |||
| 113 | 113 | ||
| 114 | #define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types) | 114 | #define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types) |
| 115 | #define IW_CFG_FPM_QP_COUNT 32768 | 115 | #define IW_CFG_FPM_QP_COUNT 32768 |
| 116 | #define I40IW_MAX_PAGES_PER_FMR 512 | ||
| 117 | #define I40IW_MIN_PAGES_PER_FMR 1 | ||
| 116 | 118 | ||
| 117 | #define I40IW_MTU_TO_MSS 40 | 119 | #define I40IW_MTU_TO_MSS 40 |
| 118 | #define I40IW_DEFAULT_MSS 1460 | 120 | #define I40IW_DEFAULT_MSS 1460 |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 02a735b64208..33959ed14563 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c | |||
| @@ -79,6 +79,7 @@ static int i40iw_query_device(struct ib_device *ibdev, | |||
| 79 | props->max_qp_init_rd_atom = props->max_qp_rd_atom; | 79 | props->max_qp_init_rd_atom = props->max_qp_rd_atom; |
| 80 | props->atomic_cap = IB_ATOMIC_NONE; | 80 | props->atomic_cap = IB_ATOMIC_NONE; |
| 81 | props->max_map_per_fmr = 1; | 81 | props->max_map_per_fmr = 1; |
| 82 | props->max_fast_reg_page_list_len = I40IW_MAX_PAGES_PER_FMR; | ||
| 82 | return 0; | 83 | return 0; |
| 83 | } | 84 | } |
| 84 | 85 | ||
| @@ -1527,7 +1528,7 @@ static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd, | |||
| 1527 | mutex_lock(&iwdev->pbl_mutex); | 1528 | mutex_lock(&iwdev->pbl_mutex); |
| 1528 | status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt); | 1529 | status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt); |
| 1529 | mutex_unlock(&iwdev->pbl_mutex); | 1530 | mutex_unlock(&iwdev->pbl_mutex); |
| 1530 | if (!status) | 1531 | if (status) |
| 1531 | goto err1; | 1532 | goto err1; |
| 1532 | 1533 | ||
| 1533 | if (palloc->level != I40IW_LEVEL_1) | 1534 | if (palloc->level != I40IW_LEVEL_1) |
| @@ -2149,6 +2150,7 @@ static int i40iw_post_send(struct ib_qp *ibqp, | |||
| 2149 | struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev; | 2150 | struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev; |
| 2150 | struct i40iw_fast_reg_stag_info info; | 2151 | struct i40iw_fast_reg_stag_info info; |
| 2151 | 2152 | ||
| 2153 | memset(&info, 0, sizeof(info)); | ||
| 2152 | info.access_rights = I40IW_ACCESS_FLAGS_LOCALREAD; | 2154 | info.access_rights = I40IW_ACCESS_FLAGS_LOCALREAD; |
| 2153 | info.access_rights |= i40iw_get_user_access(flags); | 2155 | info.access_rights |= i40iw_get_user_access(flags); |
| 2154 | info.stag_key = reg_wr(ib_wr)->key & 0xff; | 2156 | info.stag_key = reg_wr(ib_wr)->key & 0xff; |
| @@ -2158,10 +2160,14 @@ static int i40iw_post_send(struct ib_qp *ibqp, | |||
| 2158 | info.addr_type = I40IW_ADDR_TYPE_VA_BASED; | 2160 | info.addr_type = I40IW_ADDR_TYPE_VA_BASED; |
| 2159 | info.va = (void *)(uintptr_t)iwmr->ibmr.iova; | 2161 | info.va = (void *)(uintptr_t)iwmr->ibmr.iova; |
| 2160 | info.total_len = iwmr->ibmr.length; | 2162 | info.total_len = iwmr->ibmr.length; |
| 2163 | info.reg_addr_pa = *(u64 *)palloc->level1.addr; | ||
| 2161 | info.first_pm_pbl_index = palloc->level1.idx; | 2164 | info.first_pm_pbl_index = palloc->level1.idx; |
| 2162 | info.local_fence = ib_wr->send_flags & IB_SEND_FENCE; | 2165 | info.local_fence = ib_wr->send_flags & IB_SEND_FENCE; |
| 2163 | info.signaled = ib_wr->send_flags & IB_SEND_SIGNALED; | 2166 | info.signaled = ib_wr->send_flags & IB_SEND_SIGNALED; |
| 2164 | 2167 | ||
| 2168 | if (iwmr->npages > I40IW_MIN_PAGES_PER_FMR) | ||
| 2169 | info.chunk_size = 1; | ||
| 2170 | |||
| 2165 | if (page_shift == 21) | 2171 | if (page_shift == 21) |
| 2166 | info.page_size = 1; /* 2M page */ | 2172 | info.page_size = 1; /* 2M page */ |
| 2167 | 2173 | ||
| @@ -2327,13 +2333,16 @@ static int i40iw_req_notify_cq(struct ib_cq *ibcq, | |||
| 2327 | { | 2333 | { |
| 2328 | struct i40iw_cq *iwcq; | 2334 | struct i40iw_cq *iwcq; |
| 2329 | struct i40iw_cq_uk *ukcq; | 2335 | struct i40iw_cq_uk *ukcq; |
| 2330 | enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_SOLICITED; | 2336 | unsigned long flags; |
| 2337 | enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_EVENT; | ||
| 2331 | 2338 | ||
| 2332 | iwcq = (struct i40iw_cq *)ibcq; | 2339 | iwcq = (struct i40iw_cq *)ibcq; |
| 2333 | ukcq = &iwcq->sc_cq.cq_uk; | 2340 | ukcq = &iwcq->sc_cq.cq_uk; |
| 2334 | if (notify_flags == IB_CQ_NEXT_COMP) | 2341 | if (notify_flags == IB_CQ_SOLICITED) |
| 2335 | cq_notify = IW_CQ_COMPL_EVENT; | 2342 | cq_notify = IW_CQ_COMPL_SOLICITED; |
| 2343 | spin_lock_irqsave(&iwcq->lock, flags); | ||
| 2336 | ukcq->ops.iw_cq_request_notification(ukcq, cq_notify); | 2344 | ukcq->ops.iw_cq_request_notification(ukcq, cq_notify); |
| 2345 | spin_unlock_irqrestore(&iwcq->lock, flags); | ||
| 2337 | return 0; | 2346 | return 0; |
| 2338 | } | 2347 | } |
| 2339 | 2348 | ||
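Two of the i40iw fixes above follow common kernel idioms: a status where 0 means success, so the error branch tests if (status) rather than if (!status), and zero-initializing a stack descriptor before filling only the fields that apply. A hedged stand-alone sketch of both patterns (struct and function names are invented for illustration, not the driver's API):

#include <stdio.h>
#include <string.h>

struct reg_info {
	unsigned int access_rights;
	unsigned int chunk_size;	/* only set for multi-page regions */
	unsigned long long reg_addr_pa;
};

/* Returns 0 on success, non-zero on failure (kernel convention). */
static int get_pble(int want_pages)
{
	return (want_pages > 512) ? -1 : 0;
}

static int fast_reg(int npages)
{
	struct reg_info info;
	int status;

	/* Zero first, then set only what applies; anything untouched
	 * below is guaranteed to be 0 rather than stack garbage. */
	memset(&info, 0, sizeof(info));
	info.access_rights = 0x1;
	if (npages > 1)
		info.chunk_size = 1;

	status = get_pble(npages);
	if (status)		/* error path: non-zero status is failure */
		return status;

	printf("registered %d pages, chunk_size=%u\n", npages, info.chunk_size);
	return 0;
}

int main(void)
{
	fast_reg(1);
	fast_reg(64);
	return fast_reg(1024) ? 0 : 1;	/* the oversized request must fail */
}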
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c index 105246fba2e7..5fc623362731 100644 --- a/drivers/infiniband/hw/mlx4/ah.c +++ b/drivers/infiniband/hw/mlx4/ah.c | |||
| @@ -47,6 +47,7 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr, | |||
| 47 | 47 | ||
| 48 | ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24)); | 48 | ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24)); |
| 49 | ah->av.ib.g_slid = ah_attr->src_path_bits; | 49 | ah->av.ib.g_slid = ah_attr->src_path_bits; |
| 50 | ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28); | ||
| 50 | if (ah_attr->ah_flags & IB_AH_GRH) { | 51 | if (ah_attr->ah_flags & IB_AH_GRH) { |
| 51 | ah->av.ib.g_slid |= 0x80; | 52 | ah->av.ib.g_slid |= 0x80; |
| 52 | ah->av.ib.gid_index = ah_attr->grh.sgid_index; | 53 | ah->av.ib.gid_index = ah_attr->grh.sgid_index; |
| @@ -64,7 +65,6 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr, | |||
| 64 | !(1 << ah->av.ib.stat_rate & dev->caps.stat_rate_support)) | 65 | !(1 << ah->av.ib.stat_rate & dev->caps.stat_rate_support)) |
| 65 | --ah->av.ib.stat_rate; | 66 | --ah->av.ib.stat_rate; |
| 66 | } | 67 | } |
| 67 | ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28); | ||
| 68 | 68 | ||
| 69 | return &ah->ibah; | 69 | return &ah->ibah; |
| 70 | } | 70 | } |
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index d68f506c1922..9c2e53d28f98 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c | |||
| @@ -527,7 +527,7 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port, | |||
| 527 | tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1); | 527 | tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1); |
| 528 | spin_unlock(&tun_qp->tx_lock); | 528 | spin_unlock(&tun_qp->tx_lock); |
| 529 | if (ret) | 529 | if (ret) |
| 530 | goto out; | 530 | goto end; |
| 531 | 531 | ||
| 532 | tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr); | 532 | tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr); |
| 533 | if (tun_qp->tx_ring[tun_tx_ix].ah) | 533 | if (tun_qp->tx_ring[tun_tx_ix].ah) |
| @@ -596,9 +596,15 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port, | |||
| 596 | wr.wr.send_flags = IB_SEND_SIGNALED; | 596 | wr.wr.send_flags = IB_SEND_SIGNALED; |
| 597 | 597 | ||
| 598 | ret = ib_post_send(src_qp, &wr.wr, &bad_wr); | 598 | ret = ib_post_send(src_qp, &wr.wr, &bad_wr); |
| 599 | out: | 599 | if (!ret) |
| 600 | if (ret) | 600 | return 0; |
| 601 | ib_destroy_ah(ah); | 601 | out: |
| 602 | spin_lock(&tun_qp->tx_lock); | ||
| 603 | tun_qp->tx_ix_tail++; | ||
| 604 | spin_unlock(&tun_qp->tx_lock); | ||
| 605 | tun_qp->tx_ring[tun_tx_ix].ah = NULL; | ||
| 606 | end: | ||
| 607 | ib_destroy_ah(ah); | ||
| 602 | return ret; | 608 | return ret; |
| 603 | } | 609 | } |
| 604 | 610 | ||
| @@ -1326,9 +1332,15 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port, | |||
| 1326 | 1332 | ||
| 1327 | 1333 | ||
| 1328 | ret = ib_post_send(send_qp, &wr.wr, &bad_wr); | 1334 | ret = ib_post_send(send_qp, &wr.wr, &bad_wr); |
| 1335 | if (!ret) | ||
| 1336 | return 0; | ||
| 1337 | |||
| 1338 | spin_lock(&sqp->tx_lock); | ||
| 1339 | sqp->tx_ix_tail++; | ||
| 1340 | spin_unlock(&sqp->tx_lock); | ||
| 1341 | sqp->tx_ring[wire_tx_ix].ah = NULL; | ||
| 1329 | out: | 1342 | out: |
| 1330 | if (ret) | 1343 | ib_destroy_ah(ah); |
| 1331 | ib_destroy_ah(ah); | ||
| 1332 | return ret; | 1344 | return ret; |
| 1333 | } | 1345 | } |
| 1334 | 1346 | ||
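Both mlx4_ib_send_to_slave()/mlx4_ib_send_to_wire() hunks restructure the failure path: a successful ib_post_send() returns immediately, while a failure rolls back the tx ring index it had claimed and destroys the address handle exactly once. A minimal user-space sketch of that claim, post, unwind-on-failure shape (the ring and names only loosely mirror the driver):

#include <pthread.h>
#include <stdio.h>

#define RING_SIZE 16

struct tx_ring {
	pthread_mutex_t lock;
	unsigned int head, tail;
};

static struct tx_ring ring = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
};

static int post_send(int should_fail)
{
	return should_fail ? -11 : 0;	/* -EAGAIN-style failure */
}

static int send_one(int should_fail)
{
	unsigned int ix;
	int ret;

	/* Claim a ring slot under the lock. */
	pthread_mutex_lock(&ring.lock);
	if (ring.head - ring.tail >= RING_SIZE) {
		pthread_mutex_unlock(&ring.lock);
		return -16;		/* -EBUSY-style: ring full */
	}
	ix = (++ring.head) & (RING_SIZE - 1);
	pthread_mutex_unlock(&ring.lock);

	ret = post_send(should_fail);
	if (!ret)
		return 0;		/* success: the slot stays consumed */

	/* Failure: release the claimed slot, then clean up exactly once. */
	pthread_mutex_lock(&ring.lock);
	ring.tail++;
	pthread_mutex_unlock(&ring.lock);
	printf("post failed (%d), slot %u released\n", ret, ix);
	return ret;
}

int main(void)
{
	send_one(0);
	send_one(1);
	return 0;
}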
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index b01ef6eee6e8..42a46078d7d5 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
| @@ -505,9 +505,9 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, | |||
| 505 | props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B; | 505 | props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B; |
| 506 | else | 506 | else |
| 507 | props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A; | 507 | props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A; |
| 508 | if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) | ||
| 509 | props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING; | ||
| 510 | } | 508 | } |
| 509 | if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) | ||
| 510 | props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING; | ||
| 511 | 511 | ||
| 512 | props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM; | 512 | props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM; |
| 513 | 513 | ||
| @@ -1704,6 +1704,9 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp, | |||
| 1704 | struct mlx4_dev *dev = (to_mdev(qp->device))->dev; | 1704 | struct mlx4_dev *dev = (to_mdev(qp->device))->dev; |
| 1705 | int is_bonded = mlx4_is_bonded(dev); | 1705 | int is_bonded = mlx4_is_bonded(dev); |
| 1706 | 1706 | ||
| 1707 | if (flow_attr->port < 1 || flow_attr->port > qp->device->phys_port_cnt) | ||
| 1708 | return ERR_PTR(-EINVAL); | ||
| 1709 | |||
| 1707 | if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) && | 1710 | if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) && |
| 1708 | (flow_attr->type != IB_FLOW_ATTR_NORMAL)) | 1711 | (flow_attr->type != IB_FLOW_ATTR_NORMAL)) |
| 1709 | return ERR_PTR(-EOPNOTSUPP); | 1712 | return ERR_PTR(-EOPNOTSUPP); |
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index 6c5ac5d8f32f..29acda249612 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h | |||
| @@ -139,7 +139,7 @@ struct mlx4_ib_mr { | |||
| 139 | u32 max_pages; | 139 | u32 max_pages; |
| 140 | struct mlx4_mr mmr; | 140 | struct mlx4_mr mmr; |
| 141 | struct ib_umem *umem; | 141 | struct ib_umem *umem; |
| 142 | void *pages_alloc; | 142 | size_t page_map_size; |
| 143 | }; | 143 | }; |
| 144 | 144 | ||
| 145 | struct mlx4_ib_mw { | 145 | struct mlx4_ib_mw { |
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index 631272172a0b..5d73989d9771 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c | |||
| @@ -277,20 +277,23 @@ mlx4_alloc_priv_pages(struct ib_device *device, | |||
| 277 | struct mlx4_ib_mr *mr, | 277 | struct mlx4_ib_mr *mr, |
| 278 | int max_pages) | 278 | int max_pages) |
| 279 | { | 279 | { |
| 280 | int size = max_pages * sizeof(u64); | ||
| 281 | int add_size; | ||
| 282 | int ret; | 280 | int ret; |
| 283 | 281 | ||
| 284 | add_size = max_t(int, MLX4_MR_PAGES_ALIGN - ARCH_KMALLOC_MINALIGN, 0); | 282 | /* Ensure that size is aligned to DMA cacheline |
| 283 | * requirements. | ||
| 284 | * max_pages is limited to MLX4_MAX_FAST_REG_PAGES | ||
| 285 | * so page_map_size will never cross PAGE_SIZE. | ||
| 286 | */ | ||
| 287 | mr->page_map_size = roundup(max_pages * sizeof(u64), | ||
| 288 | MLX4_MR_PAGES_ALIGN); | ||
| 285 | 289 | ||
| 286 | mr->pages_alloc = kzalloc(size + add_size, GFP_KERNEL); | 290 | /* Prevent cross page boundary allocation. */ |
| 287 | if (!mr->pages_alloc) | 291 | mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL); |
| 292 | if (!mr->pages) | ||
| 288 | return -ENOMEM; | 293 | return -ENOMEM; |
| 289 | 294 | ||
| 290 | mr->pages = PTR_ALIGN(mr->pages_alloc, MLX4_MR_PAGES_ALIGN); | ||
| 291 | |||
| 292 | mr->page_map = dma_map_single(device->dma_device, mr->pages, | 295 | mr->page_map = dma_map_single(device->dma_device, mr->pages, |
| 293 | size, DMA_TO_DEVICE); | 296 | mr->page_map_size, DMA_TO_DEVICE); |
| 294 | 297 | ||
| 295 | if (dma_mapping_error(device->dma_device, mr->page_map)) { | 298 | if (dma_mapping_error(device->dma_device, mr->page_map)) { |
| 296 | ret = -ENOMEM; | 299 | ret = -ENOMEM; |
| @@ -298,9 +301,9 @@ mlx4_alloc_priv_pages(struct ib_device *device, | |||
| 298 | } | 301 | } |
| 299 | 302 | ||
| 300 | return 0; | 303 | return 0; |
| 301 | err: | ||
| 302 | kfree(mr->pages_alloc); | ||
| 303 | 304 | ||
| 305 | err: | ||
| 306 | free_page((unsigned long)mr->pages); | ||
| 304 | return ret; | 307 | return ret; |
| 305 | } | 308 | } |
| 306 | 309 | ||
| @@ -309,11 +312,10 @@ mlx4_free_priv_pages(struct mlx4_ib_mr *mr) | |||
| 309 | { | 312 | { |
| 310 | if (mr->pages) { | 313 | if (mr->pages) { |
| 311 | struct ib_device *device = mr->ibmr.device; | 314 | struct ib_device *device = mr->ibmr.device; |
| 312 | int size = mr->max_pages * sizeof(u64); | ||
| 313 | 315 | ||
| 314 | dma_unmap_single(device->dma_device, mr->page_map, | 316 | dma_unmap_single(device->dma_device, mr->page_map, |
| 315 | size, DMA_TO_DEVICE); | 317 | mr->page_map_size, DMA_TO_DEVICE); |
| 316 | kfree(mr->pages_alloc); | 318 | free_page((unsigned long)mr->pages); |
| 317 | mr->pages = NULL; | 319 | mr->pages = NULL; |
| 318 | } | 320 | } |
| 319 | } | 321 | } |
| @@ -537,14 +539,12 @@ int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, | |||
| 537 | mr->npages = 0; | 539 | mr->npages = 0; |
| 538 | 540 | ||
| 539 | ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map, | 541 | ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map, |
| 540 | sizeof(u64) * mr->max_pages, | 542 | mr->page_map_size, DMA_TO_DEVICE); |
| 541 | DMA_TO_DEVICE); | ||
| 542 | 543 | ||
| 543 | rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page); | 544 | rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page); |
| 544 | 545 | ||
| 545 | ib_dma_sync_single_for_device(ibmr->device, mr->page_map, | 546 | ib_dma_sync_single_for_device(ibmr->device, mr->page_map, |
| 546 | sizeof(u64) * mr->max_pages, | 547 | mr->page_map_size, DMA_TO_DEVICE); |
| 547 | DMA_TO_DEVICE); | ||
| 548 | 548 | ||
| 549 | return rc; | 549 | return rc; |
| 550 | } | 550 | } |
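The mr.c change replaces the kzalloc()+PTR_ALIGN() scheme with a single zeroed page and records the mapped length in page_map_size, relying on max_pages being small enough that the rounded-up size never exceeds PAGE_SIZE. A quick arithmetic check of that claim; the constants below are assumptions for illustration, the real limits live in the mlx4 headers:

#include <stdio.h>
#include <stdint.h>

/* Assumed values for the sketch; see the mlx4 headers for the real ones. */
#define PAGE_SIZE		4096
#define MLX4_MR_PAGES_ALIGN	64
#define MLX4_MAX_FAST_REG_PAGES	511

/* roundup() to a positive multiple, as the kernel macro does. */
static size_t roundup_to(size_t x, size_t y)
{
	return ((x + y - 1) / y) * y;
}

int main(void)
{
	size_t page_map_size =
		roundup_to(MLX4_MAX_FAST_REG_PAGES * sizeof(uint64_t),
			   MLX4_MR_PAGES_ALIGN);

	printf("page_map_size = %zu bytes (PAGE_SIZE = %d)\n",
	       page_map_size, PAGE_SIZE);

	/* The single get_zeroed_page() allocation is enough only if the
	 * worst-case descriptor array still fits in one page. */
	return page_map_size <= PAGE_SIZE ? 0 : 1;
}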
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 81b0e1fbec1d..8db8405c1e99 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c | |||
| @@ -362,7 +362,7 @@ static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags) | |||
| 362 | sizeof (struct mlx4_wqe_raddr_seg); | 362 | sizeof (struct mlx4_wqe_raddr_seg); |
| 363 | case MLX4_IB_QPT_RC: | 363 | case MLX4_IB_QPT_RC: |
| 364 | return sizeof (struct mlx4_wqe_ctrl_seg) + | 364 | return sizeof (struct mlx4_wqe_ctrl_seg) + |
| 365 | sizeof (struct mlx4_wqe_atomic_seg) + | 365 | sizeof (struct mlx4_wqe_masked_atomic_seg) + |
| 366 | sizeof (struct mlx4_wqe_raddr_seg); | 366 | sizeof (struct mlx4_wqe_raddr_seg); |
| 367 | case MLX4_IB_QPT_SMI: | 367 | case MLX4_IB_QPT_SMI: |
| 368 | case MLX4_IB_QPT_GSI: | 368 | case MLX4_IB_QPT_GSI: |
| @@ -1191,8 +1191,10 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd, | |||
| 1191 | { | 1191 | { |
| 1192 | err = create_qp_common(to_mdev(pd->device), pd, init_attr, | 1192 | err = create_qp_common(to_mdev(pd->device), pd, init_attr, |
| 1193 | udata, 0, &qp, gfp); | 1193 | udata, 0, &qp, gfp); |
| 1194 | if (err) | 1194 | if (err) { |
| 1195 | kfree(qp); | ||
| 1195 | return ERR_PTR(err); | 1196 | return ERR_PTR(err); |
| 1197 | } | ||
| 1196 | 1198 | ||
| 1197 | qp->ibqp.qp_num = qp->mqp.qpn; | 1199 | qp->ibqp.qp_num = qp->mqp.qpn; |
| 1198 | qp->xrcdn = xrcdn; | 1200 | qp->xrcdn = xrcdn; |
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index dabcc65bd65e..9c0e67bd2ba7 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c | |||
| @@ -822,7 +822,8 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, | |||
| 822 | int eqn; | 822 | int eqn; |
| 823 | int err; | 823 | int err; |
| 824 | 824 | ||
| 825 | if (entries < 0) | 825 | if (entries < 0 || |
| 826 | (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))) | ||
| 826 | return ERR_PTR(-EINVAL); | 827 | return ERR_PTR(-EINVAL); |
| 827 | 828 | ||
| 828 | if (check_cq_create_flags(attr->flags)) | 829 | if (check_cq_create_flags(attr->flags)) |
| @@ -1168,11 +1169,16 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) | |||
| 1168 | return -ENOSYS; | 1169 | return -ENOSYS; |
| 1169 | } | 1170 | } |
| 1170 | 1171 | ||
| 1171 | if (entries < 1) | 1172 | if (entries < 1 || |
| 1173 | entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) { | ||
| 1174 | mlx5_ib_warn(dev, "wrong entries number %d, max %d\n", | ||
| 1175 | entries, | ||
| 1176 | 1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)); | ||
| 1172 | return -EINVAL; | 1177 | return -EINVAL; |
| 1178 | } | ||
| 1173 | 1179 | ||
| 1174 | entries = roundup_pow_of_two(entries + 1); | 1180 | entries = roundup_pow_of_two(entries + 1); |
| 1175 | if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1) | 1181 | if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1) |
| 1176 | return -EINVAL; | 1182 | return -EINVAL; |
| 1177 | 1183 | ||
| 1178 | if (entries == ibcq->cqe + 1) | 1184 | if (entries == ibcq->cqe + 1) |
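The cq.c hunks validate the requested entries against the device maximum before roundup_pow_of_two(entries + 1) runs, and warn with the cap on resize. Checking first matters because rounding up can push a barely-too-large request past the cap, or wrap for very large values. A small sketch of the ordering (log_max_cq_sz is a stand-in value here):

#include <stdio.h>

#define LOG_MAX_CQ_SZ	16		/* assumed device capability */
#define MAX_CQE		(1 << LOG_MAX_CQ_SZ)

static unsigned int roundup_pow_of_two(unsigned int x)
{
	unsigned int r = 1;

	while (r < x)
		r <<= 1;
	return r;
}

static int resize_cq(int entries)
{
	/* Validate the caller's request against the raw capability first. */
	if (entries < 1 || entries > MAX_CQE) {
		fprintf(stderr, "wrong entries number %d, max %d\n",
			entries, MAX_CQE);
		return -22;		/* -EINVAL */
	}

	/* Only then normalize to the hardware's power-of-two CQE count. */
	entries = roundup_pow_of_two(entries + 1);
	if (entries > MAX_CQE + 1)
		return -22;

	printf("resized to %d CQEs\n", entries);
	return 0;
}

int main(void)
{
	resize_cq(100);
	resize_cq(MAX_CQE + 5);		/* now rejected up front */
	return 0;
}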
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c index 1534af113058..364aab9f3c9e 100644 --- a/drivers/infiniband/hw/mlx5/mad.c +++ b/drivers/infiniband/hw/mlx5/mad.c | |||
| @@ -121,7 +121,7 @@ static void pma_cnt_ext_assign(struct ib_pma_portcounters_ext *pma_cnt_ext, | |||
| 121 | pma_cnt_ext->port_xmit_data = | 121 | pma_cnt_ext->port_xmit_data = |
| 122 | cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.octets, | 122 | cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.octets, |
| 123 | transmitted_ib_multicast.octets) >> 2); | 123 | transmitted_ib_multicast.octets) >> 2); |
| 124 | pma_cnt_ext->port_xmit_data = | 124 | pma_cnt_ext->port_rcv_data = |
| 125 | cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.octets, | 125 | cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.octets, |
| 126 | received_ib_multicast.octets) >> 2); | 126 | received_ib_multicast.octets) >> 2); |
| 127 | pma_cnt_ext->port_xmit_packets = | 127 | pma_cnt_ext->port_xmit_packets = |
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index c72797cd9e4f..b48ad85315dc 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c | |||
| @@ -524,6 +524,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, | |||
| 524 | MLX5_CAP_ETH(dev->mdev, scatter_fcs)) | 524 | MLX5_CAP_ETH(dev->mdev, scatter_fcs)) |
| 525 | props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS; | 525 | props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS; |
| 526 | 526 | ||
| 527 | if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS)) | ||
| 528 | props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING; | ||
| 529 | |||
| 527 | props->vendor_part_id = mdev->pdev->device; | 530 | props->vendor_part_id = mdev->pdev->device; |
| 528 | props->hw_ver = mdev->pdev->revision; | 531 | props->hw_ver = mdev->pdev->revision; |
| 529 | 532 | ||
| @@ -915,7 +918,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, | |||
| 915 | num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE; | 918 | num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE; |
| 916 | gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE; | 919 | gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE; |
| 917 | resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp); | 920 | resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp); |
| 918 | resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size); | 921 | if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf)) |
| 922 | resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size); | ||
| 919 | resp.cache_line_size = L1_CACHE_BYTES; | 923 | resp.cache_line_size = L1_CACHE_BYTES; |
| 920 | resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq); | 924 | resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq); |
| 921 | resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq); | 925 | resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq); |
| @@ -988,7 +992,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, | |||
| 988 | if (field_avail(typeof(resp), cqe_version, udata->outlen)) | 992 | if (field_avail(typeof(resp), cqe_version, udata->outlen)) |
| 989 | resp.response_length += sizeof(resp.cqe_version); | 993 | resp.response_length += sizeof(resp.cqe_version); |
| 990 | 994 | ||
| 991 | if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) { | 995 | /* |
| 996 | * We don't want to expose information from the PCI bar that is located | ||
| 997 | * after 4096 bytes, so if the arch only supports larger pages, let's | ||
| 998 | * pretend we don't support reading the HCA's core clock. This is also | ||
| 999 | * forced by mmap function. | ||
| 1000 | */ | ||
| 1001 | if (PAGE_SIZE <= 4096 && | ||
| 1002 | field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) { | ||
| 992 | resp.comp_mask |= | 1003 | resp.comp_mask |= |
| 993 | MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET; | 1004 | MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET; |
| 994 | resp.hca_core_clock_offset = | 1005 | resp.hca_core_clock_offset = |
| @@ -1798,7 +1809,7 @@ static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr, | |||
| 1798 | { | 1809 | { |
| 1799 | struct mlx5_ib_dev *dev = | 1810 | struct mlx5_ib_dev *dev = |
| 1800 | container_of(device, struct mlx5_ib_dev, ib_dev.dev); | 1811 | container_of(device, struct mlx5_ib_dev, ib_dev.dev); |
| 1801 | return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(dev->mdev), | 1812 | return sprintf(buf, "%d.%d.%04d\n", fw_rev_maj(dev->mdev), |
| 1802 | fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev)); | 1813 | fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev)); |
| 1803 | } | 1814 | } |
| 1804 | 1815 | ||
| @@ -1866,14 +1877,11 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context, | |||
| 1866 | break; | 1877 | break; |
| 1867 | 1878 | ||
| 1868 | case MLX5_DEV_EVENT_PORT_DOWN: | 1879 | case MLX5_DEV_EVENT_PORT_DOWN: |
| 1880 | case MLX5_DEV_EVENT_PORT_INITIALIZED: | ||
| 1869 | ibev.event = IB_EVENT_PORT_ERR; | 1881 | ibev.event = IB_EVENT_PORT_ERR; |
| 1870 | port = (u8)param; | 1882 | port = (u8)param; |
| 1871 | break; | 1883 | break; |
| 1872 | 1884 | ||
| 1873 | case MLX5_DEV_EVENT_PORT_INITIALIZED: | ||
| 1874 | /* not used by ULPs */ | ||
| 1875 | return; | ||
| 1876 | |||
| 1877 | case MLX5_DEV_EVENT_LID_CHANGE: | 1885 | case MLX5_DEV_EVENT_LID_CHANGE: |
| 1878 | ibev.event = IB_EVENT_LID_CHANGE; | 1886 | ibev.event = IB_EVENT_LID_CHANGE; |
| 1879 | port = (u8)param; | 1887 | port = (u8)param; |
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 504117657d41..ce0a7ab35a22 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c | |||
| @@ -235,6 +235,8 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap, | |||
| 235 | qp->rq.max_gs = 0; | 235 | qp->rq.max_gs = 0; |
| 236 | qp->rq.wqe_cnt = 0; | 236 | qp->rq.wqe_cnt = 0; |
| 237 | qp->rq.wqe_shift = 0; | 237 | qp->rq.wqe_shift = 0; |
| 238 | cap->max_recv_wr = 0; | ||
| 239 | cap->max_recv_sge = 0; | ||
| 238 | } else { | 240 | } else { |
| 239 | if (ucmd) { | 241 | if (ucmd) { |
| 240 | qp->rq.wqe_cnt = ucmd->rq_wqe_count; | 242 | qp->rq.wqe_cnt = ucmd->rq_wqe_count; |
| @@ -1851,13 +1853,15 @@ static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev, | |||
| 1851 | static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, | 1853 | static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, |
| 1852 | const struct ib_ah_attr *ah, | 1854 | const struct ib_ah_attr *ah, |
| 1853 | struct mlx5_qp_path *path, u8 port, int attr_mask, | 1855 | struct mlx5_qp_path *path, u8 port, int attr_mask, |
| 1854 | u32 path_flags, const struct ib_qp_attr *attr) | 1856 | u32 path_flags, const struct ib_qp_attr *attr, |
| 1857 | bool alt) | ||
| 1855 | { | 1858 | { |
| 1856 | enum rdma_link_layer ll = rdma_port_get_link_layer(&dev->ib_dev, port); | 1859 | enum rdma_link_layer ll = rdma_port_get_link_layer(&dev->ib_dev, port); |
| 1857 | int err; | 1860 | int err; |
| 1858 | 1861 | ||
| 1859 | if (attr_mask & IB_QP_PKEY_INDEX) | 1862 | if (attr_mask & IB_QP_PKEY_INDEX) |
| 1860 | path->pkey_index = attr->pkey_index; | 1863 | path->pkey_index = cpu_to_be16(alt ? attr->alt_pkey_index : |
| 1864 | attr->pkey_index); | ||
| 1861 | 1865 | ||
| 1862 | if (ah->ah_flags & IB_AH_GRH) { | 1866 | if (ah->ah_flags & IB_AH_GRH) { |
| 1863 | if (ah->grh.sgid_index >= | 1867 | if (ah->grh.sgid_index >= |
| @@ -1877,9 +1881,9 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, | |||
| 1877 | ah->grh.sgid_index); | 1881 | ah->grh.sgid_index); |
| 1878 | path->dci_cfi_prio_sl = (ah->sl & 0x7) << 4; | 1882 | path->dci_cfi_prio_sl = (ah->sl & 0x7) << 4; |
| 1879 | } else { | 1883 | } else { |
| 1880 | path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0; | 1884 | path->fl_free_ar = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0; |
| 1881 | path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : | 1885 | path->fl_free_ar |= |
| 1882 | 0; | 1886 | (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x40 : 0; |
| 1883 | path->rlid = cpu_to_be16(ah->dlid); | 1887 | path->rlid = cpu_to_be16(ah->dlid); |
| 1884 | path->grh_mlid = ah->src_path_bits & 0x7f; | 1888 | path->grh_mlid = ah->src_path_bits & 0x7f; |
| 1885 | if (ah->ah_flags & IB_AH_GRH) | 1889 | if (ah->ah_flags & IB_AH_GRH) |
| @@ -1903,7 +1907,7 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, | |||
| 1903 | path->port = port; | 1907 | path->port = port; |
| 1904 | 1908 | ||
| 1905 | if (attr_mask & IB_QP_TIMEOUT) | 1909 | if (attr_mask & IB_QP_TIMEOUT) |
| 1906 | path->ackto_lt = attr->timeout << 3; | 1910 | path->ackto_lt = (alt ? attr->alt_timeout : attr->timeout) << 3; |
| 1907 | 1911 | ||
| 1908 | if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt) | 1912 | if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt) |
| 1909 | return modify_raw_packet_eth_prio(dev->mdev, | 1913 | return modify_raw_packet_eth_prio(dev->mdev, |
| @@ -2264,7 +2268,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, | |||
| 2264 | context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num); | 2268 | context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num); |
| 2265 | 2269 | ||
| 2266 | if (attr_mask & IB_QP_PKEY_INDEX) | 2270 | if (attr_mask & IB_QP_PKEY_INDEX) |
| 2267 | context->pri_path.pkey_index = attr->pkey_index; | 2271 | context->pri_path.pkey_index = cpu_to_be16(attr->pkey_index); |
| 2268 | 2272 | ||
| 2269 | /* todo implement counter_index functionality */ | 2273 | /* todo implement counter_index functionality */ |
| 2270 | 2274 | ||
| @@ -2277,7 +2281,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, | |||
| 2277 | if (attr_mask & IB_QP_AV) { | 2281 | if (attr_mask & IB_QP_AV) { |
| 2278 | err = mlx5_set_path(dev, qp, &attr->ah_attr, &context->pri_path, | 2282 | err = mlx5_set_path(dev, qp, &attr->ah_attr, &context->pri_path, |
| 2279 | attr_mask & IB_QP_PORT ? attr->port_num : qp->port, | 2283 | attr_mask & IB_QP_PORT ? attr->port_num : qp->port, |
| 2280 | attr_mask, 0, attr); | 2284 | attr_mask, 0, attr, false); |
| 2281 | if (err) | 2285 | if (err) |
| 2282 | goto out; | 2286 | goto out; |
| 2283 | } | 2287 | } |
| @@ -2288,7 +2292,9 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, | |||
| 2288 | if (attr_mask & IB_QP_ALT_PATH) { | 2292 | if (attr_mask & IB_QP_ALT_PATH) { |
| 2289 | err = mlx5_set_path(dev, qp, &attr->alt_ah_attr, | 2293 | err = mlx5_set_path(dev, qp, &attr->alt_ah_attr, |
| 2290 | &context->alt_path, | 2294 | &context->alt_path, |
| 2291 | attr->alt_port_num, attr_mask, 0, attr); | 2295 | attr->alt_port_num, |
| 2296 | attr_mask | IB_QP_PKEY_INDEX | IB_QP_TIMEOUT, | ||
| 2297 | 0, attr, true); | ||
| 2292 | if (err) | 2298 | if (err) |
| 2293 | goto out; | 2299 | goto out; |
| 2294 | } | 2300 | } |
| @@ -3326,10 +3332,11 @@ static u8 get_fence(u8 fence, struct ib_send_wr *wr) | |||
| 3326 | return MLX5_FENCE_MODE_SMALL_AND_FENCE; | 3332 | return MLX5_FENCE_MODE_SMALL_AND_FENCE; |
| 3327 | else | 3333 | else |
| 3328 | return fence; | 3334 | return fence; |
| 3329 | 3335 | } else if (unlikely(wr->send_flags & IB_SEND_FENCE)) { | |
| 3330 | } else { | 3336 | return MLX5_FENCE_MODE_FENCE; |
| 3331 | return 0; | ||
| 3332 | } | 3337 | } |
| 3338 | |||
| 3339 | return 0; | ||
| 3333 | } | 3340 | } |
| 3334 | 3341 | ||
| 3335 | static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, | 3342 | static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, |
| @@ -4013,11 +4020,12 @@ static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, | |||
| 4013 | if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) { | 4020 | if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) { |
| 4014 | to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path); | 4021 | to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path); |
| 4015 | to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path); | 4022 | to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path); |
| 4016 | qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f; | 4023 | qp_attr->alt_pkey_index = |
| 4024 | be16_to_cpu(context->alt_path.pkey_index); | ||
| 4017 | qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num; | 4025 | qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num; |
| 4018 | } | 4026 | } |
| 4019 | 4027 | ||
| 4020 | qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f; | 4028 | qp_attr->pkey_index = be16_to_cpu(context->pri_path.pkey_index); |
| 4021 | qp_attr->port_num = context->pri_path.port; | 4029 | qp_attr->port_num = context->pri_path.port; |
| 4022 | 4030 | ||
| 4023 | /* qp_attr->en_sqd_async_notify is only applicable in modify qp */ | 4031 | /* qp_attr->en_sqd_async_notify is only applicable in modify qp */ |
| @@ -4079,17 +4087,19 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, | |||
| 4079 | qp_attr->cap.max_recv_sge = qp->rq.max_gs; | 4087 | qp_attr->cap.max_recv_sge = qp->rq.max_gs; |
| 4080 | 4088 | ||
| 4081 | if (!ibqp->uobject) { | 4089 | if (!ibqp->uobject) { |
| 4082 | qp_attr->cap.max_send_wr = qp->sq.wqe_cnt; | 4090 | qp_attr->cap.max_send_wr = qp->sq.max_post; |
| 4083 | qp_attr->cap.max_send_sge = qp->sq.max_gs; | 4091 | qp_attr->cap.max_send_sge = qp->sq.max_gs; |
| 4092 | qp_init_attr->qp_context = ibqp->qp_context; | ||
| 4084 | } else { | 4093 | } else { |
| 4085 | qp_attr->cap.max_send_wr = 0; | 4094 | qp_attr->cap.max_send_wr = 0; |
| 4086 | qp_attr->cap.max_send_sge = 0; | 4095 | qp_attr->cap.max_send_sge = 0; |
| 4087 | } | 4096 | } |
| 4088 | 4097 | ||
| 4089 | /* We don't support inline sends for kernel QPs (yet), and we | 4098 | qp_init_attr->qp_type = ibqp->qp_type; |
| 4090 | * don't know what userspace's value should be. | 4099 | qp_init_attr->recv_cq = ibqp->recv_cq; |
| 4091 | */ | 4100 | qp_init_attr->send_cq = ibqp->send_cq; |
| 4092 | qp_attr->cap.max_inline_data = 0; | 4101 | qp_init_attr->srq = ibqp->srq; |
| 4102 | qp_attr->cap.max_inline_data = qp->max_inline_data; | ||
| 4093 | 4103 | ||
| 4094 | qp_init_attr->cap = qp_attr->cap; | 4104 | qp_init_attr->cap = qp_attr->cap; |
| 4095 | 4105 | ||
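Several qp.c hunks store pkey_index in the QP context with cpu_to_be16() and read it back with be16_to_cpu(), instead of keeping the CPU-order value and masking the query result with 0x7f. The round trip is lossless on either endianness; a user-space sketch using the htons()/ntohs() equivalents:

#include <arpa/inet.h>
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* An index that does not fit in 7 bits, which the old
	 * "& 0x7f" read-back would have truncated. */
	uint16_t pkey_index = 0x00ab;

	/* cpu_to_be16() on the modify-QP side... */
	uint16_t wire = htons(pkey_index);

	/* ...and be16_to_cpu() on the query-QP side. */
	uint16_t readback = ntohs(wire);

	assert(readback == pkey_index);
	printf("pkey_index 0x%04x survives the context round trip\n", readback);
	return 0;
}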
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c index ff946d5f59e4..382466a90da7 100644 --- a/drivers/infiniband/hw/qib/qib_file_ops.c +++ b/drivers/infiniband/hw/qib/qib_file_ops.c | |||
| @@ -2178,6 +2178,11 @@ static ssize_t qib_write(struct file *fp, const char __user *data, | |||
| 2178 | 2178 | ||
| 2179 | switch (cmd.type) { | 2179 | switch (cmd.type) { |
| 2180 | case QIB_CMD_ASSIGN_CTXT: | 2180 | case QIB_CMD_ASSIGN_CTXT: |
| 2181 | if (rcd) { | ||
| 2182 | ret = -EINVAL; | ||
| 2183 | goto bail; | ||
| 2184 | } | ||
| 2185 | |||
| 2181 | ret = qib_assign_ctxt(fp, &cmd.cmd.user_info); | 2186 | ret = qib_assign_ctxt(fp, &cmd.cmd.user_info); |
| 2182 | if (ret) | 2187 | if (ret) |
| 2183 | goto bail; | 2188 | goto bail; |
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c index 7209fbc03ccb..a0b6ebee4d8a 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom.c +++ b/drivers/infiniband/hw/usnic/usnic_uiom.c | |||
| @@ -36,7 +36,6 @@ | |||
| 36 | #include <linux/dma-mapping.h> | 36 | #include <linux/dma-mapping.h> |
| 37 | #include <linux/sched.h> | 37 | #include <linux/sched.h> |
| 38 | #include <linux/hugetlb.h> | 38 | #include <linux/hugetlb.h> |
| 39 | #include <linux/dma-attrs.h> | ||
| 40 | #include <linux/iommu.h> | 39 | #include <linux/iommu.h> |
| 41 | #include <linux/workqueue.h> | 40 | #include <linux/workqueue.h> |
| 42 | #include <linux/list.h> | 41 | #include <linux/list.h> |
| @@ -112,10 +111,6 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable, | |||
| 112 | int i; | 111 | int i; |
| 113 | int flags; | 112 | int flags; |
| 114 | dma_addr_t pa; | 113 | dma_addr_t pa; |
| 115 | DEFINE_DMA_ATTRS(attrs); | ||
| 116 | |||
| 117 | if (dmasync) | ||
| 118 | dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs); | ||
| 119 | 114 | ||
| 120 | if (!can_do_mlock()) | 115 | if (!can_do_mlock()) |
| 121 | return -EPERM; | 116 | return -EPERM; |
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index 5fa4d4d81ee0..41ba7e9cadaa 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c | |||
| @@ -369,8 +369,8 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt, | |||
| 369 | /* wrap to first map page, invert bit 0 */ | 369 | /* wrap to first map page, invert bit 0 */ |
| 370 | offset = qpt->incr | ((offset & 1) ^ 1); | 370 | offset = qpt->incr | ((offset & 1) ^ 1); |
| 371 | } | 371 | } |
| 372 | /* there can be no bits at shift and below */ | 372 | /* there can be no set bits in low-order QoS bits */ |
| 373 | WARN_ON(offset & (rdi->dparms.qos_shift - 1)); | 373 | WARN_ON(offset & (BIT(rdi->dparms.qos_shift) - 1)); |
| 374 | qpn = mk_qpn(qpt, map, offset); | 374 | qpn = mk_qpn(qpt, map, offset); |
| 375 | } | 375 | } |
| 376 | 376 | ||
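On the alloc_qpn() fix above: masking with qos_shift - 1 only forms a valid low-bit mask when qos_shift happens to be a power of two; what the WARN_ON really wants is "no bits set below qos_shift", i.e. BIT(qos_shift) - 1. A tiny demonstration of the difference:

#include <stdio.h>

#define BIT(n)	(1UL << (n))

int main(void)
{
	unsigned long offset = 0x4;	/* bit 2 set, below qos_shift */
	unsigned int qos_shift = 3;	/* low 3 bits reserved for QoS */

	/* Old check: qos_shift - 1 == 0b010, so bit 2 slips through. */
	unsigned long old_mask = qos_shift - 1;

	/* Fixed check: BIT(3) - 1 == 0b111 covers every low-order bit. */
	unsigned long new_mask = BIT(qos_shift) - 1;

	printf("old check fires: %s\n", (offset & old_mask) ? "yes" : "no");
	printf("new check fires: %s\n", (offset & new_mask) ? "yes" : "no");
	return 0;
}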
| @@ -502,6 +502,12 @@ static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp) | |||
| 502 | */ | 502 | */ |
| 503 | static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, | 503 | static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, |
| 504 | enum ib_qp_type type) | 504 | enum ib_qp_type type) |
| 505 | __releases(&qp->s_lock) | ||
| 506 | __releases(&qp->s_hlock) | ||
| 507 | __releases(&qp->r_lock) | ||
| 508 | __acquires(&qp->r_lock) | ||
| 509 | __acquires(&qp->s_hlock) | ||
| 510 | __acquires(&qp->s_lock) | ||
| 505 | { | 511 | { |
| 506 | if (qp->state != IB_QPS_RESET) { | 512 | if (qp->state != IB_QPS_RESET) { |
| 507 | qp->state = IB_QPS_RESET; | 513 | qp->state = IB_QPS_RESET; |
| @@ -570,12 +576,6 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, | |||
| 570 | qp->s_ssn = 1; | 576 | qp->s_ssn = 1; |
| 571 | qp->s_lsn = 0; | 577 | qp->s_lsn = 0; |
| 572 | qp->s_mig_state = IB_MIG_MIGRATED; | 578 | qp->s_mig_state = IB_MIG_MIGRATED; |
| 573 | if (qp->s_ack_queue) | ||
| 574 | memset( | ||
| 575 | qp->s_ack_queue, | ||
| 576 | 0, | ||
| 577 | rvt_max_atomic(rdi) * | ||
| 578 | sizeof(*qp->s_ack_queue)); | ||
| 579 | qp->r_head_ack_queue = 0; | 579 | qp->r_head_ack_queue = 0; |
| 580 | qp->s_tail_ack_queue = 0; | 580 | qp->s_tail_ack_queue = 0; |
| 581 | qp->s_num_rd_atomic = 0; | 581 | qp->s_num_rd_atomic = 0; |
| @@ -699,8 +699,10 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, | |||
| 699 | * initialization that is needed. | 699 | * initialization that is needed. |
| 700 | */ | 700 | */ |
| 701 | priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp); | 701 | priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp); |
| 702 | if (!priv) | 702 | if (IS_ERR(priv)) { |
| 703 | ret = priv; | ||
| 703 | goto bail_qp; | 704 | goto bail_qp; |
| 705 | } | ||
| 704 | qp->priv = priv; | 706 | qp->priv = priv; |
| 705 | qp->timeout_jiffies = | 707 | qp->timeout_jiffies = |
| 706 | usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / | 708 | usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / |
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c index e1cc2cc42f25..30c4fda7a05a 100644 --- a/drivers/infiniband/sw/rdmavt/vt.c +++ b/drivers/infiniband/sw/rdmavt/vt.c | |||
| @@ -501,9 +501,7 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb) | |||
| 501 | !rdi->driver_f.quiesce_qp || | 501 | !rdi->driver_f.quiesce_qp || |
| 502 | !rdi->driver_f.notify_error_qp || | 502 | !rdi->driver_f.notify_error_qp || |
| 503 | !rdi->driver_f.mtu_from_qp || | 503 | !rdi->driver_f.mtu_from_qp || |
| 504 | !rdi->driver_f.mtu_to_path_mtu || | 504 | !rdi->driver_f.mtu_to_path_mtu) |
| 505 | !rdi->driver_f.shut_down_port || | ||
| 506 | !rdi->driver_f.cap_mask_chg) | ||
| 507 | return -EINVAL; | 505 | return -EINVAL; |
| 508 | break; | 506 | break; |
| 509 | 507 | ||
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index bab7db6fa9ab..4f7d9b48df64 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h | |||
| @@ -94,6 +94,7 @@ enum { | |||
| 94 | IPOIB_NEIGH_TBL_FLUSH = 12, | 94 | IPOIB_NEIGH_TBL_FLUSH = 12, |
| 95 | IPOIB_FLAG_DEV_ADDR_SET = 13, | 95 | IPOIB_FLAG_DEV_ADDR_SET = 13, |
| 96 | IPOIB_FLAG_DEV_ADDR_CTRL = 14, | 96 | IPOIB_FLAG_DEV_ADDR_CTRL = 14, |
| 97 | IPOIB_FLAG_GOING_DOWN = 15, | ||
| 97 | 98 | ||
| 98 | IPOIB_MAX_BACKOFF_SECONDS = 16, | 99 | IPOIB_MAX_BACKOFF_SECONDS = 16, |
| 99 | 100 | ||
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index b2f42835d76d..951d9abcca8b 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c | |||
| @@ -1486,6 +1486,10 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr, | |||
| 1486 | { | 1486 | { |
| 1487 | struct net_device *dev = to_net_dev(d); | 1487 | struct net_device *dev = to_net_dev(d); |
| 1488 | int ret; | 1488 | int ret; |
| 1489 | struct ipoib_dev_priv *priv = netdev_priv(dev); | ||
| 1490 | |||
| 1491 | if (test_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags)) | ||
| 1492 | return -EPERM; | ||
| 1489 | 1493 | ||
| 1490 | if (!rtnl_trylock()) | 1494 | if (!rtnl_trylock()) |
| 1491 | return restart_syscall(); | 1495 | return restart_syscall(); |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 45c40a17d6a6..dc6d241b9406 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c | |||
| @@ -1015,7 +1015,7 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv) | |||
| 1015 | if (ib_query_gid(priv->ca, priv->port, 0, &gid0, NULL)) | 1015 | if (ib_query_gid(priv->ca, priv->port, 0, &gid0, NULL)) |
| 1016 | return false; | 1016 | return false; |
| 1017 | 1017 | ||
| 1018 | netif_addr_lock(priv->dev); | 1018 | netif_addr_lock_bh(priv->dev); |
| 1019 | 1019 | ||
| 1020 | /* The subnet prefix may have changed, update it now so we won't have | 1020 | /* The subnet prefix may have changed, update it now so we won't have |
| 1021 | * to do it later | 1021 | * to do it later |
| @@ -1026,12 +1026,12 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv) | |||
| 1026 | 1026 | ||
| 1027 | search_gid.global.interface_id = priv->local_gid.global.interface_id; | 1027 | search_gid.global.interface_id = priv->local_gid.global.interface_id; |
| 1028 | 1028 | ||
| 1029 | netif_addr_unlock(priv->dev); | 1029 | netif_addr_unlock_bh(priv->dev); |
| 1030 | 1030 | ||
| 1031 | err = ib_find_gid(priv->ca, &search_gid, IB_GID_TYPE_IB, | 1031 | err = ib_find_gid(priv->ca, &search_gid, IB_GID_TYPE_IB, |
| 1032 | priv->dev, &port, &index); | 1032 | priv->dev, &port, &index); |
| 1033 | 1033 | ||
| 1034 | netif_addr_lock(priv->dev); | 1034 | netif_addr_lock_bh(priv->dev); |
| 1035 | 1035 | ||
| 1036 | if (search_gid.global.interface_id != | 1036 | if (search_gid.global.interface_id != |
| 1037 | priv->local_gid.global.interface_id) | 1037 | priv->local_gid.global.interface_id) |
| @@ -1092,7 +1092,7 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv) | |||
| 1092 | } | 1092 | } |
| 1093 | 1093 | ||
| 1094 | out: | 1094 | out: |
| 1095 | netif_addr_unlock(priv->dev); | 1095 | netif_addr_unlock_bh(priv->dev); |
| 1096 | 1096 | ||
| 1097 | return ret; | 1097 | return ret; |
| 1098 | } | 1098 | } |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 2d7c16346648..5f58c41ef787 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
| @@ -1206,7 +1206,9 @@ struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr) | |||
| 1206 | neigh = NULL; | 1206 | neigh = NULL; |
| 1207 | goto out_unlock; | 1207 | goto out_unlock; |
| 1208 | } | 1208 | } |
| 1209 | neigh->alive = jiffies; | 1209 | |
| 1210 | if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)) | ||
| 1211 | neigh->alive = jiffies; | ||
| 1210 | goto out_unlock; | 1212 | goto out_unlock; |
| 1211 | } | 1213 | } |
| 1212 | } | 1214 | } |
| @@ -1851,7 +1853,7 @@ static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid) | |||
| 1851 | struct ipoib_dev_priv *child_priv; | 1853 | struct ipoib_dev_priv *child_priv; |
| 1852 | struct net_device *netdev = priv->dev; | 1854 | struct net_device *netdev = priv->dev; |
| 1853 | 1855 | ||
| 1854 | netif_addr_lock(netdev); | 1856 | netif_addr_lock_bh(netdev); |
| 1855 | 1857 | ||
| 1856 | memcpy(&priv->local_gid.global.interface_id, | 1858 | memcpy(&priv->local_gid.global.interface_id, |
| 1857 | &gid->global.interface_id, | 1859 | &gid->global.interface_id, |
| @@ -1859,7 +1861,7 @@ static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid) | |||
| 1859 | memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid)); | 1861 | memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid)); |
| 1860 | clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags); | 1862 | clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags); |
| 1861 | 1863 | ||
| 1862 | netif_addr_unlock(netdev); | 1864 | netif_addr_unlock_bh(netdev); |
| 1863 | 1865 | ||
| 1864 | if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { | 1866 | if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { |
| 1865 | down_read(&priv->vlan_rwsem); | 1867 | down_read(&priv->vlan_rwsem); |
| @@ -1875,7 +1877,7 @@ static int ipoib_check_lladdr(struct net_device *dev, | |||
| 1875 | union ib_gid *gid = (union ib_gid *)(ss->__data + 4); | 1877 | union ib_gid *gid = (union ib_gid *)(ss->__data + 4); |
| 1876 | int ret = 0; | 1878 | int ret = 0; |
| 1877 | 1879 | ||
| 1878 | netif_addr_lock(dev); | 1880 | netif_addr_lock_bh(dev); |
| 1879 | 1881 | ||
| 1880 | /* Make sure the QPN, reserved and subnet prefix match the current | 1882 | /* Make sure the QPN, reserved and subnet prefix match the current |
| 1881 | * lladdr, it also makes sure the lladdr is unicast. | 1883 | * lladdr, it also makes sure the lladdr is unicast. |
| @@ -1885,7 +1887,7 @@ static int ipoib_check_lladdr(struct net_device *dev, | |||
| 1885 | gid->global.interface_id == 0) | 1887 | gid->global.interface_id == 0) |
| 1886 | ret = -EINVAL; | 1888 | ret = -EINVAL; |
| 1887 | 1889 | ||
| 1888 | netif_addr_unlock(dev); | 1890 | netif_addr_unlock_bh(dev); |
| 1889 | 1891 | ||
| 1890 | return ret; | 1892 | return ret; |
| 1891 | } | 1893 | } |
| @@ -2141,6 +2143,9 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data) | |||
| 2141 | ib_unregister_event_handler(&priv->event_handler); | 2143 | ib_unregister_event_handler(&priv->event_handler); |
| 2142 | flush_workqueue(ipoib_workqueue); | 2144 | flush_workqueue(ipoib_workqueue); |
| 2143 | 2145 | ||
| 2146 | /* mark interface in the middle of destruction */ | ||
| 2147 | set_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags); | ||
| 2148 | |||
| 2144 | rtnl_lock(); | 2149 | rtnl_lock(); |
| 2145 | dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP); | 2150 | dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP); |
| 2146 | rtnl_unlock(); | 2151 | rtnl_unlock(); |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 82fbc9442608..d3394b6add24 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c | |||
| @@ -582,13 +582,13 @@ void ipoib_mcast_join_task(struct work_struct *work) | |||
| 582 | return; | 582 | return; |
| 583 | } | 583 | } |
| 584 | priv->local_lid = port_attr.lid; | 584 | priv->local_lid = port_attr.lid; |
| 585 | netif_addr_lock(dev); | 585 | netif_addr_lock_bh(dev); |
| 586 | 586 | ||
| 587 | if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) { | 587 | if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) { |
| 588 | netif_addr_unlock(dev); | 588 | netif_addr_unlock_bh(dev); |
| 589 | return; | 589 | return; |
| 590 | } | 590 | } |
| 591 | netif_addr_unlock(dev); | 591 | netif_addr_unlock_bh(dev); |
| 592 | 592 | ||
| 593 | spin_lock_irq(&priv->lock); | 593 | spin_lock_irq(&priv->lock); |
| 594 | if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) | 594 | if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c index 64a35595eab8..a2f9f29c6ab5 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c | |||
| @@ -131,6 +131,9 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) | |||
| 131 | 131 | ||
| 132 | ppriv = netdev_priv(pdev); | 132 | ppriv = netdev_priv(pdev); |
| 133 | 133 | ||
| 134 | if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags)) | ||
| 135 | return -EPERM; | ||
| 136 | |||
| 134 | snprintf(intf_name, sizeof intf_name, "%s.%04x", | 137 | snprintf(intf_name, sizeof intf_name, "%s.%04x", |
| 135 | ppriv->dev->name, pkey); | 138 | ppriv->dev->name, pkey); |
| 136 | priv = ipoib_intf_alloc(intf_name); | 139 | priv = ipoib_intf_alloc(intf_name); |
| @@ -183,6 +186,9 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey) | |||
| 183 | 186 | ||
| 184 | ppriv = netdev_priv(pdev); | 187 | ppriv = netdev_priv(pdev); |
| 185 | 188 | ||
| 189 | if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags)) | ||
| 190 | return -EPERM; | ||
| 191 | |||
| 186 | if (!rtnl_trylock()) | 192 | if (!rtnl_trylock()) |
| 187 | return restart_syscall(); | 193 | return restart_syscall(); |
| 188 | 194 | ||
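The IPoIB hunks add an IPOIB_FLAG_GOING_DOWN bit that ipoib_remove_one() sets before teardown, so the sysfs set_mode and vlan add/delete entry points refuse late requests with -EPERM instead of racing with removal. A sketch of the same reject-after-flag pattern using C11 atomics (names and values are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool going_down = false;

/* Entry point reachable from user space (sysfs write, vlan add, ...). */
static int add_child_interface(int pkey)
{
	if (atomic_load(&going_down))
		return -1;		/* -EPERM-style refusal */
	printf("child 0x%04x created\n", pkey);
	return 0;
}

/* Device removal: publish the flag first, then tear things down. */
static void remove_one(void)
{
	atomic_store(&going_down, true);
	printf("device going down, new requests now refused\n");
}

int main(void)
{
	add_child_interface(0x8001);
	remove_one();
	/* Expect the late request to be refused. */
	return add_child_interface(0x8002) ? 0 : 1;
}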
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 646de170ec12..3322ed750172 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c | |||
| @@ -1457,7 +1457,6 @@ static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch, | |||
| 1457 | { | 1457 | { |
| 1458 | unsigned int sg_offset = 0; | 1458 | unsigned int sg_offset = 0; |
| 1459 | 1459 | ||
| 1460 | state->desc = req->indirect_desc; | ||
| 1461 | state->fr.next = req->fr_list; | 1460 | state->fr.next = req->fr_list; |
| 1462 | state->fr.end = req->fr_list + ch->target->mr_per_cmd; | 1461 | state->fr.end = req->fr_list + ch->target->mr_per_cmd; |
| 1463 | state->sg = scat; | 1462 | state->sg = scat; |
| @@ -1489,7 +1488,6 @@ static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch, | |||
| 1489 | struct scatterlist *sg; | 1488 | struct scatterlist *sg; |
| 1490 | int i; | 1489 | int i; |
| 1491 | 1490 | ||
| 1492 | state->desc = req->indirect_desc; | ||
| 1493 | for_each_sg(scat, sg, count, i) { | 1491 | for_each_sg(scat, sg, count, i) { |
| 1494 | srp_map_desc(state, ib_sg_dma_address(dev->dev, sg), | 1492 | srp_map_desc(state, ib_sg_dma_address(dev->dev, sg), |
| 1495 | ib_sg_dma_len(dev->dev, sg), | 1493 | ib_sg_dma_len(dev->dev, sg), |
| @@ -1655,6 +1653,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch, | |||
| 1655 | target->indirect_size, DMA_TO_DEVICE); | 1653 | target->indirect_size, DMA_TO_DEVICE); |
| 1656 | 1654 | ||
| 1657 | memset(&state, 0, sizeof(state)); | 1655 | memset(&state, 0, sizeof(state)); |
| 1656 | state.desc = req->indirect_desc; | ||
| 1658 | if (dev->use_fast_reg) | 1657 | if (dev->use_fast_reg) |
| 1659 | ret = srp_map_sg_fr(&state, ch, req, scat, count); | 1658 | ret = srp_map_sg_fr(&state, ch, req, scat, count); |
| 1660 | else if (dev->use_fmr) | 1659 | else if (dev->use_fmr) |
| @@ -3526,7 +3525,7 @@ static void srp_add_one(struct ib_device *device) | |||
| 3526 | int mr_page_shift, p; | 3525 | int mr_page_shift, p; |
| 3527 | u64 max_pages_per_mr; | 3526 | u64 max_pages_per_mr; |
| 3528 | 3527 | ||
| 3529 | srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL); | 3528 | srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL); |
| 3530 | if (!srp_dev) | 3529 | if (!srp_dev) |
| 3531 | return; | 3530 | return; |
| 3532 | 3531 | ||
| @@ -3586,8 +3585,6 @@ static void srp_add_one(struct ib_device *device) | |||
| 3586 | IB_ACCESS_REMOTE_WRITE); | 3585 | IB_ACCESS_REMOTE_WRITE); |
| 3587 | if (IS_ERR(srp_dev->global_mr)) | 3586 | if (IS_ERR(srp_dev->global_mr)) |
| 3588 | goto err_pd; | 3587 | goto err_pd; |
| 3589 | } else { | ||
| 3590 | srp_dev->global_mr = NULL; | ||
| 3591 | } | 3588 | } |
| 3592 | 3589 | ||
| 3593 | for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) { | 3590 | for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) { |
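The srp_add_one() hunk switches kmalloc() to kzalloc() so the explicit global_mr = NULL else-branch becomes unnecessary, and srp_map_data() now initializes state.desc once instead of in each mapping helper. A brief sketch of the zero-allocate-then-fill idea (the struct here is a stand-in, not the driver's):

#include <stdio.h>
#include <stdlib.h>

struct srp_device_like {
	void *global_mr;	/* stays NULL unless registration succeeds */
	int   use_fast_reg;
};

int main(void)
{
	/* calloc() plays the role of kzalloc(): every field starts at 0/NULL,
	 * so no explicit "else: global_mr = NULL" branch is needed later. */
	struct srp_device_like *dev = calloc(1, sizeof(*dev));

	if (!dev)
		return 1;

	if (dev->use_fast_reg)		/* registration path skipped here */
		dev->global_mr = (void *)0x1;

	printf("global_mr = %p\n", dev->global_mr);
	free(dev);
	return 0;
}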
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index e68b20cba70b..4a4155640d51 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c | |||
| @@ -1638,8 +1638,7 @@ retry: | |||
| 1638 | */ | 1638 | */ |
| 1639 | qp_init->cap.max_send_wr = srp_sq_size / 2; | 1639 | qp_init->cap.max_send_wr = srp_sq_size / 2; |
| 1640 | qp_init->cap.max_rdma_ctxs = srp_sq_size / 2; | 1640 | qp_init->cap.max_rdma_ctxs = srp_sq_size / 2; |
| 1641 | qp_init->cap.max_send_sge = max(sdev->device->attrs.max_sge_rd, | 1641 | qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE; |
| 1642 | sdev->device->attrs.max_sge); | ||
| 1643 | qp_init->port_num = ch->sport->port; | 1642 | qp_init->port_num = ch->sport->port; |
| 1644 | 1643 | ||
| 1645 | ch->qp = ib_create_qp(sdev->pd, qp_init); | 1644 | ch->qp = ib_create_qp(sdev->pd, qp_init); |
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h index fee6bfd7ca21..389030487da7 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.h +++ b/drivers/infiniband/ulp/srpt/ib_srpt.h | |||
| @@ -106,6 +106,7 @@ enum { | |||
| 106 | SRP_LOGIN_RSP_MULTICHAN_MAINTAINED = 0x2, | 106 | SRP_LOGIN_RSP_MULTICHAN_MAINTAINED = 0x2, |
| 107 | 107 | ||
| 108 | SRPT_DEF_SG_TABLESIZE = 128, | 108 | SRPT_DEF_SG_TABLESIZE = 128, |
| 109 | SRPT_DEF_SG_PER_WQE = 16, | ||
| 109 | 110 | ||
| 110 | MIN_SRPT_SQ_SIZE = 16, | 111 | MIN_SRPT_SQ_SIZE = 16, |
| 111 | DEF_SRPT_SQ_SIZE = 4096, | 112 | DEF_SRPT_SQ_SIZE = 4096, |
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 94b68213c50d..5f6b3bcab078 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c | |||
| @@ -1941,6 +1941,7 @@ static struct iommu_ops arm_smmu_ops = { | |||
| 1941 | .attach_dev = arm_smmu_attach_dev, | 1941 | .attach_dev = arm_smmu_attach_dev, |
| 1942 | .map = arm_smmu_map, | 1942 | .map = arm_smmu_map, |
| 1943 | .unmap = arm_smmu_unmap, | 1943 | .unmap = arm_smmu_unmap, |
| 1944 | .map_sg = default_iommu_map_sg, | ||
| 1944 | .iova_to_phys = arm_smmu_iova_to_phys, | 1945 | .iova_to_phys = arm_smmu_iova_to_phys, |
| 1945 | .add_device = arm_smmu_add_device, | 1946 | .add_device = arm_smmu_add_device, |
| 1946 | .remove_device = arm_smmu_remove_device, | 1947 | .remove_device = arm_smmu_remove_device, |
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index a644d0cec2d8..10700945994e 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
| @@ -3222,11 +3222,6 @@ static int __init init_dmars(void) | |||
| 3222 | } | 3222 | } |
| 3223 | } | 3223 | } |
| 3224 | 3224 | ||
| 3225 | iommu_flush_write_buffer(iommu); | ||
| 3226 | iommu_set_root_entry(iommu); | ||
| 3227 | iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); | ||
| 3228 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); | ||
| 3229 | |||
| 3230 | if (!ecap_pass_through(iommu->ecap)) | 3225 | if (!ecap_pass_through(iommu->ecap)) |
| 3231 | hw_pass_through = 0; | 3226 | hw_pass_through = 0; |
| 3232 | #ifdef CONFIG_INTEL_IOMMU_SVM | 3227 | #ifdef CONFIG_INTEL_IOMMU_SVM |
| @@ -3235,6 +3230,18 @@ static int __init init_dmars(void) | |||
| 3235 | #endif | 3230 | #endif |
| 3236 | } | 3231 | } |
| 3237 | 3232 | ||
| 3233 | /* | ||
| 3234 | * Now that qi is enabled on all iommus, set the root entry and flush | ||
| 3235 | * caches. This is required on some Intel X58 chipsets, otherwise the | ||
| 3236 | * flush_context function will loop forever and the boot hangs. | ||
| 3237 | */ | ||
| 3238 | for_each_active_iommu(iommu, drhd) { | ||
| 3239 | iommu_flush_write_buffer(iommu); | ||
| 3240 | iommu_set_root_entry(iommu); | ||
| 3241 | iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); | ||
| 3242 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); | ||
| 3243 | } | ||
| 3244 | |||
| 3238 | if (iommu_pass_through) | 3245 | if (iommu_pass_through) |
| 3239 | iommu_identity_mapping |= IDENTMAP_ALL; | 3246 | iommu_identity_mapping |= IDENTMAP_ALL; |
| 3240 | 3247 | ||
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index c7d6156ff536..25b4627cb57f 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c | |||
| @@ -815,7 +815,7 @@ static int rk_iommu_attach_device(struct iommu_domain *domain, | |||
| 815 | dte_addr = virt_to_phys(rk_domain->dt); | 815 | dte_addr = virt_to_phys(rk_domain->dt); |
| 816 | for (i = 0; i < iommu->num_mmu; i++) { | 816 | for (i = 0; i < iommu->num_mmu; i++) { |
| 817 | rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_addr); | 817 | rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_addr); |
| 818 | rk_iommu_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE); | 818 | rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE); |
| 819 | rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK); | 819 | rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK); |
| 820 | } | 820 | } |
| 821 | 821 | ||
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index 3b5e10aa48ab..8a4adbeb2b8c 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c | |||
| @@ -746,6 +746,12 @@ static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq, | |||
| 746 | /* verify that it doesn't conflict with an IPI irq */ | 746 | /* verify that it doesn't conflict with an IPI irq */ |
| 747 | if (test_bit(spec->hwirq, ipi_resrv)) | 747 | if (test_bit(spec->hwirq, ipi_resrv)) |
| 748 | return -EBUSY; | 748 | return -EBUSY; |
| 749 | |||
| 750 | hwirq = GIC_SHARED_TO_HWIRQ(spec->hwirq); | ||
| 751 | |||
| 752 | return irq_domain_set_hwirq_and_chip(d, virq, hwirq, | ||
| 753 | &gic_level_irq_controller, | ||
| 754 | NULL); | ||
| 749 | } else { | 755 | } else { |
| 750 | base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs); | 756 | base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs); |
| 751 | if (base_hwirq == gic_shared_intrs) { | 757 | if (base_hwirq == gic_shared_intrs) { |
| @@ -867,10 +873,14 @@ static int gic_dev_domain_alloc(struct irq_domain *d, unsigned int virq, | |||
| 867 | &gic_level_irq_controller, | 873 | &gic_level_irq_controller, |
| 868 | NULL); | 874 | NULL); |
| 869 | if (ret) | 875 | if (ret) |
| 870 | return ret; | 876 | goto error; |
| 871 | } | 877 | } |
| 872 | 878 | ||
| 873 | return 0; | 879 | return 0; |
| 880 | |||
| 881 | error: | ||
| 882 | irq_domain_free_irqs_parent(d, virq, nr_irqs); | ||
| 883 | return ret; | ||
| 874 | } | 884 | } |
| 875 | 885 | ||
| 876 | void gic_dev_domain_free(struct irq_domain *d, unsigned int virq, | 886 | void gic_dev_domain_free(struct irq_domain *d, unsigned int virq, |
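Editor's note: the irq-mips-gic change above binds the shared interrupt to its chip with irq_domain_set_hwirq_and_chip() and, in the device-domain allocator, unwinds a partially set-up range with irq_domain_free_irqs_parent() on failure. Below is a minimal sketch of that allocate-then-unwind pattern for a hierarchical irq domain; the chip, the hwirq encoding in arg, and all demo_* names are assumptions for illustration, not the MIPS GIC code.

#include <linux/irq.h>
#include <linux/irqdomain.h>

/* Hypothetical chip for the sketch; not the MIPS GIC controller. */
static struct irq_chip demo_level_chip;

static int demo_domain_alloc(struct irq_domain *d, unsigned int virq,
			     unsigned int nr_irqs, void *arg)
{
	irq_hw_number_t hwirq = *(irq_hw_number_t *)arg;	/* assumed encoding */
	unsigned int i;
	int ret;

	for (i = 0; i < nr_irqs; i++) {
		/* Bind each virq to its hwirq and chip in this domain. */
		ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq + i,
						    &demo_level_chip, NULL);
		if (ret)
			goto error;
	}

	return 0;

error:
	/* Undo whatever the parent domain already set up for this range. */
	irq_domain_free_irqs_parent(d, virq, nr_irqs);
	return ret;
}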
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c index 3495d5d6547f..3bce44893021 100644 --- a/drivers/leds/led-core.c +++ b/drivers/leds/led-core.c | |||
| @@ -53,11 +53,12 @@ static void led_timer_function(unsigned long data) | |||
| 53 | 53 | ||
| 54 | if (!led_cdev->blink_delay_on || !led_cdev->blink_delay_off) { | 54 | if (!led_cdev->blink_delay_on || !led_cdev->blink_delay_off) { |
| 55 | led_set_brightness_nosleep(led_cdev, LED_OFF); | 55 | led_set_brightness_nosleep(led_cdev, LED_OFF); |
| 56 | led_cdev->flags &= ~LED_BLINK_SW; | ||
| 56 | return; | 57 | return; |
| 57 | } | 58 | } |
| 58 | 59 | ||
| 59 | if (led_cdev->flags & LED_BLINK_ONESHOT_STOP) { | 60 | if (led_cdev->flags & LED_BLINK_ONESHOT_STOP) { |
| 60 | led_cdev->flags &= ~LED_BLINK_ONESHOT_STOP; | 61 | led_cdev->flags &= ~(LED_BLINK_ONESHOT_STOP | LED_BLINK_SW); |
| 61 | return; | 62 | return; |
| 62 | } | 63 | } |
| 63 | 64 | ||
| @@ -151,6 +152,7 @@ static void led_set_software_blink(struct led_classdev *led_cdev, | |||
| 151 | return; | 152 | return; |
| 152 | } | 153 | } |
| 153 | 154 | ||
| 155 | led_cdev->flags |= LED_BLINK_SW; | ||
| 154 | mod_timer(&led_cdev->blink_timer, jiffies + 1); | 156 | mod_timer(&led_cdev->blink_timer, jiffies + 1); |
| 155 | } | 157 | } |
| 156 | 158 | ||
| @@ -219,6 +221,7 @@ void led_stop_software_blink(struct led_classdev *led_cdev) | |||
| 219 | del_timer_sync(&led_cdev->blink_timer); | 221 | del_timer_sync(&led_cdev->blink_timer); |
| 220 | led_cdev->blink_delay_on = 0; | 222 | led_cdev->blink_delay_on = 0; |
| 221 | led_cdev->blink_delay_off = 0; | 223 | led_cdev->blink_delay_off = 0; |
| 224 | led_cdev->flags &= ~LED_BLINK_SW; | ||
| 222 | } | 225 | } |
| 223 | EXPORT_SYMBOL_GPL(led_stop_software_blink); | 226 | EXPORT_SYMBOL_GPL(led_stop_software_blink); |
| 224 | 227 | ||
| @@ -226,10 +229,10 @@ void led_set_brightness(struct led_classdev *led_cdev, | |||
| 226 | enum led_brightness brightness) | 229 | enum led_brightness brightness) |
| 227 | { | 230 | { |
| 228 | /* | 231 | /* |
| 229 | * In case blinking is on delay brightness setting | 232 | * If software blink is active, delay brightness setting |
| 230 | * until the next timer tick. | 233 | * until the next timer tick. |
| 231 | */ | 234 | */ |
| 232 | if (led_cdev->blink_delay_on || led_cdev->blink_delay_off) { | 235 | if (led_cdev->flags & LED_BLINK_SW) { |
| 233 | /* | 236 | /* |
| 234 | * If we need to disable soft blinking delegate this to the | 237 | * If we need to disable soft blinking delegate this to the |
| 235 | * work queue task to avoid problems in case we are called | 238 | * work queue task to avoid problems in case we are called |
diff --git a/drivers/leds/trigger/ledtrig-heartbeat.c b/drivers/leds/trigger/ledtrig-heartbeat.c index 410c39c62dc7..c9f386213e9e 100644 --- a/drivers/leds/trigger/ledtrig-heartbeat.c +++ b/drivers/leds/trigger/ledtrig-heartbeat.c | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <linux/sched.h> | 19 | #include <linux/sched.h> |
| 20 | #include <linux/leds.h> | 20 | #include <linux/leds.h> |
| 21 | #include <linux/reboot.h> | 21 | #include <linux/reboot.h> |
| 22 | #include <linux/suspend.h> | ||
| 22 | #include "../leds.h" | 23 | #include "../leds.h" |
| 23 | 24 | ||
| 24 | static int panic_heartbeats; | 25 | static int panic_heartbeats; |
| @@ -154,6 +155,30 @@ static struct led_trigger heartbeat_led_trigger = { | |||
| 154 | .deactivate = heartbeat_trig_deactivate, | 155 | .deactivate = heartbeat_trig_deactivate, |
| 155 | }; | 156 | }; |
| 156 | 157 | ||
| 158 | static int heartbeat_pm_notifier(struct notifier_block *nb, | ||
| 159 | unsigned long pm_event, void *unused) | ||
| 160 | { | ||
| 161 | int rc; | ||
| 162 | |||
| 163 | switch (pm_event) { | ||
| 164 | case PM_SUSPEND_PREPARE: | ||
| 165 | case PM_HIBERNATION_PREPARE: | ||
| 166 | case PM_RESTORE_PREPARE: | ||
| 167 | led_trigger_unregister(&heartbeat_led_trigger); | ||
| 168 | break; | ||
| 169 | case PM_POST_SUSPEND: | ||
| 170 | case PM_POST_HIBERNATION: | ||
| 171 | case PM_POST_RESTORE: | ||
| 172 | rc = led_trigger_register(&heartbeat_led_trigger); | ||
| 173 | if (rc) | ||
| 174 | pr_err("could not re-register heartbeat trigger\n"); | ||
| 175 | break; | ||
| 176 | default: | ||
| 177 | break; | ||
| 178 | } | ||
| 179 | return NOTIFY_DONE; | ||
| 180 | } | ||
| 181 | |||
| 157 | static int heartbeat_reboot_notifier(struct notifier_block *nb, | 182 | static int heartbeat_reboot_notifier(struct notifier_block *nb, |
| 158 | unsigned long code, void *unused) | 183 | unsigned long code, void *unused) |
| 159 | { | 184 | { |
| @@ -168,6 +193,10 @@ static int heartbeat_panic_notifier(struct notifier_block *nb, | |||
| 168 | return NOTIFY_DONE; | 193 | return NOTIFY_DONE; |
| 169 | } | 194 | } |
| 170 | 195 | ||
| 196 | static struct notifier_block heartbeat_pm_nb = { | ||
| 197 | .notifier_call = heartbeat_pm_notifier, | ||
| 198 | }; | ||
| 199 | |||
| 171 | static struct notifier_block heartbeat_reboot_nb = { | 200 | static struct notifier_block heartbeat_reboot_nb = { |
| 172 | .notifier_call = heartbeat_reboot_notifier, | 201 | .notifier_call = heartbeat_reboot_notifier, |
| 173 | }; | 202 | }; |
| @@ -184,12 +213,14 @@ static int __init heartbeat_trig_init(void) | |||
| 184 | atomic_notifier_chain_register(&panic_notifier_list, | 213 | atomic_notifier_chain_register(&panic_notifier_list, |
| 185 | &heartbeat_panic_nb); | 214 | &heartbeat_panic_nb); |
| 186 | register_reboot_notifier(&heartbeat_reboot_nb); | 215 | register_reboot_notifier(&heartbeat_reboot_nb); |
| 216 | register_pm_notifier(&heartbeat_pm_nb); | ||
| 187 | } | 217 | } |
| 188 | return rc; | 218 | return rc; |
| 189 | } | 219 | } |
| 190 | 220 | ||
| 191 | static void __exit heartbeat_trig_exit(void) | 221 | static void __exit heartbeat_trig_exit(void) |
| 192 | { | 222 | { |
| 223 | unregister_pm_notifier(&heartbeat_pm_nb); | ||
| 193 | unregister_reboot_notifier(&heartbeat_reboot_nb); | 224 | unregister_reboot_notifier(&heartbeat_reboot_nb); |
| 194 | atomic_notifier_chain_unregister(&panic_notifier_list, | 225 | atomic_notifier_chain_unregister(&panic_notifier_list, |
| 195 | &heartbeat_panic_nb); | 226 | &heartbeat_panic_nb); |
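Editor's note: the heartbeat trigger now unregisters itself before suspend/hibernation and re-registers afterwards through a PM notifier. A minimal sketch of that notifier pattern follows, assuming only the standard register_pm_notifier()/unregister_pm_notifier() API; the demo_* names are placeholders.

#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/notifier.h>
#include <linux/printk.h>

static int demo_pm_notifier(struct notifier_block *nb,
			    unsigned long pm_event, void *unused)
{
	switch (pm_event) {
	case PM_SUSPEND_PREPARE:	/* about to suspend: quiesce activity */
		pr_info("demo: preparing for suspend\n");
		break;
	case PM_POST_SUSPEND:		/* resumed: restore activity */
		pr_info("demo: resumed\n");
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block demo_pm_nb = {
	.notifier_call = demo_pm_notifier,
};

static int __init demo_init(void)
{
	return register_pm_notifier(&demo_pm_nb);
}

static void __exit demo_exit(void)
{
	unregister_pm_notifier(&demo_pm_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");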
diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c index b73c6e7d28e4..6f2c8522e14a 100644 --- a/drivers/mcb/mcb-core.c +++ b/drivers/mcb/mcb-core.c | |||
| @@ -61,21 +61,36 @@ static int mcb_probe(struct device *dev) | |||
| 61 | struct mcb_driver *mdrv = to_mcb_driver(dev->driver); | 61 | struct mcb_driver *mdrv = to_mcb_driver(dev->driver); |
| 62 | struct mcb_device *mdev = to_mcb_device(dev); | 62 | struct mcb_device *mdev = to_mcb_device(dev); |
| 63 | const struct mcb_device_id *found_id; | 63 | const struct mcb_device_id *found_id; |
| 64 | struct module *carrier_mod; | ||
| 65 | int ret; | ||
| 64 | 66 | ||
| 65 | found_id = mcb_match_id(mdrv->id_table, mdev); | 67 | found_id = mcb_match_id(mdrv->id_table, mdev); |
| 66 | if (!found_id) | 68 | if (!found_id) |
| 67 | return -ENODEV; | 69 | return -ENODEV; |
| 68 | 70 | ||
| 69 | return mdrv->probe(mdev, found_id); | 71 | carrier_mod = mdev->dev.parent->driver->owner; |
| 72 | if (!try_module_get(carrier_mod)) | ||
| 73 | return -EINVAL; | ||
| 74 | |||
| 75 | get_device(dev); | ||
| 76 | ret = mdrv->probe(mdev, found_id); | ||
| 77 | if (ret) | ||
| 78 | module_put(carrier_mod); | ||
| 79 | |||
| 80 | return ret; | ||
| 70 | } | 81 | } |
| 71 | 82 | ||
| 72 | static int mcb_remove(struct device *dev) | 83 | static int mcb_remove(struct device *dev) |
| 73 | { | 84 | { |
| 74 | struct mcb_driver *mdrv = to_mcb_driver(dev->driver); | 85 | struct mcb_driver *mdrv = to_mcb_driver(dev->driver); |
| 75 | struct mcb_device *mdev = to_mcb_device(dev); | 86 | struct mcb_device *mdev = to_mcb_device(dev); |
| 87 | struct module *carrier_mod; | ||
| 76 | 88 | ||
| 77 | mdrv->remove(mdev); | 89 | mdrv->remove(mdev); |
| 78 | 90 | ||
| 91 | carrier_mod = mdev->dev.parent->driver->owner; | ||
| 92 | module_put(carrier_mod); | ||
| 93 | |||
| 79 | put_device(&mdev->dev); | 94 | put_device(&mdev->dev); |
| 80 | 95 | ||
| 81 | return 0; | 96 | return 0; |
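Editor's note: mcb_probe() now pins the carrier driver's module with try_module_get() before calling the device driver's probe, and mcb_remove() drops that reference, so the carrier cannot be unloaded while a device on it is bound. A generic sketch of that pin/unpin pattern is shown below; the demo_* callbacks are hypothetical, not the MCB bus code.

#include <linux/device.h>
#include <linux/module.h>

/* Pin the module that owns the parent (carrier) driver while a child
 * device is bound; release it again on unbind or probe failure. */
static int demo_bus_probe(struct device *dev,
			  int (*drv_probe)(struct device *))
{
	struct module *carrier_mod = dev->parent->driver->owner;
	int ret;

	if (!try_module_get(carrier_mod))	/* carrier is being unloaded */
		return -EINVAL;

	get_device(dev);
	ret = drv_probe(dev);
	if (ret) {
		put_device(dev);
		module_put(carrier_mod);
	}
	return ret;
}

static void demo_bus_remove(struct device *dev,
			    void (*drv_remove)(struct device *))
{
	struct module *carrier_mod = dev->parent->driver->owner;

	drv_remove(dev);
	module_put(carrier_mod);
	put_device(dev);
}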
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c index d7723ce772b3..c04bc6afb965 100644 --- a/drivers/media/usb/uvc/uvc_v4l2.c +++ b/drivers/media/usb/uvc/uvc_v4l2.c | |||
| @@ -1274,8 +1274,6 @@ struct uvc_xu_control_mapping32 { | |||
| 1274 | static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp, | 1274 | static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp, |
| 1275 | const struct uvc_xu_control_mapping32 __user *up) | 1275 | const struct uvc_xu_control_mapping32 __user *up) |
| 1276 | { | 1276 | { |
| 1277 | struct uvc_menu_info __user *umenus; | ||
| 1278 | struct uvc_menu_info __user *kmenus; | ||
| 1279 | compat_caddr_t p; | 1277 | compat_caddr_t p; |
| 1280 | 1278 | ||
| 1281 | if (!access_ok(VERIFY_READ, up, sizeof(*up)) || | 1279 | if (!access_ok(VERIFY_READ, up, sizeof(*up)) || |
| @@ -1292,17 +1290,7 @@ static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp, | |||
| 1292 | 1290 | ||
| 1293 | if (__get_user(p, &up->menu_info)) | 1291 | if (__get_user(p, &up->menu_info)) |
| 1294 | return -EFAULT; | 1292 | return -EFAULT; |
| 1295 | umenus = compat_ptr(p); | 1293 | kp->menu_info = compat_ptr(p); |
| 1296 | if (!access_ok(VERIFY_READ, umenus, kp->menu_count * sizeof(*umenus))) | ||
| 1297 | return -EFAULT; | ||
| 1298 | |||
| 1299 | kmenus = compat_alloc_user_space(kp->menu_count * sizeof(*kmenus)); | ||
| 1300 | if (kmenus == NULL) | ||
| 1301 | return -EFAULT; | ||
| 1302 | kp->menu_info = kmenus; | ||
| 1303 | |||
| 1304 | if (copy_in_user(kmenus, umenus, kp->menu_count * sizeof(*umenus))) | ||
| 1305 | return -EFAULT; | ||
| 1306 | 1294 | ||
| 1307 | return 0; | 1295 | return 0; |
| 1308 | } | 1296 | } |
| @@ -1310,10 +1298,6 @@ static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp, | |||
| 1310 | static int uvc_v4l2_put_xu_mapping(const struct uvc_xu_control_mapping *kp, | 1298 | static int uvc_v4l2_put_xu_mapping(const struct uvc_xu_control_mapping *kp, |
| 1311 | struct uvc_xu_control_mapping32 __user *up) | 1299 | struct uvc_xu_control_mapping32 __user *up) |
| 1312 | { | 1300 | { |
| 1313 | struct uvc_menu_info __user *umenus; | ||
| 1314 | struct uvc_menu_info __user *kmenus = kp->menu_info; | ||
| 1315 | compat_caddr_t p; | ||
| 1316 | |||
| 1317 | if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || | 1301 | if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || |
| 1318 | __copy_to_user(up, kp, offsetof(typeof(*up), menu_info)) || | 1302 | __copy_to_user(up, kp, offsetof(typeof(*up), menu_info)) || |
| 1319 | __put_user(kp->menu_count, &up->menu_count)) | 1303 | __put_user(kp->menu_count, &up->menu_count)) |
| @@ -1322,16 +1306,6 @@ static int uvc_v4l2_put_xu_mapping(const struct uvc_xu_control_mapping *kp, | |||
| 1322 | if (__clear_user(up->reserved, sizeof(up->reserved))) | 1306 | if (__clear_user(up->reserved, sizeof(up->reserved))) |
| 1323 | return -EFAULT; | 1307 | return -EFAULT; |
| 1324 | 1308 | ||
| 1325 | if (kp->menu_count == 0) | ||
| 1326 | return 0; | ||
| 1327 | |||
| 1328 | if (get_user(p, &up->menu_info)) | ||
| 1329 | return -EFAULT; | ||
| 1330 | umenus = compat_ptr(p); | ||
| 1331 | |||
| 1332 | if (copy_in_user(umenus, kmenus, kp->menu_count * sizeof(*umenus))) | ||
| 1333 | return -EFAULT; | ||
| 1334 | |||
| 1335 | return 0; | 1309 | return 0; |
| 1336 | } | 1310 | } |
| 1337 | 1311 | ||
| @@ -1346,8 +1320,6 @@ struct uvc_xu_control_query32 { | |||
| 1346 | static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp, | 1320 | static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp, |
| 1347 | const struct uvc_xu_control_query32 __user *up) | 1321 | const struct uvc_xu_control_query32 __user *up) |
| 1348 | { | 1322 | { |
| 1349 | u8 __user *udata; | ||
| 1350 | u8 __user *kdata; | ||
| 1351 | compat_caddr_t p; | 1323 | compat_caddr_t p; |
| 1352 | 1324 | ||
| 1353 | if (!access_ok(VERIFY_READ, up, sizeof(*up)) || | 1325 | if (!access_ok(VERIFY_READ, up, sizeof(*up)) || |
| @@ -1361,17 +1333,7 @@ static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp, | |||
| 1361 | 1333 | ||
| 1362 | if (__get_user(p, &up->data)) | 1334 | if (__get_user(p, &up->data)) |
| 1363 | return -EFAULT; | 1335 | return -EFAULT; |
| 1364 | udata = compat_ptr(p); | 1336 | kp->data = compat_ptr(p); |
| 1365 | if (!access_ok(VERIFY_READ, udata, kp->size)) | ||
| 1366 | return -EFAULT; | ||
| 1367 | |||
| 1368 | kdata = compat_alloc_user_space(kp->size); | ||
| 1369 | if (kdata == NULL) | ||
| 1370 | return -EFAULT; | ||
| 1371 | kp->data = kdata; | ||
| 1372 | |||
| 1373 | if (copy_in_user(kdata, udata, kp->size)) | ||
| 1374 | return -EFAULT; | ||
| 1375 | 1337 | ||
| 1376 | return 0; | 1338 | return 0; |
| 1377 | } | 1339 | } |
| @@ -1379,26 +1341,10 @@ static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp, | |||
| 1379 | static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp, | 1341 | static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp, |
| 1380 | struct uvc_xu_control_query32 __user *up) | 1342 | struct uvc_xu_control_query32 __user *up) |
| 1381 | { | 1343 | { |
| 1382 | u8 __user *udata; | ||
| 1383 | u8 __user *kdata = kp->data; | ||
| 1384 | compat_caddr_t p; | ||
| 1385 | |||
| 1386 | if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || | 1344 | if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || |
| 1387 | __copy_to_user(up, kp, offsetof(typeof(*up), data))) | 1345 | __copy_to_user(up, kp, offsetof(typeof(*up), data))) |
| 1388 | return -EFAULT; | 1346 | return -EFAULT; |
| 1389 | 1347 | ||
| 1390 | if (kp->size == 0) | ||
| 1391 | return 0; | ||
| 1392 | |||
| 1393 | if (get_user(p, &up->data)) | ||
| 1394 | return -EFAULT; | ||
| 1395 | udata = compat_ptr(p); | ||
| 1396 | if (!access_ok(VERIFY_READ, udata, kp->size)) | ||
| 1397 | return -EFAULT; | ||
| 1398 | |||
| 1399 | if (copy_in_user(udata, kdata, kp->size)) | ||
| 1400 | return -EFAULT; | ||
| 1401 | |||
| 1402 | return 0; | 1348 | return 0; |
| 1403 | } | 1349 | } |
| 1404 | 1350 | ||
| @@ -1408,47 +1354,44 @@ static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp, | |||
| 1408 | static long uvc_v4l2_compat_ioctl32(struct file *file, | 1354 | static long uvc_v4l2_compat_ioctl32(struct file *file, |
| 1409 | unsigned int cmd, unsigned long arg) | 1355 | unsigned int cmd, unsigned long arg) |
| 1410 | { | 1356 | { |
| 1357 | struct uvc_fh *handle = file->private_data; | ||
| 1411 | union { | 1358 | union { |
| 1412 | struct uvc_xu_control_mapping xmap; | 1359 | struct uvc_xu_control_mapping xmap; |
| 1413 | struct uvc_xu_control_query xqry; | 1360 | struct uvc_xu_control_query xqry; |
| 1414 | } karg; | 1361 | } karg; |
| 1415 | void __user *up = compat_ptr(arg); | 1362 | void __user *up = compat_ptr(arg); |
| 1416 | mm_segment_t old_fs; | ||
| 1417 | long ret; | 1363 | long ret; |
| 1418 | 1364 | ||
| 1419 | switch (cmd) { | 1365 | switch (cmd) { |
| 1420 | case UVCIOC_CTRL_MAP32: | 1366 | case UVCIOC_CTRL_MAP32: |
| 1421 | cmd = UVCIOC_CTRL_MAP; | ||
| 1422 | ret = uvc_v4l2_get_xu_mapping(&karg.xmap, up); | 1367 | ret = uvc_v4l2_get_xu_mapping(&karg.xmap, up); |
| 1368 | if (ret) | ||
| 1369 | return ret; | ||
| 1370 | ret = uvc_ioctl_ctrl_map(handle->chain, &karg.xmap); | ||
| 1371 | if (ret) | ||
| 1372 | return ret; | ||
| 1373 | ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up); | ||
| 1374 | if (ret) | ||
| 1375 | return ret; | ||
| 1376 | |||
| 1423 | break; | 1377 | break; |
| 1424 | 1378 | ||
| 1425 | case UVCIOC_CTRL_QUERY32: | 1379 | case UVCIOC_CTRL_QUERY32: |
| 1426 | cmd = UVCIOC_CTRL_QUERY; | ||
| 1427 | ret = uvc_v4l2_get_xu_query(&karg.xqry, up); | 1380 | ret = uvc_v4l2_get_xu_query(&karg.xqry, up); |
| 1381 | if (ret) | ||
| 1382 | return ret; | ||
| 1383 | ret = uvc_xu_ctrl_query(handle->chain, &karg.xqry); | ||
| 1384 | if (ret) | ||
| 1385 | return ret; | ||
| 1386 | ret = uvc_v4l2_put_xu_query(&karg.xqry, up); | ||
| 1387 | if (ret) | ||
| 1388 | return ret; | ||
| 1428 | break; | 1389 | break; |
| 1429 | 1390 | ||
| 1430 | default: | 1391 | default: |
| 1431 | return -ENOIOCTLCMD; | 1392 | return -ENOIOCTLCMD; |
| 1432 | } | 1393 | } |
| 1433 | 1394 | ||
| 1434 | old_fs = get_fs(); | ||
| 1435 | set_fs(KERNEL_DS); | ||
| 1436 | ret = video_ioctl2(file, cmd, (unsigned long)&karg); | ||
| 1437 | set_fs(old_fs); | ||
| 1438 | |||
| 1439 | if (ret < 0) | ||
| 1440 | return ret; | ||
| 1441 | |||
| 1442 | switch (cmd) { | ||
| 1443 | case UVCIOC_CTRL_MAP: | ||
| 1444 | ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up); | ||
| 1445 | break; | ||
| 1446 | |||
| 1447 | case UVCIOC_CTRL_QUERY: | ||
| 1448 | ret = uvc_v4l2_put_xu_query(&karg.xqry, up); | ||
| 1449 | break; | ||
| 1450 | } | ||
| 1451 | |||
| 1452 | return ret; | 1395 | return ret; |
| 1453 | } | 1396 | } |
| 1454 | #endif | 1397 | #endif |
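Editor's note: the uvcvideo compat handler above stops bouncing buffers through compat_alloc_user_space() and set_fs(KERNEL_DS); it converts the 32-bit layout into a kernel struct, stores the user data pointer via compat_ptr(), and calls the native handler directly. A minimal sketch of that translation pattern follows for a hypothetical ioctl structure; the demo_* names and field layout are illustrative assumptions only.

#include <linux/compat.h>
#include <linux/uaccess.h>

/* Hypothetical 32-bit and native layouts for the sketch. */
struct demo_query32 {
	__u32		size;
	compat_caddr_t	data;		/* 32-bit user pointer */
};

struct demo_query {
	__u32		size;
	void __user	*data;
};

static int demo_do_query(struct demo_query *q)
{
	/* Placeholder for the driver's native handler: it would use
	 * copy_from_user()/copy_to_user() on q->data directly. */
	return q->size ? 0 : -EINVAL;
}

static long demo_compat_query(void __user *up)
{
	struct demo_query32 q32;
	struct demo_query q;

	if (copy_from_user(&q32, up, sizeof(q32)))
		return -EFAULT;

	q.size = q32.size;
	q.data = compat_ptr(q32.data);	/* keep it as a user pointer */

	/* The native handler copies from/to q.data itself, so no bounce
	 * buffer and no set_fs() games are needed. */
	return demo_do_query(&q);
}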
diff --git a/drivers/media/v4l2-core/v4l2-mc.c b/drivers/media/v4l2-core/v4l2-mc.c index ca94bded3386..8bef4331bd51 100644 --- a/drivers/media/v4l2-core/v4l2-mc.c +++ b/drivers/media/v4l2-core/v4l2-mc.c | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Media Controller ancillary functions | 2 | * Media Controller ancillary functions |
| 3 | * | 3 | * |
| 4 | * Copyright (c) 2016 Mauro Carvalho Chehab <mchehab@osg.samsung.com> | 4 | * Copyright (c) 2016 Mauro Carvalho Chehab <mchehab@kernel.org> |
| 5 | * Copyright (C) 2016 Shuah Khan <shuahkh@osg.samsung.com> | 5 | * Copyright (C) 2016 Shuah Khan <shuahkh@osg.samsung.com> |
| 6 | * Copyright (C) 2006-2010 Nokia Corporation | 6 | * Copyright (C) 2006-2010 Nokia Corporation |
| 7 | * Copyright (c) 2016 Intel Corporation. | 7 | * Copyright (c) 2016 Intel Corporation. |
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c index af4884ba6b7c..15508df24e5d 100644 --- a/drivers/memory/omap-gpmc.c +++ b/drivers/memory/omap-gpmc.c | |||
| @@ -398,7 +398,7 @@ static void gpmc_cs_bool_timings(int cs, const struct gpmc_bool_timings *p) | |||
| 398 | gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4, | 398 | gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4, |
| 399 | GPMC_CONFIG4_OEEXTRADELAY, p->oe_extra_delay); | 399 | GPMC_CONFIG4_OEEXTRADELAY, p->oe_extra_delay); |
| 400 | gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4, | 400 | gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4, |
| 401 | GPMC_CONFIG4_OEEXTRADELAY, p->we_extra_delay); | 401 | GPMC_CONFIG4_WEEXTRADELAY, p->we_extra_delay); |
| 402 | gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6, | 402 | gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6, |
| 403 | GPMC_CONFIG6_CYCLE2CYCLESAMECSEN, | 403 | GPMC_CONFIG6_CYCLE2CYCLESAMECSEN, |
| 404 | p->cycle2cyclesamecsen); | 404 | p->cycle2cyclesamecsen); |
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index eed254da63a8..641c1a566687 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c | |||
| @@ -730,7 +730,7 @@ static void mei_cl_wake_all(struct mei_cl *cl) | |||
| 730 | /* synchronized under device mutex */ | 730 | /* synchronized under device mutex */ |
| 731 | if (waitqueue_active(&cl->wait)) { | 731 | if (waitqueue_active(&cl->wait)) { |
| 732 | cl_dbg(dev, cl, "Waking up ctrl write clients!\n"); | 732 | cl_dbg(dev, cl, "Waking up ctrl write clients!\n"); |
| 733 | wake_up_interruptible(&cl->wait); | 733 | wake_up(&cl->wait); |
| 734 | } | 734 | } |
| 735 | } | 735 | } |
| 736 | 736 | ||
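Editor's note: mei_cl_wake_all() switches from wake_up_interruptible() to wake_up() because waiters sleeping in TASK_UNINTERRUPTIBLE are skipped by the interruptible variant. A small sketch of the distinction, with placeholder wait queue and condition:

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static int demo_done;

static void demo_waiter(void)
{
	/* Sleeps in TASK_UNINTERRUPTIBLE; only wake_up() reaches it. */
	wait_event(demo_wq, demo_done);
}

static void demo_waker(void)
{
	demo_done = 1;
	/* wake_up_interruptible() would miss uninterruptible sleepers,
	 * so use plain wake_up() when both kinds may be queued. */
	wake_up(&demo_wq);
}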
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index 16baeb51b2bd..ef3618299494 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c | |||
| @@ -1147,11 +1147,17 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway) | |||
| 1147 | */ | 1147 | */ |
| 1148 | static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev) | 1148 | static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev) |
| 1149 | { | 1149 | { |
| 1150 | struct kstat stat; | ||
| 1151 | int err, minor; | 1150 | int err, minor; |
| 1151 | struct path path; | ||
| 1152 | struct kstat stat; | ||
| 1152 | 1153 | ||
| 1153 | /* Probably this is an MTD character device node path */ | 1154 | /* Probably this is an MTD character device node path */ |
| 1154 | err = vfs_stat(mtd_dev, &stat); | 1155 | err = kern_path(mtd_dev, LOOKUP_FOLLOW, &path); |
| 1156 | if (err) | ||
| 1157 | return ERR_PTR(err); | ||
| 1158 | |||
| 1159 | err = vfs_getattr(&path, &stat); | ||
| 1160 | path_put(&path); | ||
| 1155 | if (err) | 1161 | if (err) |
| 1156 | return ERR_PTR(err); | 1162 | return ERR_PTR(err); |
| 1157 | 1163 | ||
| @@ -1160,6 +1166,7 @@ static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev) | |||
| 1160 | return ERR_PTR(-EINVAL); | 1166 | return ERR_PTR(-EINVAL); |
| 1161 | 1167 | ||
| 1162 | minor = MINOR(stat.rdev); | 1168 | minor = MINOR(stat.rdev); |
| 1169 | |||
| 1163 | if (minor & 1) | 1170 | if (minor & 1) |
| 1164 | /* | 1171 | /* |
| 1165 | * Just do not think the "/dev/mtdrX" devices support is need, | 1172 | * Just do not think the "/dev/mtdrX" devices support is need, |
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c index 5780dd1ba79d..ebf517271d29 100644 --- a/drivers/mtd/ubi/eba.c +++ b/drivers/mtd/ubi/eba.c | |||
| @@ -575,6 +575,7 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum, | |||
| 575 | int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0; | 575 | int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0; |
| 576 | struct ubi_volume *vol = ubi->volumes[idx]; | 576 | struct ubi_volume *vol = ubi->volumes[idx]; |
| 577 | struct ubi_vid_hdr *vid_hdr; | 577 | struct ubi_vid_hdr *vid_hdr; |
| 578 | uint32_t crc; | ||
| 578 | 579 | ||
| 579 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); | 580 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); |
| 580 | if (!vid_hdr) | 581 | if (!vid_hdr) |
| @@ -599,14 +600,8 @@ retry: | |||
| 599 | goto out_put; | 600 | goto out_put; |
| 600 | } | 601 | } |
| 601 | 602 | ||
| 602 | vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); | 603 | ubi_assert(vid_hdr->vol_type == UBI_VID_DYNAMIC); |
| 603 | err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr); | ||
| 604 | if (err) { | ||
| 605 | up_read(&ubi->fm_eba_sem); | ||
| 606 | goto write_error; | ||
| 607 | } | ||
| 608 | 604 | ||
| 609 | data_size = offset + len; | ||
| 610 | mutex_lock(&ubi->buf_mutex); | 605 | mutex_lock(&ubi->buf_mutex); |
| 611 | memset(ubi->peb_buf + offset, 0xFF, len); | 606 | memset(ubi->peb_buf + offset, 0xFF, len); |
| 612 | 607 | ||
| @@ -621,6 +616,19 @@ retry: | |||
| 621 | 616 | ||
| 622 | memcpy(ubi->peb_buf + offset, buf, len); | 617 | memcpy(ubi->peb_buf + offset, buf, len); |
| 623 | 618 | ||
| 619 | data_size = offset + len; | ||
| 620 | crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size); | ||
| 621 | vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); | ||
| 622 | vid_hdr->copy_flag = 1; | ||
| 623 | vid_hdr->data_size = cpu_to_be32(data_size); | ||
| 624 | vid_hdr->data_crc = cpu_to_be32(crc); | ||
| 625 | err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr); | ||
| 626 | if (err) { | ||
| 627 | mutex_unlock(&ubi->buf_mutex); | ||
| 628 | up_read(&ubi->fm_eba_sem); | ||
| 629 | goto write_error; | ||
| 630 | } | ||
| 631 | |||
| 624 | err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size); | 632 | err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size); |
| 625 | if (err) { | 633 | if (err) { |
| 626 | mutex_unlock(&ubi->buf_mutex); | 634 | mutex_unlock(&ubi->buf_mutex); |
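Editor's note: recover_peb() now computes the CRC over the rebuilt data before writing the VID header, so copy_flag, data_size and data_crc describe the payload that actually lands in the new PEB. A minimal sketch of stamping an on-flash header with a CRC of its payload is shown below; the struct, the seed constant and the field names are illustrative assumptions modelled on UBI, not the driver code.

#include <linux/types.h>
#include <linux/crc32.h>
#include <asm/byteorder.h>

#define DEMO_CRC32_INIT 0xFFFFFFFFU	/* assumed seed, matching UBI's usage */

struct demo_hdr {			/* illustrative on-flash header */
	__be32	data_size;
	__be32	data_crc;
	__u8	copy_flag;
};

static void demo_stamp_hdr(struct demo_hdr *hdr, const void *buf, size_t len)
{
	u32 crc = crc32(DEMO_CRC32_INIT, buf, len);

	/* Fill the header only after the payload is final, so the CRC
	 * matches the bytes that get written out. */
	hdr->copy_flag = 1;
	hdr->data_size = cpu_to_be32(len);
	hdr->data_crc  = cpu_to_be32(crc);
}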
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c index 348dbbcbedc8..a9e2cef7c95c 100644 --- a/drivers/mtd/ubi/kapi.c +++ b/drivers/mtd/ubi/kapi.c | |||
| @@ -302,6 +302,7 @@ EXPORT_SYMBOL_GPL(ubi_open_volume_nm); | |||
| 302 | struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode) | 302 | struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode) |
| 303 | { | 303 | { |
| 304 | int error, ubi_num, vol_id; | 304 | int error, ubi_num, vol_id; |
| 305 | struct path path; | ||
| 305 | struct kstat stat; | 306 | struct kstat stat; |
| 306 | 307 | ||
| 307 | dbg_gen("open volume %s, mode %d", pathname, mode); | 308 | dbg_gen("open volume %s, mode %d", pathname, mode); |
| @@ -309,7 +310,12 @@ struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode) | |||
| 309 | if (!pathname || !*pathname) | 310 | if (!pathname || !*pathname) |
| 310 | return ERR_PTR(-EINVAL); | 311 | return ERR_PTR(-EINVAL); |
| 311 | 312 | ||
| 312 | error = vfs_stat(pathname, &stat); | 313 | error = kern_path(pathname, LOOKUP_FOLLOW, &path); |
| 314 | if (error) | ||
| 315 | return ERR_PTR(error); | ||
| 316 | |||
| 317 | error = vfs_getattr(&path, &stat); | ||
| 318 | path_put(&path); | ||
| 313 | if (error) | 319 | if (error) |
| 314 | return ERR_PTR(error); | 320 | return ERR_PTR(error); |
| 315 | 321 | ||
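Editor's note: both open_mtd_by_chdev() and ubi_open_volume_path() replace vfs_stat() with kern_path() plus vfs_getattr(), resolving the path entirely in kernel space instead of handing a kernel string to an interface that expects a user pointer. A sketch of that lookup pattern, resolving a character-device node to its minor number, is shown below; it assumes the two-argument vfs_getattr() used in this tree (later kernels take extra mask/flags arguments), and the demo_* name is hypothetical.

#include <linux/namei.h>
#include <linux/path.h>
#include <linux/stat.h>
#include <linux/fs.h>
#include <linux/kdev_t.h>

static int demo_chrdev_minor(const char *pathname, int *minor)
{
	struct path path;
	struct kstat stat;
	int err;

	err = kern_path(pathname, LOOKUP_FOLLOW, &path);
	if (err)
		return err;

	err = vfs_getattr(&path, &stat);
	path_put(&path);		/* drop the reference from kern_path() */
	if (err)
		return err;

	if (!S_ISCHR(stat.mode))	/* not a character device node */
		return -EINVAL;

	*minor = MINOR(stat.rdev);
	return 0;
}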
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index c5fe915870ad..a59d55e25d5f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
| @@ -12895,52 +12895,71 @@ static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add) | |||
| 12895 | return rc; | 12895 | return rc; |
| 12896 | } | 12896 | } |
| 12897 | 12897 | ||
| 12898 | int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp) | 12898 | static int bnx2x_vlan_configure_vid_list(struct bnx2x *bp) |
| 12899 | { | 12899 | { |
| 12900 | struct bnx2x_vlan_entry *vlan; | 12900 | struct bnx2x_vlan_entry *vlan; |
| 12901 | int rc = 0; | 12901 | int rc = 0; |
| 12902 | 12902 | ||
| 12903 | if (!bp->vlan_cnt) { | 12903 | /* Configure all non-configured entries */ |
| 12904 | DP(NETIF_MSG_IFUP, "No need to re-configure vlan filters\n"); | ||
| 12905 | return 0; | ||
| 12906 | } | ||
| 12907 | |||
| 12908 | list_for_each_entry(vlan, &bp->vlan_reg, link) { | 12904 | list_for_each_entry(vlan, &bp->vlan_reg, link) { |
| 12909 | /* Prepare for cleanup in case of errors */ | 12905 | if (vlan->hw) |
| 12910 | if (rc) { | ||
| 12911 | vlan->hw = false; | ||
| 12912 | continue; | ||
| 12913 | } | ||
| 12914 | |||
| 12915 | if (!vlan->hw) | ||
| 12916 | continue; | 12906 | continue; |
| 12917 | 12907 | ||
| 12918 | DP(NETIF_MSG_IFUP, "Re-configuring vlan 0x%04x\n", vlan->vid); | 12908 | if (bp->vlan_cnt >= bp->vlan_credit) |
| 12909 | return -ENOBUFS; | ||
| 12919 | 12910 | ||
| 12920 | rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true); | 12911 | rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true); |
| 12921 | if (rc) { | 12912 | if (rc) { |
| 12922 | BNX2X_ERR("Unable to configure VLAN %d\n", vlan->vid); | 12913 | BNX2X_ERR("Unable to config VLAN %d\n", vlan->vid); |
| 12923 | vlan->hw = false; | 12914 | return rc; |
| 12924 | rc = -EINVAL; | ||
| 12925 | continue; | ||
| 12926 | } | 12915 | } |
| 12916 | |||
| 12917 | DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", vlan->vid); | ||
| 12918 | vlan->hw = true; | ||
| 12919 | bp->vlan_cnt++; | ||
| 12927 | } | 12920 | } |
| 12928 | 12921 | ||
| 12929 | return rc; | 12922 | return 0; |
| 12923 | } | ||
| 12924 | |||
| 12925 | static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode) | ||
| 12926 | { | ||
| 12927 | bool need_accept_any_vlan; | ||
| 12928 | |||
| 12929 | need_accept_any_vlan = !!bnx2x_vlan_configure_vid_list(bp); | ||
| 12930 | |||
| 12931 | if (bp->accept_any_vlan != need_accept_any_vlan) { | ||
| 12932 | bp->accept_any_vlan = need_accept_any_vlan; | ||
| 12933 | DP(NETIF_MSG_IFUP, "Accept all VLAN %s\n", | ||
| 12934 | bp->accept_any_vlan ? "raised" : "cleared"); | ||
| 12935 | if (set_rx_mode) { | ||
| 12936 | if (IS_PF(bp)) | ||
| 12937 | bnx2x_set_rx_mode_inner(bp); | ||
| 12938 | else | ||
| 12939 | bnx2x_vfpf_storm_rx_mode(bp); | ||
| 12940 | } | ||
| 12941 | } | ||
| 12942 | } | ||
| 12943 | |||
| 12944 | int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp) | ||
| 12945 | { | ||
| 12946 | struct bnx2x_vlan_entry *vlan; | ||
| 12947 | |||
| 12948 | /* The hw forgot all entries after reload */ | ||
| 12949 | list_for_each_entry(vlan, &bp->vlan_reg, link) | ||
| 12950 | vlan->hw = false; | ||
| 12951 | bp->vlan_cnt = 0; | ||
| 12952 | |||
| 12953 | /* Don't set rx mode here. Our caller will do it. */ | ||
| 12954 | bnx2x_vlan_configure(bp, false); | ||
| 12955 | |||
| 12956 | return 0; | ||
| 12930 | } | 12957 | } |
| 12931 | 12958 | ||
| 12932 | static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) | 12959 | static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) |
| 12933 | { | 12960 | { |
| 12934 | struct bnx2x *bp = netdev_priv(dev); | 12961 | struct bnx2x *bp = netdev_priv(dev); |
| 12935 | struct bnx2x_vlan_entry *vlan; | 12962 | struct bnx2x_vlan_entry *vlan; |
| 12936 | bool hw = false; | ||
| 12937 | int rc = 0; | ||
| 12938 | |||
| 12939 | if (!netif_running(bp->dev)) { | ||
| 12940 | DP(NETIF_MSG_IFUP, | ||
| 12941 | "Ignoring VLAN configuration the interface is down\n"); | ||
| 12942 | return -EFAULT; | ||
| 12943 | } | ||
| 12944 | 12963 | ||
| 12945 | DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid); | 12964 | DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid); |
| 12946 | 12965 | ||
| @@ -12948,93 +12967,47 @@ static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) | |||
| 12948 | if (!vlan) | 12967 | if (!vlan) |
| 12949 | return -ENOMEM; | 12968 | return -ENOMEM; |
| 12950 | 12969 | ||
| 12951 | bp->vlan_cnt++; | ||
| 12952 | if (bp->vlan_cnt > bp->vlan_credit && !bp->accept_any_vlan) { | ||
| 12953 | DP(NETIF_MSG_IFUP, "Accept all VLAN raised\n"); | ||
| 12954 | bp->accept_any_vlan = true; | ||
| 12955 | if (IS_PF(bp)) | ||
| 12956 | bnx2x_set_rx_mode_inner(bp); | ||
| 12957 | else | ||
| 12958 | bnx2x_vfpf_storm_rx_mode(bp); | ||
| 12959 | } else if (bp->vlan_cnt <= bp->vlan_credit) { | ||
| 12960 | rc = __bnx2x_vlan_configure_vid(bp, vid, true); | ||
| 12961 | hw = true; | ||
| 12962 | } | ||
| 12963 | |||
| 12964 | vlan->vid = vid; | 12970 | vlan->vid = vid; |
| 12965 | vlan->hw = hw; | 12971 | vlan->hw = false; |
| 12972 | list_add_tail(&vlan->link, &bp->vlan_reg); | ||
| 12966 | 12973 | ||
| 12967 | if (!rc) { | 12974 | if (netif_running(dev)) |
| 12968 | list_add(&vlan->link, &bp->vlan_reg); | 12975 | bnx2x_vlan_configure(bp, true); |
| 12969 | } else { | ||
| 12970 | bp->vlan_cnt--; | ||
| 12971 | kfree(vlan); | ||
| 12972 | } | ||
| 12973 | |||
| 12974 | DP(NETIF_MSG_IFUP, "Adding VLAN result %d\n", rc); | ||
| 12975 | 12976 | ||
| 12976 | return rc; | 12977 | return 0; |
| 12977 | } | 12978 | } |
| 12978 | 12979 | ||
| 12979 | static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) | 12980 | static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) |
| 12980 | { | 12981 | { |
| 12981 | struct bnx2x *bp = netdev_priv(dev); | 12982 | struct bnx2x *bp = netdev_priv(dev); |
| 12982 | struct bnx2x_vlan_entry *vlan; | 12983 | struct bnx2x_vlan_entry *vlan; |
| 12984 | bool found = false; | ||
| 12983 | int rc = 0; | 12985 | int rc = 0; |
| 12984 | 12986 | ||
| 12985 | if (!netif_running(bp->dev)) { | ||
| 12986 | DP(NETIF_MSG_IFUP, | ||
| 12987 | "Ignoring VLAN configuration the interface is down\n"); | ||
| 12988 | return -EFAULT; | ||
| 12989 | } | ||
| 12990 | |||
| 12991 | DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid); | 12987 | DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid); |
| 12992 | 12988 | ||
| 12993 | if (!bp->vlan_cnt) { | ||
| 12994 | BNX2X_ERR("Unable to kill VLAN %d\n", vid); | ||
| 12995 | return -EINVAL; | ||
| 12996 | } | ||
| 12997 | |||
| 12998 | list_for_each_entry(vlan, &bp->vlan_reg, link) | 12989 | list_for_each_entry(vlan, &bp->vlan_reg, link) |
| 12999 | if (vlan->vid == vid) | 12990 | if (vlan->vid == vid) { |
| 12991 | found = true; | ||
| 13000 | break; | 12992 | break; |
| 12993 | } | ||
| 13001 | 12994 | ||
| 13002 | if (vlan->vid != vid) { | 12995 | if (!found) { |
| 13003 | BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid); | 12996 | BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid); |
| 13004 | return -EINVAL; | 12997 | return -EINVAL; |
| 13005 | } | 12998 | } |
| 13006 | 12999 | ||
| 13007 | if (vlan->hw) | 13000 | if (netif_running(dev) && vlan->hw) { |
| 13008 | rc = __bnx2x_vlan_configure_vid(bp, vid, false); | 13001 | rc = __bnx2x_vlan_configure_vid(bp, vid, false); |
| 13002 | DP(NETIF_MSG_IFUP, "HW deconfigured for VLAN %d\n", vid); | ||
| 13003 | bp->vlan_cnt--; | ||
| 13004 | } | ||
| 13009 | 13005 | ||
| 13010 | list_del(&vlan->link); | 13006 | list_del(&vlan->link); |
| 13011 | kfree(vlan); | 13007 | kfree(vlan); |
| 13012 | 13008 | ||
| 13013 | bp->vlan_cnt--; | 13009 | if (netif_running(dev)) |
| 13014 | 13010 | bnx2x_vlan_configure(bp, true); | |
| 13015 | if (bp->vlan_cnt <= bp->vlan_credit && bp->accept_any_vlan) { | ||
| 13016 | /* Configure all non-configured entries */ | ||
| 13017 | list_for_each_entry(vlan, &bp->vlan_reg, link) { | ||
| 13018 | if (vlan->hw) | ||
| 13019 | continue; | ||
| 13020 | |||
| 13021 | rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true); | ||
| 13022 | if (rc) { | ||
| 13023 | BNX2X_ERR("Unable to config VLAN %d\n", | ||
| 13024 | vlan->vid); | ||
| 13025 | continue; | ||
| 13026 | } | ||
| 13027 | DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", | ||
| 13028 | vlan->vid); | ||
| 13029 | vlan->hw = true; | ||
| 13030 | } | ||
| 13031 | DP(NETIF_MSG_IFUP, "Accept all VLAN Removed\n"); | ||
| 13032 | bp->accept_any_vlan = false; | ||
| 13033 | if (IS_PF(bp)) | ||
| 13034 | bnx2x_set_rx_mode_inner(bp); | ||
| 13035 | else | ||
| 13036 | bnx2x_vfpf_storm_rx_mode(bp); | ||
| 13037 | } | ||
| 13038 | 13011 | ||
| 13039 | DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc); | 13012 | DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc); |
| 13040 | 13013 | ||
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 72a2efff8e49..c777cde85ce4 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | |||
| @@ -286,7 +286,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 286 | cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod); | 286 | cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod); |
| 287 | txr->tx_prod = prod; | 287 | txr->tx_prod = prod; |
| 288 | 288 | ||
| 289 | tx_buf->is_push = 1; | ||
| 289 | netdev_tx_sent_queue(txq, skb->len); | 290 | netdev_tx_sent_queue(txq, skb->len); |
| 291 | wmb(); /* Sync is_push and byte queue before pushing data */ | ||
| 290 | 292 | ||
| 291 | push_len = (length + sizeof(*tx_push) + 7) / 8; | 293 | push_len = (length + sizeof(*tx_push) + 7) / 8; |
| 292 | if (push_len > 16) { | 294 | if (push_len > 16) { |
| @@ -298,7 +300,6 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 298 | push_len); | 300 | push_len); |
| 299 | } | 301 | } |
| 300 | 302 | ||
| 301 | tx_buf->is_push = 1; | ||
| 302 | goto tx_done; | 303 | goto tx_done; |
| 303 | } | 304 | } |
| 304 | 305 | ||
| @@ -1112,19 +1113,13 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, | |||
| 1112 | if (tpa_info->hash_type != PKT_HASH_TYPE_NONE) | 1113 | if (tpa_info->hash_type != PKT_HASH_TYPE_NONE) |
| 1113 | skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type); | 1114 | skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type); |
| 1114 | 1115 | ||
| 1115 | if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) { | 1116 | if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) && |
| 1116 | netdev_features_t features = skb->dev->features; | 1117 | (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { |
| 1117 | u16 vlan_proto = tpa_info->metadata >> | 1118 | u16 vlan_proto = tpa_info->metadata >> |
| 1118 | RX_CMP_FLAGS2_METADATA_TPID_SFT; | 1119 | RX_CMP_FLAGS2_METADATA_TPID_SFT; |
| 1120 | u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK; | ||
| 1119 | 1121 | ||
| 1120 | if (((features & NETIF_F_HW_VLAN_CTAG_RX) && | 1122 | __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); |
| 1121 | vlan_proto == ETH_P_8021Q) || | ||
| 1122 | ((features & NETIF_F_HW_VLAN_STAG_RX) && | ||
| 1123 | vlan_proto == ETH_P_8021AD)) { | ||
| 1124 | __vlan_hwaccel_put_tag(skb, htons(vlan_proto), | ||
| 1125 | tpa_info->metadata & | ||
| 1126 | RX_CMP_FLAGS2_METADATA_VID_MASK); | ||
| 1127 | } | ||
| 1128 | } | 1123 | } |
| 1129 | 1124 | ||
| 1130 | skb_checksum_none_assert(skb); | 1125 | skb_checksum_none_assert(skb); |
| @@ -1277,19 +1272,14 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, | |||
| 1277 | 1272 | ||
| 1278 | skb->protocol = eth_type_trans(skb, dev); | 1273 | skb->protocol = eth_type_trans(skb, dev); |
| 1279 | 1274 | ||
| 1280 | if (rxcmp1->rx_cmp_flags2 & | 1275 | if ((rxcmp1->rx_cmp_flags2 & |
| 1281 | cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) { | 1276 | cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) && |
| 1282 | netdev_features_t features = skb->dev->features; | 1277 | (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { |
| 1283 | u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); | 1278 | u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); |
| 1279 | u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK; | ||
| 1284 | u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT; | 1280 | u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT; |
| 1285 | 1281 | ||
| 1286 | if (((features & NETIF_F_HW_VLAN_CTAG_RX) && | 1282 | __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); |
| 1287 | vlan_proto == ETH_P_8021Q) || | ||
| 1288 | ((features & NETIF_F_HW_VLAN_STAG_RX) && | ||
| 1289 | vlan_proto == ETH_P_8021AD)) | ||
| 1290 | __vlan_hwaccel_put_tag(skb, htons(vlan_proto), | ||
| 1291 | meta_data & | ||
| 1292 | RX_CMP_FLAGS2_METADATA_VID_MASK); | ||
| 1293 | } | 1283 | } |
| 1294 | 1284 | ||
| 1295 | skb_checksum_none_assert(skb); | 1285 | skb_checksum_none_assert(skb); |
| @@ -5466,6 +5456,20 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev, | |||
| 5466 | 5456 | ||
| 5467 | if (!bnxt_rfs_capable(bp)) | 5457 | if (!bnxt_rfs_capable(bp)) |
| 5468 | features &= ~NETIF_F_NTUPLE; | 5458 | features &= ~NETIF_F_NTUPLE; |
| 5459 | |||
| 5460 | /* Both CTAG and STAG VLAN acceleration on the RX side have to be | ||
| 5461 | * turned on or off together. | ||
| 5462 | */ | ||
| 5463 | if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) != | ||
| 5464 | (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) { | ||
| 5465 | if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) | ||
| 5466 | features &= ~(NETIF_F_HW_VLAN_CTAG_RX | | ||
| 5467 | NETIF_F_HW_VLAN_STAG_RX); | ||
| 5468 | else | ||
| 5469 | features |= NETIF_F_HW_VLAN_CTAG_RX | | ||
| 5470 | NETIF_F_HW_VLAN_STAG_RX; | ||
| 5471 | } | ||
| 5472 | |||
| 5469 | return features; | 5473 | return features; |
| 5470 | } | 5474 | } |
| 5471 | 5475 | ||
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index a2cdfc1261dc..50812a1d67bd 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | |||
| @@ -144,6 +144,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN | |||
| 144 | CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */ | 144 | CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */ |
| 145 | CH_PCI_ID_TABLE_FENTRY(0x5016), /* T580-OCP-SO */ | 145 | CH_PCI_ID_TABLE_FENTRY(0x5016), /* T580-OCP-SO */ |
| 146 | CH_PCI_ID_TABLE_FENTRY(0x5017), /* T520-OCP-SO */ | 146 | CH_PCI_ID_TABLE_FENTRY(0x5017), /* T520-OCP-SO */ |
| 147 | CH_PCI_ID_TABLE_FENTRY(0x5018), /* T540-BT */ | ||
| 147 | CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */ | 148 | CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */ |
| 148 | CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */ | 149 | CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */ |
| 149 | CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */ | 150 | CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */ |
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index 41b010645100..4edb98c3c6c7 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c | |||
| @@ -1195,7 +1195,7 @@ static int ethoc_probe(struct platform_device *pdev) | |||
| 1195 | priv->mdio = mdiobus_alloc(); | 1195 | priv->mdio = mdiobus_alloc(); |
| 1196 | if (!priv->mdio) { | 1196 | if (!priv->mdio) { |
| 1197 | ret = -ENOMEM; | 1197 | ret = -ENOMEM; |
| 1198 | goto free; | 1198 | goto free2; |
| 1199 | } | 1199 | } |
| 1200 | 1200 | ||
| 1201 | priv->mdio->name = "ethoc-mdio"; | 1201 | priv->mdio->name = "ethoc-mdio"; |
| @@ -1208,7 +1208,7 @@ static int ethoc_probe(struct platform_device *pdev) | |||
| 1208 | ret = mdiobus_register(priv->mdio); | 1208 | ret = mdiobus_register(priv->mdio); |
| 1209 | if (ret) { | 1209 | if (ret) { |
| 1210 | dev_err(&netdev->dev, "failed to register MDIO bus\n"); | 1210 | dev_err(&netdev->dev, "failed to register MDIO bus\n"); |
| 1211 | goto free; | 1211 | goto free2; |
| 1212 | } | 1212 | } |
| 1213 | 1213 | ||
| 1214 | ret = ethoc_mdio_probe(netdev); | 1214 | ret = ethoc_mdio_probe(netdev); |
| @@ -1241,9 +1241,10 @@ error2: | |||
| 1241 | error: | 1241 | error: |
| 1242 | mdiobus_unregister(priv->mdio); | 1242 | mdiobus_unregister(priv->mdio); |
| 1243 | mdiobus_free(priv->mdio); | 1243 | mdiobus_free(priv->mdio); |
| 1244 | free: | 1244 | free2: |
| 1245 | if (priv->clk) | 1245 | if (priv->clk) |
| 1246 | clk_disable_unprepare(priv->clk); | 1246 | clk_disable_unprepare(priv->clk); |
| 1247 | free: | ||
| 1247 | free_netdev(netdev); | 1248 | free_netdev(netdev); |
| 1248 | out: | 1249 | out: |
| 1249 | return ret; | 1250 | return ret; |
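Editor's note: the ethoc fix is the standard goto-unwind idiom: each error label releases exactly the resources acquired before the failing step, in reverse order of acquisition, so an MDIO failure after clk_prepare_enable() disables the clock while earlier failures do not. A generic sketch of the pattern, with placeholder resources:

#include <linux/clk.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static int demo_probe_pattern(struct clk *clk)
{
	struct net_device *netdev;
	int ret;

	netdev = alloc_etherdev(0);
	if (!netdev)
		return -ENOMEM;			/* nothing to unwind yet */

	ret = clk_prepare_enable(clk);
	if (ret)
		goto free;			/* only the netdev exists */

	ret = register_netdev(netdev);
	if (ret)
		goto disable_clk;		/* clock is on, undo it first */

	return 0;

disable_clk:
	clk_disable_unprepare(clk);
free:
	free_netdev(netdev);
	return ret;
}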
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 3c0255e98535..fea0f330ddbd 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
| @@ -2416,24 +2416,24 @@ fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) | |||
| 2416 | return -EOPNOTSUPP; | 2416 | return -EOPNOTSUPP; |
| 2417 | 2417 | ||
| 2418 | if (ec->rx_max_coalesced_frames > 255) { | 2418 | if (ec->rx_max_coalesced_frames > 255) { |
| 2419 | pr_err("Rx coalesced frames exceed hardware limiation"); | 2419 | pr_err("Rx coalesced frames exceed hardware limitation\n"); |
| 2420 | return -EINVAL; | 2420 | return -EINVAL; |
| 2421 | } | 2421 | } |
| 2422 | 2422 | ||
| 2423 | if (ec->tx_max_coalesced_frames > 255) { | 2423 | if (ec->tx_max_coalesced_frames > 255) { |
| 2424 | pr_err("Tx coalesced frame exceed hardware limiation"); | 2424 | pr_err("Tx coalesced frame exceed hardware limitation\n"); |
| 2425 | return -EINVAL; | 2425 | return -EINVAL; |
| 2426 | } | 2426 | } |
| 2427 | 2427 | ||
| 2428 | cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr); | 2428 | cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr); |
| 2429 | if (cycle > 0xFFFF) { | 2429 | if (cycle > 0xFFFF) { |
| 2430 | pr_err("Rx coalesed usec exceeed hardware limiation"); | 2430 | pr_err("Rx coalesced usec exceed hardware limitation\n"); |
| 2431 | return -EINVAL; | 2431 | return -EINVAL; |
| 2432 | } | 2432 | } |
| 2433 | 2433 | ||
| 2434 | cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr); | 2434 | cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr); |
| 2435 | if (cycle > 0xFFFF) { | 2435 | if (cycle > 0xFFFF) { |
| 2436 | pr_err("Rx coalesed usec exceeed hardware limiation"); | 2436 | pr_err("Rx coalesced usec exceed hardware limitation\n"); |
| 2437 | return -EINVAL; | 2437 | return -EINVAL; |
| 2438 | } | 2438 | } |
| 2439 | 2439 | ||
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 7615e0668acb..2e6785b6e8be 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c | |||
| @@ -2440,7 +2440,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 2440 | tx_queue->tx_ring_size); | 2440 | tx_queue->tx_ring_size); |
| 2441 | 2441 | ||
| 2442 | if (likely(!nr_frags)) { | 2442 | if (likely(!nr_frags)) { |
| 2443 | lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); | 2443 | if (likely(!do_tstamp)) |
| 2444 | lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); | ||
| 2444 | } else { | 2445 | } else { |
| 2445 | u32 lstatus_start = lstatus; | 2446 | u32 lstatus_start = lstatus; |
| 2446 | 2447 | ||
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index c984462fad2a..4763252bbf85 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c | |||
| @@ -133,6 +133,8 @@ static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg) | |||
| 133 | static void mtk_phy_link_adjust(struct net_device *dev) | 133 | static void mtk_phy_link_adjust(struct net_device *dev) |
| 134 | { | 134 | { |
| 135 | struct mtk_mac *mac = netdev_priv(dev); | 135 | struct mtk_mac *mac = netdev_priv(dev); |
| 136 | u16 lcl_adv = 0, rmt_adv = 0; | ||
| 137 | u8 flowctrl; | ||
| 136 | u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | | 138 | u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | |
| 137 | MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN | | 139 | MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN | |
| 138 | MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN | | 140 | MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN | |
| @@ -150,11 +152,30 @@ static void mtk_phy_link_adjust(struct net_device *dev) | |||
| 150 | if (mac->phy_dev->link) | 152 | if (mac->phy_dev->link) |
| 151 | mcr |= MAC_MCR_FORCE_LINK; | 153 | mcr |= MAC_MCR_FORCE_LINK; |
| 152 | 154 | ||
| 153 | if (mac->phy_dev->duplex) | 155 | if (mac->phy_dev->duplex) { |
| 154 | mcr |= MAC_MCR_FORCE_DPX; | 156 | mcr |= MAC_MCR_FORCE_DPX; |
| 155 | 157 | ||
| 156 | if (mac->phy_dev->pause) | 158 | if (mac->phy_dev->pause) |
| 157 | mcr |= MAC_MCR_FORCE_RX_FC | MAC_MCR_FORCE_TX_FC; | 159 | rmt_adv = LPA_PAUSE_CAP; |
| 160 | if (mac->phy_dev->asym_pause) | ||
| 161 | rmt_adv |= LPA_PAUSE_ASYM; | ||
| 162 | |||
| 163 | if (mac->phy_dev->advertising & ADVERTISED_Pause) | ||
| 164 | lcl_adv |= ADVERTISE_PAUSE_CAP; | ||
| 165 | if (mac->phy_dev->advertising & ADVERTISED_Asym_Pause) | ||
| 166 | lcl_adv |= ADVERTISE_PAUSE_ASYM; | ||
| 167 | |||
| 168 | flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); | ||
| 169 | |||
| 170 | if (flowctrl & FLOW_CTRL_TX) | ||
| 171 | mcr |= MAC_MCR_FORCE_TX_FC; | ||
| 172 | if (flowctrl & FLOW_CTRL_RX) | ||
| 173 | mcr |= MAC_MCR_FORCE_RX_FC; | ||
| 174 | |||
| 175 | netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n", | ||
| 176 | flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled", | ||
| 177 | flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled"); | ||
| 178 | } | ||
| 158 | 179 | ||
| 159 | mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); | 180 | mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); |
| 160 | 181 | ||
| @@ -208,10 +229,16 @@ static int mtk_phy_connect(struct mtk_mac *mac) | |||
| 208 | u32 val, ge_mode; | 229 | u32 val, ge_mode; |
| 209 | 230 | ||
| 210 | np = of_parse_phandle(mac->of_node, "phy-handle", 0); | 231 | np = of_parse_phandle(mac->of_node, "phy-handle", 0); |
| 232 | if (!np && of_phy_is_fixed_link(mac->of_node)) | ||
| 233 | if (!of_phy_register_fixed_link(mac->of_node)) | ||
| 234 | np = of_node_get(mac->of_node); | ||
| 211 | if (!np) | 235 | if (!np) |
| 212 | return -ENODEV; | 236 | return -ENODEV; |
| 213 | 237 | ||
| 214 | switch (of_get_phy_mode(np)) { | 238 | switch (of_get_phy_mode(np)) { |
| 239 | case PHY_INTERFACE_MODE_RGMII_TXID: | ||
| 240 | case PHY_INTERFACE_MODE_RGMII_RXID: | ||
| 241 | case PHY_INTERFACE_MODE_RGMII_ID: | ||
| 215 | case PHY_INTERFACE_MODE_RGMII: | 242 | case PHY_INTERFACE_MODE_RGMII: |
| 216 | ge_mode = 0; | 243 | ge_mode = 0; |
| 217 | break; | 244 | break; |
| @@ -236,7 +263,8 @@ static int mtk_phy_connect(struct mtk_mac *mac) | |||
| 236 | mac->phy_dev->autoneg = AUTONEG_ENABLE; | 263 | mac->phy_dev->autoneg = AUTONEG_ENABLE; |
| 237 | mac->phy_dev->speed = 0; | 264 | mac->phy_dev->speed = 0; |
| 238 | mac->phy_dev->duplex = 0; | 265 | mac->phy_dev->duplex = 0; |
| 239 | mac->phy_dev->supported &= PHY_BASIC_FEATURES; | 266 | mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause | |
| 267 | SUPPORTED_Asym_Pause; | ||
| 240 | mac->phy_dev->advertising = mac->phy_dev->supported | | 268 | mac->phy_dev->advertising = mac->phy_dev->supported | |
| 241 | ADVERTISED_Autoneg; | 269 | ADVERTISED_Autoneg; |
| 242 | phy_start_aneg(mac->phy_dev); | 270 | phy_start_aneg(mac->phy_dev); |
| @@ -280,7 +308,7 @@ static int mtk_mdio_init(struct mtk_eth *eth) | |||
| 280 | return 0; | 308 | return 0; |
| 281 | 309 | ||
| 282 | err_free_bus: | 310 | err_free_bus: |
| 283 | kfree(eth->mii_bus); | 311 | mdiobus_free(eth->mii_bus); |
| 284 | 312 | ||
| 285 | err_put_node: | 313 | err_put_node: |
| 286 | of_node_put(mii_np); | 314 | of_node_put(mii_np); |
| @@ -295,7 +323,7 @@ static void mtk_mdio_cleanup(struct mtk_eth *eth) | |||
| 295 | 323 | ||
| 296 | mdiobus_unregister(eth->mii_bus); | 324 | mdiobus_unregister(eth->mii_bus); |
| 297 | of_node_put(eth->mii_bus->dev.of_node); | 325 | of_node_put(eth->mii_bus->dev.of_node); |
| 298 | kfree(eth->mii_bus); | 326 | mdiobus_free(eth->mii_bus); |
| 299 | } | 327 | } |
| 300 | 328 | ||
| 301 | static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask) | 329 | static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask) |
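Editor's note: mtk_phy_link_adjust() now derives pause settings the generic way: it builds local and link-partner advertisement words and lets mii_resolve_flowctrl_fdx() decide whether TX and/or RX flow control should be enabled. A brief sketch of that resolution is shown below; it assumes a phydev with the pause/asym_pause/advertising fields as they exist in this tree (advertising is still a u32 bitmask here).

#include <linux/mii.h>
#include <linux/phy.h>

/* Returns FLOW_CTRL_TX/FLOW_CTRL_RX bits for a full-duplex link. */
static u8 demo_resolve_pause(struct phy_device *phydev)
{
	u16 lcl_adv = 0, rmt_adv = 0;

	if (phydev->pause)
		rmt_adv = LPA_PAUSE_CAP;
	if (phydev->asym_pause)
		rmt_adv |= LPA_PAUSE_ASYM;

	if (phydev->advertising & ADVERTISED_Pause)
		lcl_adv |= ADVERTISE_PAUSE_CAP;
	if (phydev->advertising & ADVERTISED_Asym_Pause)
		lcl_adv |= ADVERTISE_PAUSE_ASYM;

	return mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
}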
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index fd4392999eee..f5c8d5db25a8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | |||
| @@ -3192,10 +3192,7 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv) | |||
| 3192 | flush_workqueue(priv->wq); | 3192 | flush_workqueue(priv->wq); |
| 3193 | if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) { | 3193 | if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) { |
| 3194 | netif_device_detach(netdev); | 3194 | netif_device_detach(netdev); |
| 3195 | mutex_lock(&priv->state_lock); | 3195 | mlx5e_close(netdev); |
| 3196 | if (test_bit(MLX5E_STATE_OPENED, &priv->state)) | ||
| 3197 | mlx5e_close_locked(netdev); | ||
| 3198 | mutex_unlock(&priv->state_lock); | ||
| 3199 | } else { | 3196 | } else { |
| 3200 | unregister_netdev(netdev); | 3197 | unregister_netdev(netdev); |
| 3201 | } | 3198 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 229ab16fb8d3..b000ddc29553 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | |||
| @@ -317,7 +317,8 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) | |||
| 317 | while ((sq->pc & wq->sz_m1) > sq->edge) | 317 | while ((sq->pc & wq->sz_m1) > sq->edge) |
| 318 | mlx5e_send_nop(sq, false); | 318 | mlx5e_send_nop(sq, false); |
| 319 | 319 | ||
| 320 | sq->bf_budget = bf ? sq->bf_budget - 1 : 0; | 320 | if (bf) |
| 321 | sq->bf_budget--; | ||
| 321 | 322 | ||
| 322 | sq->stats.packets++; | 323 | sq->stats.packets++; |
| 323 | sq->stats.bytes += num_bytes; | 324 | sq->stats.bytes += num_bytes; |
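Editor's note: the eswitch hunks below switch IS_ERR_OR_NULL() to IS_ERR() because mlx5_create_flow_table() and mlx5_create_flow_group() report failure with ERR_PTR() and never return NULL; treating NULL as a possible failure there would turn PTR_ERR(NULL) into a bogus success code of 0. A minimal sketch of the error-pointer convention, with demo names only:

#include <linux/err.h>
#include <linux/slab.h>

struct demo_obj {
	int dummy;
};

/* An allocator that reports failure via ERR_PTR(), never NULL. */
static struct demo_obj *demo_create(void)
{
	struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return ERR_PTR(-ENOMEM);
	return obj;
}

static int demo_use(void)
{
	struct demo_obj *obj = demo_create();

	/* The API never returns NULL, so IS_ERR() is the right check. */
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* ... use obj ... */
	kfree(obj);
	return 0;
}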
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index b84a6918a700..aebbd6ccb9fe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | |||
| @@ -383,7 +383,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, | |||
| 383 | match_v, | 383 | match_v, |
| 384 | MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, | 384 | MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, |
| 385 | 0, &dest); | 385 | 0, &dest); |
| 386 | if (IS_ERR_OR_NULL(flow_rule)) { | 386 | if (IS_ERR(flow_rule)) { |
| 387 | pr_warn( | 387 | pr_warn( |
| 388 | "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n", | 388 | "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n", |
| 389 | dmac_v, dmac_c, vport, PTR_ERR(flow_rule)); | 389 | dmac_v, dmac_c, vport, PTR_ERR(flow_rule)); |
| @@ -457,7 +457,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports) | |||
| 457 | 457 | ||
| 458 | table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); | 458 | table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); |
| 459 | fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0); | 459 | fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0); |
| 460 | if (IS_ERR_OR_NULL(fdb)) { | 460 | if (IS_ERR(fdb)) { |
| 461 | err = PTR_ERR(fdb); | 461 | err = PTR_ERR(fdb); |
| 462 | esw_warn(dev, "Failed to create FDB Table err %d\n", err); | 462 | esw_warn(dev, "Failed to create FDB Table err %d\n", err); |
| 463 | goto out; | 463 | goto out; |
| @@ -474,7 +474,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports) | |||
| 474 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3); | 474 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3); |
| 475 | eth_broadcast_addr(dmac); | 475 | eth_broadcast_addr(dmac); |
| 476 | g = mlx5_create_flow_group(fdb, flow_group_in); | 476 | g = mlx5_create_flow_group(fdb, flow_group_in); |
| 477 | if (IS_ERR_OR_NULL(g)) { | 477 | if (IS_ERR(g)) { |
| 478 | err = PTR_ERR(g); | 478 | err = PTR_ERR(g); |
| 479 | esw_warn(dev, "Failed to create flow group err(%d)\n", err); | 479 | esw_warn(dev, "Failed to create flow group err(%d)\n", err); |
| 480 | goto out; | 480 | goto out; |
| @@ -489,7 +489,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports) | |||
| 489 | eth_zero_addr(dmac); | 489 | eth_zero_addr(dmac); |
| 490 | dmac[0] = 0x01; | 490 | dmac[0] = 0x01; |
| 491 | g = mlx5_create_flow_group(fdb, flow_group_in); | 491 | g = mlx5_create_flow_group(fdb, flow_group_in); |
| 492 | if (IS_ERR_OR_NULL(g)) { | 492 | if (IS_ERR(g)) { |
| 493 | err = PTR_ERR(g); | 493 | err = PTR_ERR(g); |
| 494 | esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err); | 494 | esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err); |
| 495 | goto out; | 495 | goto out; |
| @@ -506,7 +506,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports) | |||
| 506 | MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1); | 506 | MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1); |
| 507 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1); | 507 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1); |
| 508 | g = mlx5_create_flow_group(fdb, flow_group_in); | 508 | g = mlx5_create_flow_group(fdb, flow_group_in); |
| 509 | if (IS_ERR_OR_NULL(g)) { | 509 | if (IS_ERR(g)) { |
| 510 | err = PTR_ERR(g); | 510 | err = PTR_ERR(g); |
| 511 | esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err); | 511 | esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err); |
| 512 | goto out; | 512 | goto out; |
| @@ -529,7 +529,7 @@ out: | |||
| 529 | } | 529 | } |
| 530 | } | 530 | } |
| 531 | 531 | ||
| 532 | kfree(flow_group_in); | 532 | kvfree(flow_group_in); |
| 533 | return err; | 533 | return err; |
| 534 | } | 534 | } |
| 535 | 535 | ||
| @@ -651,6 +651,7 @@ static void update_allmulti_vports(struct mlx5_eswitch *esw, | |||
| 651 | esw_fdb_set_vport_rule(esw, | 651 | esw_fdb_set_vport_rule(esw, |
| 652 | mac, | 652 | mac, |
| 653 | vport_idx); | 653 | vport_idx); |
| 654 | iter_vaddr->mc_promisc = true; | ||
| 654 | break; | 655 | break; |
| 655 | case MLX5_ACTION_DEL: | 656 | case MLX5_ACTION_DEL: |
| 656 | if (!iter_vaddr) | 657 | if (!iter_vaddr) |
| @@ -1060,7 +1061,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, | |||
| 1060 | return; | 1061 | return; |
| 1061 | 1062 | ||
| 1062 | acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); | 1063 | acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); |
| 1063 | if (IS_ERR_OR_NULL(acl)) { | 1064 | if (IS_ERR(acl)) { |
| 1064 | err = PTR_ERR(acl); | 1065 | err = PTR_ERR(acl); |
| 1065 | esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n", | 1066 | esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n", |
| 1066 | vport->vport, err); | 1067 | vport->vport, err); |
| @@ -1075,7 +1076,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, | |||
| 1075 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); | 1076 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); |
| 1076 | 1077 | ||
| 1077 | vlan_grp = mlx5_create_flow_group(acl, flow_group_in); | 1078 | vlan_grp = mlx5_create_flow_group(acl, flow_group_in); |
| 1078 | if (IS_ERR_OR_NULL(vlan_grp)) { | 1079 | if (IS_ERR(vlan_grp)) { |
| 1079 | err = PTR_ERR(vlan_grp); | 1080 | err = PTR_ERR(vlan_grp); |
| 1080 | esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n", | 1081 | esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n", |
| 1081 | vport->vport, err); | 1082 | vport->vport, err); |
| @@ -1086,7 +1087,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, | |||
| 1086 | MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1); | 1087 | MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1); |
| 1087 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); | 1088 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); |
| 1088 | drop_grp = mlx5_create_flow_group(acl, flow_group_in); | 1089 | drop_grp = mlx5_create_flow_group(acl, flow_group_in); |
| 1089 | if (IS_ERR_OR_NULL(drop_grp)) { | 1090 | if (IS_ERR(drop_grp)) { |
| 1090 | err = PTR_ERR(drop_grp); | 1091 | err = PTR_ERR(drop_grp); |
| 1091 | esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n", | 1092 | esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n", |
| 1092 | vport->vport, err); | 1093 | vport->vport, err); |
| @@ -1097,7 +1098,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, | |||
| 1097 | vport->egress.drop_grp = drop_grp; | 1098 | vport->egress.drop_grp = drop_grp; |
| 1098 | vport->egress.allowed_vlans_grp = vlan_grp; | 1099 | vport->egress.allowed_vlans_grp = vlan_grp; |
| 1099 | out: | 1100 | out: |
| 1100 | kfree(flow_group_in); | 1101 | kvfree(flow_group_in); |
| 1101 | if (err && !IS_ERR_OR_NULL(vlan_grp)) | 1102 | if (err && !IS_ERR_OR_NULL(vlan_grp)) |
| 1102 | mlx5_destroy_flow_group(vlan_grp); | 1103 | mlx5_destroy_flow_group(vlan_grp); |
| 1103 | if (err && !IS_ERR_OR_NULL(acl)) | 1104 | if (err && !IS_ERR_OR_NULL(acl)) |
| @@ -1174,7 +1175,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, | |||
| 1174 | return; | 1175 | return; |
| 1175 | 1176 | ||
| 1176 | acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); | 1177 | acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); |
| 1177 | if (IS_ERR_OR_NULL(acl)) { | 1178 | if (IS_ERR(acl)) { |
| 1178 | err = PTR_ERR(acl); | 1179 | err = PTR_ERR(acl); |
| 1179 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n", | 1180 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n", |
| 1180 | vport->vport, err); | 1181 | vport->vport, err); |
| @@ -1192,7 +1193,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, | |||
| 1192 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); | 1193 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); |
| 1193 | 1194 | ||
| 1194 | g = mlx5_create_flow_group(acl, flow_group_in); | 1195 | g = mlx5_create_flow_group(acl, flow_group_in); |
| 1195 | if (IS_ERR_OR_NULL(g)) { | 1196 | if (IS_ERR(g)) { |
| 1196 | err = PTR_ERR(g); | 1197 | err = PTR_ERR(g); |
| 1197 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n", | 1198 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n", |
| 1198 | vport->vport, err); | 1199 | vport->vport, err); |
| @@ -1207,7 +1208,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, | |||
| 1207 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); | 1208 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); |
| 1208 | 1209 | ||
| 1209 | g = mlx5_create_flow_group(acl, flow_group_in); | 1210 | g = mlx5_create_flow_group(acl, flow_group_in); |
| 1210 | if (IS_ERR_OR_NULL(g)) { | 1211 | if (IS_ERR(g)) { |
| 1211 | err = PTR_ERR(g); | 1212 | err = PTR_ERR(g); |
| 1212 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n", | 1213 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n", |
| 1213 | vport->vport, err); | 1214 | vport->vport, err); |
| @@ -1223,7 +1224,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, | |||
| 1223 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2); | 1224 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2); |
| 1224 | 1225 | ||
| 1225 | g = mlx5_create_flow_group(acl, flow_group_in); | 1226 | g = mlx5_create_flow_group(acl, flow_group_in); |
| 1226 | if (IS_ERR_OR_NULL(g)) { | 1227 | if (IS_ERR(g)) { |
| 1227 | err = PTR_ERR(g); | 1228 | err = PTR_ERR(g); |
| 1228 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n", | 1229 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n", |
| 1229 | vport->vport, err); | 1230 | vport->vport, err); |
| @@ -1236,7 +1237,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, | |||
| 1236 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3); | 1237 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3); |
| 1237 | 1238 | ||
| 1238 | g = mlx5_create_flow_group(acl, flow_group_in); | 1239 | g = mlx5_create_flow_group(acl, flow_group_in); |
| 1239 | if (IS_ERR_OR_NULL(g)) { | 1240 | if (IS_ERR(g)) { |
| 1240 | err = PTR_ERR(g); | 1241 | err = PTR_ERR(g); |
| 1241 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n", | 1242 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n", |
| 1242 | vport->vport, err); | 1243 | vport->vport, err); |
| @@ -1259,7 +1260,7 @@ out: | |||
| 1259 | mlx5_destroy_flow_table(vport->ingress.acl); | 1260 | mlx5_destroy_flow_table(vport->ingress.acl); |
| 1260 | } | 1261 | } |
| 1261 | 1262 | ||
| 1262 | kfree(flow_group_in); | 1263 | kvfree(flow_group_in); |
| 1263 | } | 1264 | } |
| 1264 | 1265 | ||
| 1265 | static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw, | 1266 | static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw, |
| @@ -1363,7 +1364,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, | |||
| 1363 | match_v, | 1364 | match_v, |
| 1364 | MLX5_FLOW_CONTEXT_ACTION_ALLOW, | 1365 | MLX5_FLOW_CONTEXT_ACTION_ALLOW, |
| 1365 | 0, NULL); | 1366 | 0, NULL); |
| 1366 | if (IS_ERR_OR_NULL(vport->ingress.allow_rule)) { | 1367 | if (IS_ERR(vport->ingress.allow_rule)) { |
| 1367 | err = PTR_ERR(vport->ingress.allow_rule); | 1368 | err = PTR_ERR(vport->ingress.allow_rule); |
| 1368 | pr_warn("vport[%d] configure ingress allow rule, err(%d)\n", | 1369 | pr_warn("vport[%d] configure ingress allow rule, err(%d)\n", |
| 1369 | vport->vport, err); | 1370 | vport->vport, err); |
| @@ -1380,7 +1381,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, | |||
| 1380 | match_v, | 1381 | match_v, |
| 1381 | MLX5_FLOW_CONTEXT_ACTION_DROP, | 1382 | MLX5_FLOW_CONTEXT_ACTION_DROP, |
| 1382 | 0, NULL); | 1383 | 0, NULL); |
| 1383 | if (IS_ERR_OR_NULL(vport->ingress.drop_rule)) { | 1384 | if (IS_ERR(vport->ingress.drop_rule)) { |
| 1384 | err = PTR_ERR(vport->ingress.drop_rule); | 1385 | err = PTR_ERR(vport->ingress.drop_rule); |
| 1385 | pr_warn("vport[%d] configure ingress drop rule, err(%d)\n", | 1386 | pr_warn("vport[%d] configure ingress drop rule, err(%d)\n", |
| 1386 | vport->vport, err); | 1387 | vport->vport, err); |
| @@ -1439,7 +1440,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, | |||
| 1439 | match_v, | 1440 | match_v, |
| 1440 | MLX5_FLOW_CONTEXT_ACTION_ALLOW, | 1441 | MLX5_FLOW_CONTEXT_ACTION_ALLOW, |
| 1441 | 0, NULL); | 1442 | 0, NULL); |
| 1442 | if (IS_ERR_OR_NULL(vport->egress.allowed_vlan)) { | 1443 | if (IS_ERR(vport->egress.allowed_vlan)) { |
| 1443 | err = PTR_ERR(vport->egress.allowed_vlan); | 1444 | err = PTR_ERR(vport->egress.allowed_vlan); |
| 1444 | pr_warn("vport[%d] configure egress allowed vlan rule failed, err(%d)\n", | 1445 | pr_warn("vport[%d] configure egress allowed vlan rule failed, err(%d)\n", |
| 1445 | vport->vport, err); | 1446 | vport->vport, err); |
| @@ -1457,7 +1458,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, | |||
| 1457 | match_v, | 1458 | match_v, |
| 1458 | MLX5_FLOW_CONTEXT_ACTION_DROP, | 1459 | MLX5_FLOW_CONTEXT_ACTION_DROP, |
| 1459 | 0, NULL); | 1460 | 0, NULL); |
| 1460 | if (IS_ERR_OR_NULL(vport->egress.drop_rule)) { | 1461 | if (IS_ERR(vport->egress.drop_rule)) { |
| 1461 | err = PTR_ERR(vport->egress.drop_rule); | 1462 | err = PTR_ERR(vport->egress.drop_rule); |
| 1462 | pr_warn("vport[%d] configure egress drop rule failed, err(%d)\n", | 1463 | pr_warn("vport[%d] configure egress drop rule failed, err(%d)\n", |
| 1463 | vport->vport, err); | 1464 | vport->vport, err); |
| @@ -1491,14 +1492,11 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, | |||
| 1491 | 1492 | ||
| 1492 | /* Sync with current vport context */ | 1493 | /* Sync with current vport context */ |
| 1493 | vport->enabled_events = enable_events; | 1494 | vport->enabled_events = enable_events; |
| 1494 | esw_vport_change_handle_locked(vport); | ||
| 1495 | |||
| 1496 | vport->enabled = true; | 1495 | vport->enabled = true; |
| 1497 | 1496 | ||
| 1498 | /* only PF is trusted by default */ | 1497 | /* only PF is trusted by default */ |
| 1499 | vport->trusted = (vport_num) ? false : true; | 1498 | vport->trusted = (vport_num) ? false : true; |
| 1500 | 1499 | esw_vport_change_handle_locked(vport); | |
| 1501 | arm_vport_context_events_cmd(esw->dev, vport_num, enable_events); | ||
| 1502 | 1500 | ||
| 1503 | esw->enabled_vports++; | 1501 | esw->enabled_vports++; |
| 1504 | esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num); | 1502 | esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num); |
| @@ -1728,11 +1726,24 @@ void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe) | |||
| 1728 | (esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev)) | 1726 | (esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev)) |
| 1729 | #define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports) | 1727 | #define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports) |
| 1730 | 1728 | ||
| 1729 | static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN]) | ||
| 1730 | { | ||
| 1731 | ((u8 *)node_guid)[7] = mac[0]; | ||
| 1732 | ((u8 *)node_guid)[6] = mac[1]; | ||
| 1733 | ((u8 *)node_guid)[5] = mac[2]; | ||
| 1734 | ((u8 *)node_guid)[4] = 0xff; | ||
| 1735 | ((u8 *)node_guid)[3] = 0xfe; | ||
| 1736 | ((u8 *)node_guid)[2] = mac[3]; | ||
| 1737 | ((u8 *)node_guid)[1] = mac[4]; | ||
| 1738 | ((u8 *)node_guid)[0] = mac[5]; | ||
| 1739 | } | ||
| 1740 | |||
| 1731 | int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, | 1741 | int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, |
| 1732 | int vport, u8 mac[ETH_ALEN]) | 1742 | int vport, u8 mac[ETH_ALEN]) |
| 1733 | { | 1743 | { |
| 1734 | int err = 0; | ||
| 1735 | struct mlx5_vport *evport; | 1744 | struct mlx5_vport *evport; |
| 1745 | u64 node_guid; | ||
| 1746 | int err = 0; | ||
| 1736 | 1747 | ||
| 1737 | if (!ESW_ALLOWED(esw)) | 1748 | if (!ESW_ALLOWED(esw)) |
| 1738 | return -EPERM; | 1749 | return -EPERM; |
| @@ -1756,11 +1767,17 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, | |||
| 1756 | return err; | 1767 | return err; |
| 1757 | } | 1768 | } |
| 1758 | 1769 | ||
| 1770 | node_guid_gen_from_mac(&node_guid, mac); | ||
| 1771 | err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid); | ||
| 1772 | if (err) | ||
| 1773 | mlx5_core_warn(esw->dev, | ||
| 1774 | "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n", | ||
| 1775 | vport, err); | ||
| 1776 | |||
| 1759 | mutex_lock(&esw->state_lock); | 1777 | mutex_lock(&esw->state_lock); |
| 1760 | if (evport->enabled) | 1778 | if (evport->enabled) |
| 1761 | err = esw_vport_ingress_config(esw, evport); | 1779 | err = esw_vport_ingress_config(esw, evport); |
| 1762 | mutex_unlock(&esw->state_lock); | 1780 | mutex_unlock(&esw->state_lock); |
| 1763 | |||
| 1764 | return err; | 1781 | return err; |
| 1765 | } | 1782 | } |
| 1766 | 1783 | ||
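
The node_guid_gen_from_mac() helper added above builds an EUI-64-style node GUID by placing the first three MAC bytes in the top of the 64-bit value, the last three in the bottom, and ff:fe in between. A standalone sketch of the same byte layout (the 00:11:22:33:44:55 MAC is just an example value, and the printed result assumes a little-endian host):

#include <stdint.h>
#include <stdio.h>

/* Mirror of the byte layout used by the kernel helper. */
static void node_guid_from_mac(uint64_t *node_guid, const uint8_t mac[6])
{
        uint8_t *b = (uint8_t *)node_guid;

        b[7] = mac[0];
        b[6] = mac[1];
        b[5] = mac[2];
        b[4] = 0xff;
        b[3] = 0xfe;
        b[2] = mac[3];
        b[1] = mac[4];
        b[0] = mac[5];
}

int main(void)
{
        const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        uint64_t guid = 0;

        node_guid_from_mac(&guid, mac);
        printf("node guid: %016llx\n", (unsigned long long)guid);
        /* Prints 001122fffe334455 on a little-endian machine. */
        return 0;
}
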
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 8b5f0b2c0d5c..e912a3d2505e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | |||
| @@ -1292,8 +1292,8 @@ static int update_root_ft_destroy(struct mlx5_flow_table *ft) | |||
| 1292 | ft->id); | 1292 | ft->id); |
| 1293 | return err; | 1293 | return err; |
| 1294 | } | 1294 | } |
| 1295 | root->root_ft = new_root_ft; | ||
| 1296 | } | 1295 | } |
| 1296 | root->root_ft = new_root_ft; | ||
| 1297 | return 0; | 1297 | return 0; |
| 1298 | } | 1298 | } |
| 1299 | 1299 | ||
| @@ -1767,6 +1767,9 @@ static void cleanup_root_ns(struct mlx5_core_dev *dev) | |||
| 1767 | 1767 | ||
| 1768 | void mlx5_cleanup_fs(struct mlx5_core_dev *dev) | 1768 | void mlx5_cleanup_fs(struct mlx5_core_dev *dev) |
| 1769 | { | 1769 | { |
| 1770 | if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) | ||
| 1771 | return; | ||
| 1772 | |||
| 1770 | cleanup_root_ns(dev); | 1773 | cleanup_root_ns(dev); |
| 1771 | cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns); | 1774 | cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns); |
| 1772 | cleanup_single_prio_root_ns(dev, dev->priv.esw_egress_root_ns); | 1775 | cleanup_single_prio_root_ns(dev, dev->priv.esw_egress_root_ns); |
| @@ -1828,29 +1831,36 @@ int mlx5_init_fs(struct mlx5_core_dev *dev) | |||
| 1828 | { | 1831 | { |
| 1829 | int err = 0; | 1832 | int err = 0; |
| 1830 | 1833 | ||
| 1834 | if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) | ||
| 1835 | return 0; | ||
| 1836 | |||
| 1831 | err = mlx5_init_fc_stats(dev); | 1837 | err = mlx5_init_fc_stats(dev); |
| 1832 | if (err) | 1838 | if (err) |
| 1833 | return err; | 1839 | return err; |
| 1834 | 1840 | ||
| 1835 | if (MLX5_CAP_GEN(dev, nic_flow_table)) { | 1841 | if (MLX5_CAP_GEN(dev, nic_flow_table) && |
| 1842 | MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) { | ||
| 1836 | err = init_root_ns(dev); | 1843 | err = init_root_ns(dev); |
| 1837 | if (err) | 1844 | if (err) |
| 1838 | goto err; | 1845 | goto err; |
| 1839 | } | 1846 | } |
| 1847 | |||
| 1840 | if (MLX5_CAP_GEN(dev, eswitch_flow_table)) { | 1848 | if (MLX5_CAP_GEN(dev, eswitch_flow_table)) { |
| 1841 | err = init_fdb_root_ns(dev); | 1849 | if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) { |
| 1842 | if (err) | 1850 | err = init_fdb_root_ns(dev); |
| 1843 | goto err; | 1851 | if (err) |
| 1844 | } | 1852 | goto err; |
| 1845 | if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) { | 1853 | } |
| 1846 | err = init_egress_acl_root_ns(dev); | 1854 | if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) { |
| 1847 | if (err) | 1855 | err = init_egress_acl_root_ns(dev); |
| 1848 | goto err; | 1856 | if (err) |
| 1849 | } | 1857 | goto err; |
| 1850 | if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) { | 1858 | } |
| 1851 | err = init_ingress_acl_root_ns(dev); | 1859 | if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) { |
| 1852 | if (err) | 1860 | err = init_ingress_acl_root_ns(dev); |
| 1853 | goto err; | 1861 | if (err) |
| 1862 | goto err; | ||
| 1863 | } | ||
| 1854 | } | 1864 | } |
| 1855 | 1865 | ||
| 1856 | return 0; | 1866 | return 0; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index b720a274220d..b82d65802d96 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c | |||
| @@ -418,7 +418,7 @@ int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn) | |||
| 418 | if (out.hdr.status) | 418 | if (out.hdr.status) |
| 419 | err = mlx5_cmd_status_to_err(&out.hdr); | 419 | err = mlx5_cmd_status_to_err(&out.hdr); |
| 420 | else | 420 | else |
| 421 | *xrcdn = be32_to_cpu(out.xrcdn); | 421 | *xrcdn = be32_to_cpu(out.xrcdn) & 0xffffff; |
| 422 | 422 | ||
| 423 | return err; | 423 | return err; |
| 424 | } | 424 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index b69dadcfb897..daf44cd4c566 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c | |||
| @@ -508,6 +508,44 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid) | |||
| 508 | } | 508 | } |
| 509 | EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid); | 509 | EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid); |
| 510 | 510 | ||
| 511 | int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev, | ||
| 512 | u32 vport, u64 node_guid) | ||
| 513 | { | ||
| 514 | int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in); | ||
| 515 | void *nic_vport_context; | ||
| 516 | u8 *guid; | ||
| 517 | void *in; | ||
| 518 | int err; | ||
| 519 | |||
| 520 | if (!vport) | ||
| 521 | return -EINVAL; | ||
| 522 | if (!MLX5_CAP_GEN(mdev, vport_group_manager)) | ||
| 523 | return -EACCES; | ||
| 524 | if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify)) | ||
| 525 | return -ENOTSUPP; | ||
| 526 | |||
| 527 | in = mlx5_vzalloc(inlen); | ||
| 528 | if (!in) | ||
| 529 | return -ENOMEM; | ||
| 530 | |||
| 531 | MLX5_SET(modify_nic_vport_context_in, in, | ||
| 532 | field_select.node_guid, 1); | ||
| 533 | MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport); | ||
| 534 | MLX5_SET(modify_nic_vport_context_in, in, other_vport, !!vport); | ||
| 535 | |||
| 536 | nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in, | ||
| 537 | in, nic_vport_context); | ||
| 538 | guid = MLX5_ADDR_OF(nic_vport_context, nic_vport_context, | ||
| 539 | node_guid); | ||
| 540 | MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid); | ||
| 541 | |||
| 542 | err = mlx5_modify_nic_vport_context(mdev, in, inlen); | ||
| 543 | |||
| 544 | kvfree(in); | ||
| 545 | |||
| 546 | return err; | ||
| 547 | } | ||
| 548 | |||
| 511 | int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev, | 549 | int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev, |
| 512 | u16 *qkey_viol_cntr) | 550 | u16 *qkey_viol_cntr) |
| 513 | { | 551 | { |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 4a7273771028..6f9e3ddff4a8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c | |||
| @@ -247,15 +247,23 @@ static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) | |||
| 247 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); | 247 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); |
| 248 | } | 248 | } |
| 249 | 249 | ||
| 250 | static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid) | 250 | static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port, |
| 251 | u8 swid) | ||
| 251 | { | 252 | { |
| 252 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; | ||
| 253 | char pspa_pl[MLXSW_REG_PSPA_LEN]; | 253 | char pspa_pl[MLXSW_REG_PSPA_LEN]; |
| 254 | 254 | ||
| 255 | mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port); | 255 | mlxsw_reg_pspa_pack(pspa_pl, swid, local_port); |
| 256 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl); | 256 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl); |
| 257 | } | 257 | } |
| 258 | 258 | ||
| 259 | static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid) | ||
| 260 | { | ||
| 261 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; | ||
| 262 | |||
| 263 | return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port, | ||
| 264 | swid); | ||
| 265 | } | ||
| 266 | |||
| 259 | static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, | 267 | static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, |
| 260 | bool enable) | 268 | bool enable) |
| 261 | { | 269 | { |
| @@ -305,9 +313,9 @@ mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port) | |||
| 305 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); | 313 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); |
| 306 | } | 314 | } |
| 307 | 315 | ||
| 308 | static int __mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, | 316 | static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, |
| 309 | u8 local_port, u8 *p_module, | 317 | u8 local_port, u8 *p_module, |
| 310 | u8 *p_width, u8 *p_lane) | 318 | u8 *p_width, u8 *p_lane) |
| 311 | { | 319 | { |
| 312 | char pmlp_pl[MLXSW_REG_PMLP_LEN]; | 320 | char pmlp_pl[MLXSW_REG_PMLP_LEN]; |
| 313 | int err; | 321 | int err; |
| @@ -322,16 +330,6 @@ static int __mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, | |||
| 322 | return 0; | 330 | return 0; |
| 323 | } | 331 | } |
| 324 | 332 | ||
| 325 | static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, | ||
| 326 | u8 local_port, u8 *p_module, | ||
| 327 | u8 *p_width) | ||
| 328 | { | ||
| 329 | u8 lane; | ||
| 330 | |||
| 331 | return __mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, p_module, | ||
| 332 | p_width, &lane); | ||
| 333 | } | ||
| 334 | |||
| 335 | static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port, | 333 | static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port, |
| 336 | u8 module, u8 width, u8 lane) | 334 | u8 module, u8 width, u8 lane) |
| 337 | { | 335 | { |
| @@ -949,17 +947,11 @@ static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name, | |||
| 949 | size_t len) | 947 | size_t len) |
| 950 | { | 948 | { |
| 951 | struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); | 949 | struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); |
| 952 | u8 module, width, lane; | 950 | u8 module = mlxsw_sp_port->mapping.module; |
| 951 | u8 width = mlxsw_sp_port->mapping.width; | ||
| 952 | u8 lane = mlxsw_sp_port->mapping.lane; | ||
| 953 | int err; | 953 | int err; |
| 954 | 954 | ||
| 955 | err = __mlxsw_sp_port_module_info_get(mlxsw_sp_port->mlxsw_sp, | ||
| 956 | mlxsw_sp_port->local_port, | ||
| 957 | &module, &width, &lane); | ||
| 958 | if (err) { | ||
| 959 | netdev_err(dev, "Failed to retrieve module information\n"); | ||
| 960 | return err; | ||
| 961 | } | ||
| 962 | |||
| 963 | if (!mlxsw_sp_port->split) | 955 | if (!mlxsw_sp_port->split) |
| 964 | err = snprintf(name, len, "p%d", module + 1); | 956 | err = snprintf(name, len, "p%d", module + 1); |
| 965 | else | 957 | else |
| @@ -1681,8 +1673,8 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) | |||
| 1681 | return 0; | 1673 | return 0; |
| 1682 | } | 1674 | } |
| 1683 | 1675 | ||
| 1684 | static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, | 1676 | static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, |
| 1685 | bool split, u8 module, u8 width) | 1677 | bool split, u8 module, u8 width, u8 lane) |
| 1686 | { | 1678 | { |
| 1687 | struct mlxsw_sp_port *mlxsw_sp_port; | 1679 | struct mlxsw_sp_port *mlxsw_sp_port; |
| 1688 | struct net_device *dev; | 1680 | struct net_device *dev; |
| @@ -1697,6 +1689,9 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, | |||
| 1697 | mlxsw_sp_port->mlxsw_sp = mlxsw_sp; | 1689 | mlxsw_sp_port->mlxsw_sp = mlxsw_sp; |
| 1698 | mlxsw_sp_port->local_port = local_port; | 1690 | mlxsw_sp_port->local_port = local_port; |
| 1699 | mlxsw_sp_port->split = split; | 1691 | mlxsw_sp_port->split = split; |
| 1692 | mlxsw_sp_port->mapping.module = module; | ||
| 1693 | mlxsw_sp_port->mapping.width = width; | ||
| 1694 | mlxsw_sp_port->mapping.lane = lane; | ||
| 1700 | bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE); | 1695 | bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE); |
| 1701 | mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL); | 1696 | mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL); |
| 1702 | if (!mlxsw_sp_port->active_vlans) { | 1697 | if (!mlxsw_sp_port->active_vlans) { |
| @@ -1839,28 +1834,6 @@ err_port_active_vlans_alloc: | |||
| 1839 | return err; | 1834 | return err; |
| 1840 | } | 1835 | } |
| 1841 | 1836 | ||
| 1842 | static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, | ||
| 1843 | bool split, u8 module, u8 width, u8 lane) | ||
| 1844 | { | ||
| 1845 | int err; | ||
| 1846 | |||
| 1847 | err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width, | ||
| 1848 | lane); | ||
| 1849 | if (err) | ||
| 1850 | return err; | ||
| 1851 | |||
| 1852 | err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split, module, | ||
| 1853 | width); | ||
| 1854 | if (err) | ||
| 1855 | goto err_port_create; | ||
| 1856 | |||
| 1857 | return 0; | ||
| 1858 | |||
| 1859 | err_port_create: | ||
| 1860 | mlxsw_sp_port_module_unmap(mlxsw_sp, local_port); | ||
| 1861 | return err; | ||
| 1862 | } | ||
| 1863 | |||
| 1864 | static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port) | 1837 | static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port) |
| 1865 | { | 1838 | { |
| 1866 | struct net_device *dev = mlxsw_sp_port->dev; | 1839 | struct net_device *dev = mlxsw_sp_port->dev; |
| @@ -1909,8 +1882,8 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) | |||
| 1909 | 1882 | ||
| 1910 | static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) | 1883 | static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) |
| 1911 | { | 1884 | { |
| 1885 | u8 module, width, lane; | ||
| 1912 | size_t alloc_size; | 1886 | size_t alloc_size; |
| 1913 | u8 module, width; | ||
| 1914 | int i; | 1887 | int i; |
| 1915 | int err; | 1888 | int err; |
| 1916 | 1889 | ||
| @@ -1921,13 +1894,14 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) | |||
| 1921 | 1894 | ||
| 1922 | for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) { | 1895 | for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) { |
| 1923 | err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module, | 1896 | err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module, |
| 1924 | &width); | 1897 | &width, &lane); |
| 1925 | if (err) | 1898 | if (err) |
| 1926 | goto err_port_module_info_get; | 1899 | goto err_port_module_info_get; |
| 1927 | if (!width) | 1900 | if (!width) |
| 1928 | continue; | 1901 | continue; |
| 1929 | mlxsw_sp->port_to_module[i] = module; | 1902 | mlxsw_sp->port_to_module[i] = module; |
| 1930 | err = __mlxsw_sp_port_create(mlxsw_sp, i, false, module, width); | 1903 | err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width, |
| 1904 | lane); | ||
| 1931 | if (err) | 1905 | if (err) |
| 1932 | goto err_port_create; | 1906 | goto err_port_create; |
| 1933 | } | 1907 | } |
| @@ -1948,12 +1922,85 @@ static u8 mlxsw_sp_cluster_base_port_get(u8 local_port) | |||
| 1948 | return local_port - offset; | 1922 | return local_port - offset; |
| 1949 | } | 1923 | } |
| 1950 | 1924 | ||
| 1925 | static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port, | ||
| 1926 | u8 module, unsigned int count) | ||
| 1927 | { | ||
| 1928 | u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count; | ||
| 1929 | int err, i; | ||
| 1930 | |||
| 1931 | for (i = 0; i < count; i++) { | ||
| 1932 | err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module, | ||
| 1933 | width, i * width); | ||
| 1934 | if (err) | ||
| 1935 | goto err_port_module_map; | ||
| 1936 | } | ||
| 1937 | |||
| 1938 | for (i = 0; i < count; i++) { | ||
| 1939 | err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0); | ||
| 1940 | if (err) | ||
| 1941 | goto err_port_swid_set; | ||
| 1942 | } | ||
| 1943 | |||
| 1944 | for (i = 0; i < count; i++) { | ||
| 1945 | err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true, | ||
| 1946 | module, width, i * width); | ||
| 1947 | if (err) | ||
| 1948 | goto err_port_create; | ||
| 1949 | } | ||
| 1950 | |||
| 1951 | return 0; | ||
| 1952 | |||
| 1953 | err_port_create: | ||
| 1954 | for (i--; i >= 0; i--) | ||
| 1955 | mlxsw_sp_port_remove(mlxsw_sp, base_port + i); | ||
| 1956 | i = count; | ||
| 1957 | err_port_swid_set: | ||
| 1958 | for (i--; i >= 0; i--) | ||
| 1959 | __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, | ||
| 1960 | MLXSW_PORT_SWID_DISABLED_PORT); | ||
| 1961 | i = count; | ||
| 1962 | err_port_module_map: | ||
| 1963 | for (i--; i >= 0; i--) | ||
| 1964 | mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i); | ||
| 1965 | return err; | ||
| 1966 | } | ||
| 1967 | |||
| 1968 | static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, | ||
| 1969 | u8 base_port, unsigned int count) | ||
| 1970 | { | ||
| 1971 | u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH; | ||
| 1972 | int i; | ||
| 1973 | |||
| 1974 | /* Split by four means we need to re-create two ports, otherwise | ||
| 1975 | * only one. | ||
| 1976 | */ | ||
| 1977 | count = count / 2; | ||
| 1978 | |||
| 1979 | for (i = 0; i < count; i++) { | ||
| 1980 | local_port = base_port + i * 2; | ||
| 1981 | module = mlxsw_sp->port_to_module[local_port]; | ||
| 1982 | |||
| 1983 | mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width, | ||
| 1984 | 0); | ||
| 1985 | } | ||
| 1986 | |||
| 1987 | for (i = 0; i < count; i++) | ||
| 1988 | __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0); | ||
| 1989 | |||
| 1990 | for (i = 0; i < count; i++) { | ||
| 1991 | local_port = base_port + i * 2; | ||
| 1992 | module = mlxsw_sp->port_to_module[local_port]; | ||
| 1993 | |||
| 1994 | mlxsw_sp_port_create(mlxsw_sp, local_port, false, module, | ||
| 1995 | width, 0); | ||
| 1996 | } | ||
| 1997 | } | ||
| 1998 | |||
| 1951 | static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, | 1999 | static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, |
| 1952 | unsigned int count) | 2000 | unsigned int count) |
| 1953 | { | 2001 | { |
| 1954 | struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); | 2002 | struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); |
| 1955 | struct mlxsw_sp_port *mlxsw_sp_port; | 2003 | struct mlxsw_sp_port *mlxsw_sp_port; |
| 1956 | u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count; | ||
| 1957 | u8 module, cur_width, base_port; | 2004 | u8 module, cur_width, base_port; |
| 1958 | int i; | 2005 | int i; |
| 1959 | int err; | 2006 | int err; |
| @@ -1965,18 +2012,14 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, | |||
| 1965 | return -EINVAL; | 2012 | return -EINVAL; |
| 1966 | } | 2013 | } |
| 1967 | 2014 | ||
| 2015 | module = mlxsw_sp_port->mapping.module; | ||
| 2016 | cur_width = mlxsw_sp_port->mapping.width; | ||
| 2017 | |||
| 1968 | if (count != 2 && count != 4) { | 2018 | if (count != 2 && count != 4) { |
| 1969 | netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n"); | 2019 | netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n"); |
| 1970 | return -EINVAL; | 2020 | return -EINVAL; |
| 1971 | } | 2021 | } |
| 1972 | 2022 | ||
| 1973 | err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module, | ||
| 1974 | &cur_width); | ||
| 1975 | if (err) { | ||
| 1976 | netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n"); | ||
| 1977 | return err; | ||
| 1978 | } | ||
| 1979 | |||
| 1980 | if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) { | 2023 | if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) { |
| 1981 | netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n"); | 2024 | netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n"); |
| 1982 | return -EINVAL; | 2025 | return -EINVAL; |
| @@ -2001,25 +2044,16 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, | |||
| 2001 | for (i = 0; i < count; i++) | 2044 | for (i = 0; i < count; i++) |
| 2002 | mlxsw_sp_port_remove(mlxsw_sp, base_port + i); | 2045 | mlxsw_sp_port_remove(mlxsw_sp, base_port + i); |
| 2003 | 2046 | ||
| 2004 | for (i = 0; i < count; i++) { | 2047 | err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count); |
| 2005 | err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true, | 2048 | if (err) { |
| 2006 | module, width, i * width); | 2049 | dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); |
| 2007 | if (err) { | 2050 | goto err_port_split_create; |
| 2008 | dev_err(mlxsw_sp->bus_info->dev, "Failed to create split port\n"); | ||
| 2009 | goto err_port_create; | ||
| 2010 | } | ||
| 2011 | } | 2051 | } |
| 2012 | 2052 | ||
| 2013 | return 0; | 2053 | return 0; |
| 2014 | 2054 | ||
| 2015 | err_port_create: | 2055 | err_port_split_create: |
| 2016 | for (i--; i >= 0; i--) | 2056 | mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); |
| 2017 | mlxsw_sp_port_remove(mlxsw_sp, base_port + i); | ||
| 2018 | for (i = 0; i < count / 2; i++) { | ||
| 2019 | module = mlxsw_sp->port_to_module[base_port + i * 2]; | ||
| 2020 | mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false, | ||
| 2021 | module, MLXSW_PORT_MODULE_MAX_WIDTH, 0); | ||
| 2022 | } | ||
| 2023 | return err; | 2057 | return err; |
| 2024 | } | 2058 | } |
| 2025 | 2059 | ||
| @@ -2027,10 +2061,9 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port) | |||
| 2027 | { | 2061 | { |
| 2028 | struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); | 2062 | struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); |
| 2029 | struct mlxsw_sp_port *mlxsw_sp_port; | 2063 | struct mlxsw_sp_port *mlxsw_sp_port; |
| 2030 | u8 module, cur_width, base_port; | 2064 | u8 cur_width, base_port; |
| 2031 | unsigned int count; | 2065 | unsigned int count; |
| 2032 | int i; | 2066 | int i; |
| 2033 | int err; | ||
| 2034 | 2067 | ||
| 2035 | mlxsw_sp_port = mlxsw_sp->ports[local_port]; | 2068 | mlxsw_sp_port = mlxsw_sp->ports[local_port]; |
| 2036 | if (!mlxsw_sp_port) { | 2069 | if (!mlxsw_sp_port) { |
| @@ -2044,12 +2077,7 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port) | |||
| 2044 | return -EINVAL; | 2077 | return -EINVAL; |
| 2045 | } | 2078 | } |
| 2046 | 2079 | ||
| 2047 | err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module, | 2080 | cur_width = mlxsw_sp_port->mapping.width; |
| 2048 | &cur_width); | ||
| 2049 | if (err) { | ||
| 2050 | netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n"); | ||
| 2051 | return err; | ||
| 2052 | } | ||
| 2053 | count = cur_width == 1 ? 4 : 2; | 2081 | count = cur_width == 1 ? 4 : 2; |
| 2054 | 2082 | ||
| 2055 | base_port = mlxsw_sp_cluster_base_port_get(local_port); | 2083 | base_port = mlxsw_sp_cluster_base_port_get(local_port); |
| @@ -2061,14 +2089,7 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port) | |||
| 2061 | for (i = 0; i < count; i++) | 2089 | for (i = 0; i < count; i++) |
| 2062 | mlxsw_sp_port_remove(mlxsw_sp, base_port + i); | 2090 | mlxsw_sp_port_remove(mlxsw_sp, base_port + i); |
| 2063 | 2091 | ||
| 2064 | for (i = 0; i < count / 2; i++) { | 2092 | mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); |
| 2065 | module = mlxsw_sp->port_to_module[base_port + i * 2]; | ||
| 2066 | err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false, | ||
| 2067 | module, MLXSW_PORT_MODULE_MAX_WIDTH, | ||
| 2068 | 0); | ||
| 2069 | if (err) | ||
| 2070 | dev_err(mlxsw_sp->bus_info->dev, "Failed to reinstantiate port\n"); | ||
| 2071 | } | ||
| 2072 | 2093 | ||
| 2073 | return 0; | 2094 | return 0; |
| 2074 | } | 2095 | } |
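
The split/unsplit rework in this file reduces to lane arithmetic: splitting a port into count ports gives each new port MLXSW_PORT_MODULE_MAX_WIDTH / count lanes starting at lane i * width, and unsplitting re-creates count / 2 full-width ports at every second local port (hence the "split by four means two ports" comment). A small sketch that only prints those mappings; the maximum module width of 4 is an assumption taken from the constant's name, not something stated in this diff:

#include <stdio.h>

#define MLXSW_PORT_MODULE_MAX_WIDTH 4   /* assumed value, for illustration */

static void show_split(unsigned int base_port, unsigned int count)
{
        unsigned int width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
        unsigned int i;

        printf("split into %u:\n", count);
        for (i = 0; i < count; i++)
                printf("  local_port %u -> width %u, first lane %u\n",
                       base_port + i, width, i * width);
}

static void show_unsplit(unsigned int base_port, unsigned int count)
{
        unsigned int i;

        /* Split by four means two full-width ports come back, otherwise one. */
        count /= 2;
        printf("unsplit, %u port(s) re-created:\n", count);
        for (i = 0; i < count; i++)
                printf("  local_port %u -> width %u, first lane 0\n",
                       base_port + i * 2, MLXSW_PORT_MODULE_MAX_WIDTH);
}

int main(void)
{
        show_split(1, 2);   /* local ports 1,2: two lanes each (lanes 0 and 2) */
        show_split(1, 4);   /* local ports 1..4: one lane each (lanes 0..3)    */
        show_unsplit(1, 4); /* local ports 1 and 3 come back at full width     */
        return 0;
}
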
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index e2c022d3e2f3..13b30eaa13d4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h | |||
| @@ -229,6 +229,11 @@ struct mlxsw_sp_port { | |||
| 229 | struct ieee_maxrate *maxrate; | 229 | struct ieee_maxrate *maxrate; |
| 230 | struct ieee_pfc *pfc; | 230 | struct ieee_pfc *pfc; |
| 231 | } dcb; | 231 | } dcb; |
| 232 | struct { | ||
| 233 | u8 module; | ||
| 234 | u8 width; | ||
| 235 | u8 lane; | ||
| 236 | } mapping; | ||
| 232 | /* 802.1Q bridge VLANs */ | 237 | /* 802.1Q bridge VLANs */ |
| 233 | unsigned long *active_vlans; | 238 | unsigned long *active_vlans; |
| 234 | unsigned long *untagged_vlans; | 239 | unsigned long *untagged_vlans; |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 753064679bde..61cc6869fa65 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c | |||
| @@ -1105,6 +1105,39 @@ static int qed_get_port_type(u32 media_type) | |||
| 1105 | return port_type; | 1105 | return port_type; |
| 1106 | } | 1106 | } |
| 1107 | 1107 | ||
| 1108 | static int qed_get_link_data(struct qed_hwfn *hwfn, | ||
| 1109 | struct qed_mcp_link_params *params, | ||
| 1110 | struct qed_mcp_link_state *link, | ||
| 1111 | struct qed_mcp_link_capabilities *link_caps) | ||
| 1112 | { | ||
| 1113 | void *p; | ||
| 1114 | |||
| 1115 | if (!IS_PF(hwfn->cdev)) { | ||
| 1116 | qed_vf_get_link_params(hwfn, params); | ||
| 1117 | qed_vf_get_link_state(hwfn, link); | ||
| 1118 | qed_vf_get_link_caps(hwfn, link_caps); | ||
| 1119 | |||
| 1120 | return 0; | ||
| 1121 | } | ||
| 1122 | |||
| 1123 | p = qed_mcp_get_link_params(hwfn); | ||
| 1124 | if (!p) | ||
| 1125 | return -ENXIO; | ||
| 1126 | memcpy(params, p, sizeof(*params)); | ||
| 1127 | |||
| 1128 | p = qed_mcp_get_link_state(hwfn); | ||
| 1129 | if (!p) | ||
| 1130 | return -ENXIO; | ||
| 1131 | memcpy(link, p, sizeof(*link)); | ||
| 1132 | |||
| 1133 | p = qed_mcp_get_link_capabilities(hwfn); | ||
| 1134 | if (!p) | ||
| 1135 | return -ENXIO; | ||
| 1136 | memcpy(link_caps, p, sizeof(*link_caps)); | ||
| 1137 | |||
| 1138 | return 0; | ||
| 1139 | } | ||
| 1140 | |||
| 1108 | static void qed_fill_link(struct qed_hwfn *hwfn, | 1141 | static void qed_fill_link(struct qed_hwfn *hwfn, |
| 1109 | struct qed_link_output *if_link) | 1142 | struct qed_link_output *if_link) |
| 1110 | { | 1143 | { |
| @@ -1116,15 +1149,9 @@ static void qed_fill_link(struct qed_hwfn *hwfn, | |||
| 1116 | memset(if_link, 0, sizeof(*if_link)); | 1149 | memset(if_link, 0, sizeof(*if_link)); |
| 1117 | 1150 | ||
| 1118 | /* Prepare source inputs */ | 1151 | /* Prepare source inputs */ |
| 1119 | if (IS_PF(hwfn->cdev)) { | 1152 | if (qed_get_link_data(hwfn, ¶ms, &link, &link_caps)) { |
| 1120 | memcpy(¶ms, qed_mcp_get_link_params(hwfn), sizeof(params)); | 1153 | dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n"); |
| 1121 | memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link)); | 1154 | return; |
| 1122 | memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn), | ||
| 1123 | sizeof(link_caps)); | ||
| 1124 | } else { | ||
| 1125 | qed_vf_get_link_params(hwfn, ¶ms); | ||
| 1126 | qed_vf_get_link_state(hwfn, &link); | ||
| 1127 | qed_vf_get_link_caps(hwfn, &link_caps); | ||
| 1128 | } | 1155 | } |
| 1129 | 1156 | ||
| 1130 | /* Set the link parameters to pass to protocol driver */ | 1157 | /* Set the link parameters to pass to protocol driver */ |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index c8667c65e685..c90b2b6ad969 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h | |||
| @@ -12,11 +12,13 @@ | |||
| 12 | #include "qed_vf.h" | 12 | #include "qed_vf.h" |
| 13 | #define QED_VF_ARRAY_LENGTH (3) | 13 | #define QED_VF_ARRAY_LENGTH (3) |
| 14 | 14 | ||
| 15 | #ifdef CONFIG_QED_SRIOV | ||
| 15 | #define IS_VF(cdev) ((cdev)->b_is_vf) | 16 | #define IS_VF(cdev) ((cdev)->b_is_vf) |
| 16 | #define IS_PF(cdev) (!((cdev)->b_is_vf)) | 17 | #define IS_PF(cdev) (!((cdev)->b_is_vf)) |
| 17 | #ifdef CONFIG_QED_SRIOV | ||
| 18 | #define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->cdev->p_iov_info)) | 18 | #define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->cdev->p_iov_info)) |
| 19 | #else | 19 | #else |
| 20 | #define IS_VF(cdev) (0) | ||
| 21 | #define IS_PF(cdev) (1) | ||
| 20 | #define IS_PF_SRIOV(p_hwfn) (0) | 22 | #define IS_PF_SRIOV(p_hwfn) (0) |
| 21 | #endif | 23 | #endif |
| 22 | #define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info)) | 24 | #define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info)) |
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 5d00d1404bfc..5733d1888223 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c | |||
| @@ -87,7 +87,9 @@ static const struct pci_device_id qede_pci_tbl[] = { | |||
| 87 | {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF}, | 87 | {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF}, |
| 88 | {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF}, | 88 | {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF}, |
| 89 | {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF}, | 89 | {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF}, |
| 90 | #ifdef CONFIG_QED_SRIOV | ||
| 90 | {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF}, | 91 | {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF}, |
| 92 | #endif | ||
| 91 | { 0 } | 93 | { 0 } |
| 92 | }; | 94 | }; |
| 93 | 95 | ||
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c index 7f295c4d7b80..2a9228a6e4a0 100644 --- a/drivers/net/ethernet/sfc/mcdi_port.c +++ b/drivers/net/ethernet/sfc/mcdi_port.c | |||
| @@ -189,11 +189,12 @@ static u32 mcdi_to_ethtool_cap(u32 media, u32 cap) | |||
| 189 | 189 | ||
| 190 | case MC_CMD_MEDIA_XFP: | 190 | case MC_CMD_MEDIA_XFP: |
| 191 | case MC_CMD_MEDIA_SFP_PLUS: | 191 | case MC_CMD_MEDIA_SFP_PLUS: |
| 192 | result |= SUPPORTED_FIBRE; | ||
| 193 | break; | ||
| 194 | |||
| 195 | case MC_CMD_MEDIA_QSFP_PLUS: | 192 | case MC_CMD_MEDIA_QSFP_PLUS: |
| 196 | result |= SUPPORTED_FIBRE; | 193 | result |= SUPPORTED_FIBRE; |
| 194 | if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) | ||
| 195 | result |= SUPPORTED_1000baseT_Full; | ||
| 196 | if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) | ||
| 197 | result |= SUPPORTED_10000baseT_Full; | ||
| 197 | if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) | 198 | if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) |
| 198 | result |= SUPPORTED_40000baseCR4_Full; | 199 | result |= SUPPORTED_40000baseCR4_Full; |
| 199 | break; | 200 | break; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index 4f7283d05588..44da877d2483 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | |||
| @@ -156,7 +156,7 @@ static void dwmac4_set_filter(struct mac_device_info *hw, | |||
| 156 | struct netdev_hw_addr *ha; | 156 | struct netdev_hw_addr *ha; |
| 157 | 157 | ||
| 158 | netdev_for_each_uc_addr(ha, dev) { | 158 | netdev_for_each_uc_addr(ha, dev) { |
| 159 | dwmac4_set_umac_addr(ioaddr, ha->addr, reg); | 159 | dwmac4_set_umac_addr(hw, ha->addr, reg); |
| 160 | reg++; | 160 | reg++; |
| 161 | } | 161 | } |
| 162 | } | 162 | } |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index eac45d0c75e2..a473c182c91d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
| @@ -3450,8 +3450,6 @@ int stmmac_resume(struct device *dev) | |||
| 3450 | if (!netif_running(ndev)) | 3450 | if (!netif_running(ndev)) |
| 3451 | return 0; | 3451 | return 0; |
| 3452 | 3452 | ||
| 3453 | spin_lock_irqsave(&priv->lock, flags); | ||
| 3454 | |||
| 3455 | /* Power Down bit, into the PM register, is cleared | 3453 | /* Power Down bit, into the PM register, is cleared |
| 3456 | * automatically as soon as a magic packet or a Wake-up frame | 3454 | * automatically as soon as a magic packet or a Wake-up frame |
| 3457 | * is received. Anyway, it's better to manually clear | 3455 | * is received. Anyway, it's better to manually clear |
| @@ -3459,7 +3457,9 @@ int stmmac_resume(struct device *dev) | |||
| 3459 | * from other devices (e.g. serial console). | 3457 | * from other devices (e.g. serial console). |
| 3460 | */ | 3458 | */ |
| 3461 | if (device_may_wakeup(priv->device)) { | 3459 | if (device_may_wakeup(priv->device)) { |
| 3460 | spin_lock_irqsave(&priv->lock, flags); | ||
| 3462 | priv->hw->mac->pmt(priv->hw, 0); | 3461 | priv->hw->mac->pmt(priv->hw, 0); |
| 3462 | spin_unlock_irqrestore(&priv->lock, flags); | ||
| 3463 | priv->irq_wake = 0; | 3463 | priv->irq_wake = 0; |
| 3464 | } else { | 3464 | } else { |
| 3465 | pinctrl_pm_select_default_state(priv->device); | 3465 | pinctrl_pm_select_default_state(priv->device); |
| @@ -3473,6 +3473,8 @@ int stmmac_resume(struct device *dev) | |||
| 3473 | 3473 | ||
| 3474 | netif_device_attach(ndev); | 3474 | netif_device_attach(ndev); |
| 3475 | 3475 | ||
| 3476 | spin_lock_irqsave(&priv->lock, flags); | ||
| 3477 | |||
| 3476 | priv->cur_rx = 0; | 3478 | priv->cur_rx = 0; |
| 3477 | priv->dirty_rx = 0; | 3479 | priv->dirty_rx = 0; |
| 3478 | priv->dirty_tx = 0; | 3480 | priv->dirty_tx = 0; |
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 4b08a2f52b3e..e6bb0ecb12c7 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
| @@ -1339,7 +1339,7 @@ static int cpsw_ndo_open(struct net_device *ndev) | |||
| 1339 | if (priv->coal_intvl != 0) { | 1339 | if (priv->coal_intvl != 0) { |
| 1340 | struct ethtool_coalesce coal; | 1340 | struct ethtool_coalesce coal; |
| 1341 | 1341 | ||
| 1342 | coal.rx_coalesce_usecs = (priv->coal_intvl << 4); | 1342 | coal.rx_coalesce_usecs = priv->coal_intvl; |
| 1343 | cpsw_set_coalesce(ndev, &coal); | 1343 | cpsw_set_coalesce(ndev, &coal); |
| 1344 | } | 1344 | } |
| 1345 | 1345 | ||
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index db8022ae415b..08885bc8d6db 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c | |||
| @@ -1369,7 +1369,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, | |||
| 1369 | rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd; | 1369 | rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd; |
| 1370 | 1370 | ||
| 1371 | segCnt = rcdlro->segCnt; | 1371 | segCnt = rcdlro->segCnt; |
| 1372 | BUG_ON(segCnt <= 1); | 1372 | WARN_ON_ONCE(segCnt == 0); |
| 1373 | mss = rcdlro->mss; | 1373 | mss = rcdlro->mss; |
| 1374 | if (unlikely(segCnt <= 1)) | 1374 | if (unlikely(segCnt <= 1)) |
| 1375 | segCnt = 0; | 1375 | segCnt = 0; |
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index c4825392d64b..3d2b64e63408 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h | |||
| @@ -69,10 +69,10 @@ | |||
| 69 | /* | 69 | /* |
| 70 | * Version numbers | 70 | * Version numbers |
| 71 | */ | 71 | */ |
| 72 | #define VMXNET3_DRIVER_VERSION_STRING "1.4.7.0-k" | 72 | #define VMXNET3_DRIVER_VERSION_STRING "1.4.8.0-k" |
| 73 | 73 | ||
| 74 | /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */ | 74 | /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */ |
| 75 | #define VMXNET3_DRIVER_VERSION_NUM 0x01040700 | 75 | #define VMXNET3_DRIVER_VERSION_NUM 0x01040800 |
| 76 | 76 | ||
| 77 | #if defined(CONFIG_PCI_MSI) | 77 | #if defined(CONFIG_PCI_MSI) |
| 78 | /* RSS only makes sense if MSI-X is supported. */ | 78 | /* RSS only makes sense if MSI-X is supported. */ |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index d0631b6cfd53..62f475e31077 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | |||
| @@ -2540,12 +2540,14 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev, | |||
| 2540 | const u8 *mac, struct station_info *sinfo) | 2540 | const u8 *mac, struct station_info *sinfo) |
| 2541 | { | 2541 | { |
| 2542 | struct brcmf_if *ifp = netdev_priv(ndev); | 2542 | struct brcmf_if *ifp = netdev_priv(ndev); |
| 2543 | struct brcmf_scb_val_le scb_val; | ||
| 2543 | s32 err = 0; | 2544 | s32 err = 0; |
| 2544 | struct brcmf_sta_info_le sta_info_le; | 2545 | struct brcmf_sta_info_le sta_info_le; |
| 2545 | u32 sta_flags; | 2546 | u32 sta_flags; |
| 2546 | u32 is_tdls_peer; | 2547 | u32 is_tdls_peer; |
| 2547 | s32 total_rssi; | 2548 | s32 total_rssi; |
| 2548 | s32 count_rssi; | 2549 | s32 count_rssi; |
| 2550 | int rssi; | ||
| 2549 | u32 i; | 2551 | u32 i; |
| 2550 | 2552 | ||
| 2551 | brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac); | 2553 | brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac); |
| @@ -2629,6 +2631,20 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev, | |||
| 2629 | sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); | 2631 | sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); |
| 2630 | total_rssi /= count_rssi; | 2632 | total_rssi /= count_rssi; |
| 2631 | sinfo->signal = total_rssi; | 2633 | sinfo->signal = total_rssi; |
| 2634 | } else if (test_bit(BRCMF_VIF_STATUS_CONNECTED, | ||
| 2635 | &ifp->vif->sme_state)) { | ||
| 2636 | memset(&scb_val, 0, sizeof(scb_val)); | ||
| 2637 | err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_RSSI, | ||
| 2638 | &scb_val, sizeof(scb_val)); | ||
| 2639 | if (err) { | ||
| 2640 | brcmf_err("Could not get rssi (%d)\n", err); | ||
| 2641 | goto done; | ||
| 2642 | } else { | ||
| 2643 | rssi = le32_to_cpu(scb_val.val); | ||
| 2644 | sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); | ||
| 2645 | sinfo->signal = rssi; | ||
| 2646 | brcmf_dbg(CONN, "RSSI %d dBm\n", rssi); | ||
| 2647 | } | ||
| 2632 | } | 2648 | } |
| 2633 | } | 2649 | } |
| 2634 | done: | 2650 | done: |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c index 68f1ce02f4bf..2b9a2bc429d6 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c | |||
| @@ -1157,6 +1157,8 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf) | |||
| 1157 | brcmu_pkt_buf_free_skb(skb); | 1157 | brcmu_pkt_buf_free_skb(skb); |
| 1158 | return; | 1158 | return; |
| 1159 | } | 1159 | } |
| 1160 | |||
| 1161 | skb->protocol = eth_type_trans(skb, ifp->ndev); | ||
| 1160 | brcmf_netif_rx(ifp, skb); | 1162 | brcmf_netif_rx(ifp, skb); |
| 1161 | } | 1163 | } |
| 1162 | 1164 | ||
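
The single line added above follows the standard Ethernet receive recipe: eth_type_trans() pulls the Ethernet header, fills in skb->dev and returns the protocol value the stack expects to find in skb->protocol. A hedged, kernel-style sketch of that generic pattern (illustrative only, not the brcmfmac code path, which hands the skb to its own brcmf_netif_rx() helper):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hand a received Ethernet frame (already in the skb data) to the stack. */
static void example_eth_rx(struct net_device *ndev, struct sk_buff *skb)
{
        /* Strips the Ethernet header and sets skb->dev and the packet type. */
        skb->protocol = eth_type_trans(skb, ndev);
        netif_rx(skb);
}
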
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 9ed0ed1bf514..4dd5adcdd29b 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
| @@ -2776,6 +2776,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2, | |||
| 2776 | if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] || | 2776 | if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] || |
| 2777 | !info->attrs[HWSIM_ATTR_FLAGS] || | 2777 | !info->attrs[HWSIM_ATTR_FLAGS] || |
| 2778 | !info->attrs[HWSIM_ATTR_COOKIE] || | 2778 | !info->attrs[HWSIM_ATTR_COOKIE] || |
| 2779 | !info->attrs[HWSIM_ATTR_SIGNAL] || | ||
| 2779 | !info->attrs[HWSIM_ATTR_TX_INFO]) | 2780 | !info->attrs[HWSIM_ATTR_TX_INFO]) |
| 2780 | goto out; | 2781 | goto out; |
| 2781 | 2782 | ||
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c index 0f48048b8654..3a0faa8fe9d4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c | |||
| @@ -54,7 +54,7 @@ EXPORT_SYMBOL(channel5g_80m); | |||
| 54 | void rtl_addr_delay(u32 addr) | 54 | void rtl_addr_delay(u32 addr) |
| 55 | { | 55 | { |
| 56 | if (addr == 0xfe) | 56 | if (addr == 0xfe) |
| 57 | msleep(50); | 57 | mdelay(50); |
| 58 | else if (addr == 0xfd) | 58 | else if (addr == 0xfd) |
| 59 | msleep(5); | 59 | msleep(5); |
| 60 | else if (addr == 0xfc) | 60 | else if (addr == 0xfc) |
| @@ -75,7 +75,7 @@ void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr, | |||
| 75 | rtl_addr_delay(addr); | 75 | rtl_addr_delay(addr); |
| 76 | } else { | 76 | } else { |
| 77 | rtl_set_rfreg(hw, rfpath, addr, mask, data); | 77 | rtl_set_rfreg(hw, rfpath, addr, mask, data); |
| 78 | usleep_range(1, 2); | 78 | udelay(1); |
| 79 | } | 79 | } |
| 80 | } | 80 | } |
| 81 | EXPORT_SYMBOL(rtl_rfreg_delay); | 81 | EXPORT_SYMBOL(rtl_rfreg_delay); |
| @@ -86,7 +86,7 @@ void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data) | |||
| 86 | rtl_addr_delay(addr); | 86 | rtl_addr_delay(addr); |
| 87 | } else { | 87 | } else { |
| 88 | rtl_set_bbreg(hw, addr, MASKDWORD, data); | 88 | rtl_set_bbreg(hw, addr, MASKDWORD, data); |
| 89 | usleep_range(1, 2); | 89 | udelay(1); |
| 90 | } | 90 | } |
| 91 | } | 91 | } |
| 92 | EXPORT_SYMBOL(rtl_bb_delay); | 92 | EXPORT_SYMBOL(rtl_bb_delay); |
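
The msleep()/usleep_range() -> mdelay()/udelay() swaps above are the usual fix when a delay helper can be reached from atomic context (under a spinlock or in an interrupt handler), where sleeping is forbidden; busy-wait delays are legal there at the cost of spinning the CPU. A generic kernel-style sketch of that rule, not tied to the rtlwifi call sites:

#include <linux/delay.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void example_atomic_delay(void)
{
        unsigned long flags;

        spin_lock_irqsave(&example_lock, flags);
        udelay(50);             /* fine: busy wait, never schedules */
        /* msleep(50); */       /* would be "scheduling while atomic" here */
        spin_unlock_irqrestore(&example_lock, flags);
}
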
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 78dca3193ca4..befac5b19490 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
| @@ -1679,9 +1679,14 @@ static int nvme_pci_enable(struct nvme_dev *dev) | |||
| 1679 | 1679 | ||
| 1680 | static void nvme_dev_unmap(struct nvme_dev *dev) | 1680 | static void nvme_dev_unmap(struct nvme_dev *dev) |
| 1681 | { | 1681 | { |
| 1682 | struct pci_dev *pdev = to_pci_dev(dev->dev); | ||
| 1683 | int bars; | ||
| 1684 | |||
| 1682 | if (dev->bar) | 1685 | if (dev->bar) |
| 1683 | iounmap(dev->bar); | 1686 | iounmap(dev->bar); |
| 1684 | pci_release_regions(to_pci_dev(dev->dev)); | 1687 | |
| 1688 | bars = pci_select_bars(pdev, IORESOURCE_MEM); | ||
| 1689 | pci_release_selected_regions(pdev, bars); | ||
| 1685 | } | 1690 | } |
| 1686 | 1691 | ||
| 1687 | static void nvme_pci_disable(struct nvme_dev *dev) | 1692 | static void nvme_pci_disable(struct nvme_dev *dev) |
| @@ -1924,7 +1929,7 @@ static int nvme_dev_map(struct nvme_dev *dev) | |||
| 1924 | 1929 | ||
| 1925 | return 0; | 1930 | return 0; |
| 1926 | release: | 1931 | release: |
| 1927 | pci_release_regions(pdev); | 1932 | pci_release_selected_regions(pdev, bars); |
| 1928 | return -ENODEV; | 1933 | return -ENODEV; |
| 1929 | } | 1934 | } |
| 1930 | 1935 | ||
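
The unmap path above now recomputes the same BAR mask that the map path selects and releases only those regions, keeping request and release symmetric. A hedged kernel-style sketch of that pci_select_bars() pairing (generic, not the driver's exact code):

#include <linux/ioport.h>
#include <linux/pci.h>

/* Keep the request and release sides working on the same BAR mask. */
static int example_request_mem_bars(struct pci_dev *pdev)
{
        int bars = pci_select_bars(pdev, IORESOURCE_MEM);

        return pci_request_selected_regions(pdev, bars, "example");
}

static void example_release_mem_bars(struct pci_dev *pdev)
{
        int bars = pci_select_bars(pdev, IORESOURCE_MEM);

        pci_release_selected_regions(pdev, bars);
}
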
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 14f2f8c7c260..33daffc4392c 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c | |||
| @@ -395,7 +395,7 @@ static int unflatten_dt_nodes(const void *blob, | |||
| 395 | struct device_node **nodepp) | 395 | struct device_node **nodepp) |
| 396 | { | 396 | { |
| 397 | struct device_node *root; | 397 | struct device_node *root; |
| 398 | int offset = 0, depth = 0; | 398 | int offset = 0, depth = 0, initial_depth = 0; |
| 399 | #define FDT_MAX_DEPTH 64 | 399 | #define FDT_MAX_DEPTH 64 |
| 400 | unsigned int fpsizes[FDT_MAX_DEPTH]; | 400 | unsigned int fpsizes[FDT_MAX_DEPTH]; |
| 401 | struct device_node *nps[FDT_MAX_DEPTH]; | 401 | struct device_node *nps[FDT_MAX_DEPTH]; |
| @@ -405,11 +405,22 @@ static int unflatten_dt_nodes(const void *blob, | |||
| 405 | if (nodepp) | 405 | if (nodepp) |
| 406 | *nodepp = NULL; | 406 | *nodepp = NULL; |
| 407 | 407 | ||
| 408 | /* | ||
| 409 | * We're unflattening a device sub-tree if @dad is valid. There are | ||
| 410 | * possibly multiple nodes in the first level of depth. We need to | ||
| 411 | * set @depth to 1 to make fdt_next_node() happy as it bails | ||
| 412 | * immediately when negative @depth is found. Otherwise, the device | ||
| 413 | * nodes except the first one won't be unflattened successfully. | ||
| 414 | */ | ||
| 415 | if (dad) | ||
| 416 | depth = initial_depth = 1; | ||
| 417 | |||
| 408 | root = dad; | 418 | root = dad; |
| 409 | fpsizes[depth] = dad ? strlen(of_node_full_name(dad)) : 0; | 419 | fpsizes[depth] = dad ? strlen(of_node_full_name(dad)) : 0; |
| 410 | nps[depth] = dad; | 420 | nps[depth] = dad; |
| 421 | |||
| 411 | for (offset = 0; | 422 | for (offset = 0; |
| 412 | offset >= 0 && depth >= 0; | 423 | offset >= 0 && depth >= initial_depth; |
| 413 | offset = fdt_next_node(blob, offset, &depth)) { | 424 | offset = fdt_next_node(blob, offset, &depth)) { |
| 414 | if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH)) | 425 | if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH)) |
| 415 | continue; | 426 | continue; |
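Note on the of/fdt.c hunk above: when a parent node (@dad) is supplied, the walk now starts at depth 1 and loops while depth >= initial_depth, so fdt_next_node() no longer bails after the first top-level sibling of the sub-tree. Below is a minimal userspace sketch of that loop invariant, with fdt_next_node() replaced by a stub over a fake tag stream (all names and the tag layout are illustrative, not libfdt):

#include <stdio.h>

/* Fake tag stream for three sibling nodes with no enclosing root:
 * BEGIN(A) END BEGIN(B) END BEGIN(C) END STOP */
enum { TAG_BEGIN, TAG_END, TAG_STOP };
static const int tags[] = { TAG_BEGIN, TAG_END, TAG_BEGIN, TAG_END,
			    TAG_BEGIN, TAG_END, TAG_STOP };

/* Stand-in for fdt_next_node(): advance to the next BEGIN tag, but
 * bail with a negative offset as soon as depth would go negative. */
static int next_node(int offset, int *depth)
{
	for (int i = offset + 1; tags[i] != TAG_STOP; i++) {
		if (tags[i] == TAG_END) {
			if (--(*depth) < 0)
				return -1;
		} else {
			(*depth)++;
			return i;
		}
	}
	return -1;
}

static int walk(int initial_depth)
{
	int depth = initial_depth, visited = 0;

	for (int offset = 0; offset >= 0 && depth >= initial_depth;
	     offset = next_node(offset, &depth))
		visited++;
	return visited;
}

int main(void)
{
	printf("start depth 0: %d node(s) unflattened\n", walk(0));	/* 1 */
	printf("start depth 1: %d node(s) unflattened\n", walk(1));	/* 3 */
	return 0;
}
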
diff --git a/drivers/of/irq.c b/drivers/of/irq.c index e7bfc175b8e1..6ec743faabe8 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c | |||
| @@ -386,13 +386,13 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r) | |||
| 386 | EXPORT_SYMBOL_GPL(of_irq_to_resource); | 386 | EXPORT_SYMBOL_GPL(of_irq_to_resource); |
| 387 | 387 | ||
| 388 | /** | 388 | /** |
| 389 | * of_irq_get - Decode a node's IRQ and return it as a Linux irq number | 389 | * of_irq_get - Decode a node's IRQ and return it as a Linux IRQ number |
| 390 | * @dev: pointer to device tree node | 390 | * @dev: pointer to device tree node |
| 391 | * @index: zero-based index of the irq | 391 | * @index: zero-based index of the IRQ |
| 392 | * | ||
| 393 | * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain | ||
| 394 | * is not yet created. | ||
| 395 | * | 392 | * |
| 393 | * Returns Linux IRQ number on success, or 0 on the IRQ mapping failure, or | ||
| 394 | * -EPROBE_DEFER if the IRQ domain is not yet created, or error code in case | ||
| 395 | * of any other failure. | ||
| 396 | */ | 396 | */ |
| 397 | int of_irq_get(struct device_node *dev, int index) | 397 | int of_irq_get(struct device_node *dev, int index) |
| 398 | { | 398 | { |
| @@ -413,12 +413,13 @@ int of_irq_get(struct device_node *dev, int index) | |||
| 413 | EXPORT_SYMBOL_GPL(of_irq_get); | 413 | EXPORT_SYMBOL_GPL(of_irq_get); |
| 414 | 414 | ||
| 415 | /** | 415 | /** |
| 416 | * of_irq_get_byname - Decode a node's IRQ and return it as a Linux irq number | 416 | * of_irq_get_byname - Decode a node's IRQ and return it as a Linux IRQ number |
| 417 | * @dev: pointer to device tree node | 417 | * @dev: pointer to device tree node |
| 418 | * @name: irq name | 418 | * @name: IRQ name |
| 419 | * | 419 | * |
| 420 | * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain | 420 | * Returns Linux IRQ number on success, or 0 on the IRQ mapping failure, or |
| 421 | * is not yet created, or error code in case of any other failure. | 421 | * -EPROBE_DEFER if the IRQ domain is not yet created, or error code in case |
| 422 | * of any other failure. | ||
| 422 | */ | 423 | */ |
| 423 | int of_irq_get_byname(struct device_node *dev, const char *name) | 424 | int of_irq_get_byname(struct device_node *dev, const char *name) |
| 424 | { | 425 | { |
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c index ed01c0172e4a..216648233874 100644 --- a/drivers/of/of_reserved_mem.c +++ b/drivers/of/of_reserved_mem.c | |||
| @@ -127,8 +127,15 @@ static int __init __reserved_mem_alloc_size(unsigned long node, | |||
| 127 | } | 127 | } |
| 128 | 128 | ||
| 129 | /* Need adjust the alignment to satisfy the CMA requirement */ | 129 | /* Need adjust the alignment to satisfy the CMA requirement */ |
| 130 | if (IS_ENABLED(CONFIG_CMA) && of_flat_dt_is_compatible(node, "shared-dma-pool")) | 130 | if (IS_ENABLED(CONFIG_CMA) |
| 131 | align = max(align, (phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order)); | 131 | && of_flat_dt_is_compatible(node, "shared-dma-pool") |
| 132 | && of_get_flat_dt_prop(node, "reusable", NULL) | ||
| 133 | && !of_get_flat_dt_prop(node, "no-map", NULL)) { | ||
| 134 | unsigned long order = | ||
| 135 | max_t(unsigned long, MAX_ORDER - 1, pageblock_order); | ||
| 136 | |||
| 137 | align = max(align, (phys_addr_t)PAGE_SIZE << order); | ||
| 138 | } | ||
| 132 | 139 | ||
| 133 | prop = of_get_flat_dt_prop(node, "alloc-ranges", &len); | 140 | prop = of_get_flat_dt_prop(node, "alloc-ranges", &len); |
| 134 | if (prop) { | 141 | if (prop) { |
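Note on the of_reserved_mem.c hunk above: the CMA alignment is now only forced for "reusable" pools that are not marked "no-map", and the order used is max(MAX_ORDER - 1, pageblock_order). A quick arithmetic sketch of the resulting alignment follows; PAGE_SIZE, MAX_ORDER and pageblock_order are assumed example values, not taken from any particular configuration:

#include <stdio.h>

int main(void)
{
	const unsigned long page_size = 4096;		/* assumed */
	const unsigned long max_order = 11;		/* assumed */
	const unsigned long pageblock_order = 9;	/* assumed */

	unsigned long order = (max_order - 1 > pageblock_order) ?
			       max_order - 1 : pageblock_order;
	unsigned long align = page_size << order;

	printf("order %lu -> align %lu bytes (%lu MiB)\n",
	       order, align, align >> 20);
	return 0;
}
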
diff --git a/drivers/pci/vc.c b/drivers/pci/vc.c index dfbab61a1b47..1fa3a3219c45 100644 --- a/drivers/pci/vc.c +++ b/drivers/pci/vc.c | |||
| @@ -221,9 +221,9 @@ static int pci_vc_do_save_buffer(struct pci_dev *dev, int pos, | |||
| 221 | else | 221 | else |
| 222 | pci_write_config_word(dev, pos + PCI_VC_PORT_CTRL, | 222 | pci_write_config_word(dev, pos + PCI_VC_PORT_CTRL, |
| 223 | *(u16 *)buf); | 223 | *(u16 *)buf); |
| 224 | buf += 2; | 224 | buf += 4; |
| 225 | } | 225 | } |
| 226 | len += 2; | 226 | len += 4; |
| 227 | 227 | ||
| 228 | /* | 228 | /* |
| 229 | * If we have any Low Priority VCs and a VC Arbitration Table Offset | 229 | * If we have any Low Priority VCs and a VC Arbitration Table Offset |
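Note on the pci/vc.c hunk above: only a 16-bit Port VC Control register is written per port, but the cursor and running length now advance in 4-byte steps; the likely intent is to keep the save walk in step with the dword-based sizing pass and the dword-aligned entries that follow (the VC Arbitration Table mentioned in the context). A stand-alone sketch of that bookkeeping, illustrative only and not the PCI core code:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t save[16] = { 0 };
	uint8_t *buf = save;
	size_t len = 0;

	uint16_t port_ctrl = 0xbeef;		/* 16-bit register    */
	memcpy(buf, &port_ctrl, sizeof(port_ctrl));
	buf += 4;				/* keep dword stride  */
	len += 4;

	uint32_t arb_entry = 0x12345678;	/* 32-bit entry next  */
	memcpy(buf, &arb_entry, sizeof(arb_entry));
	len += 4;

	size_t off = (size_t)(buf - save);
	printf("32-bit entry at offset %zu (dword aligned: %s), len %zu\n",
	       off, off % 4 ? "no" : "yes", len);
	return 0;
}
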
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index 1b8304e1efaa..140436a046c0 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c | |||
| @@ -1010,8 +1010,8 @@ int arm_pmu_device_probe(struct platform_device *pdev, | |||
| 1010 | if (!ret) | 1010 | if (!ret) |
| 1011 | ret = init_fn(pmu); | 1011 | ret = init_fn(pmu); |
| 1012 | } else { | 1012 | } else { |
| 1013 | ret = probe_current_pmu(pmu, probe_table); | ||
| 1014 | cpumask_setall(&pmu->supported_cpus); | 1013 | cpumask_setall(&pmu->supported_cpus); |
| 1014 | ret = probe_current_pmu(pmu, probe_table); | ||
| 1015 | } | 1015 | } |
| 1016 | 1016 | ||
| 1017 | if (ret) { | 1017 | if (ret) { |
diff --git a/drivers/phy/phy-exynos-mipi-video.c b/drivers/phy/phy-exynos-mipi-video.c index cc093ebfda94..8b851f718123 100644 --- a/drivers/phy/phy-exynos-mipi-video.c +++ b/drivers/phy/phy-exynos-mipi-video.c | |||
| @@ -233,8 +233,12 @@ static inline int __is_running(const struct exynos_mipi_phy_desc *data, | |||
| 233 | struct exynos_mipi_video_phy *state) | 233 | struct exynos_mipi_video_phy *state) |
| 234 | { | 234 | { |
| 235 | u32 val; | 235 | u32 val; |
| 236 | int ret; | ||
| 237 | |||
| 238 | ret = regmap_read(state->regmaps[data->resetn_map], data->resetn_reg, &val); | ||
| 239 | if (ret) | ||
| 240 | return 0; | ||
| 236 | 241 | ||
| 237 | regmap_read(state->regmaps[data->resetn_map], data->resetn_reg, &val); | ||
| 238 | return val & data->resetn_val; | 242 | return val & data->resetn_val; |
| 239 | } | 243 | } |
| 240 | 244 | ||
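Note on the phy-exynos-mipi-video.c hunk above: __is_running() now checks the regmap_read() return value and reports "not running" when the read itself fails, rather than testing a possibly uninitialized val. A tiny sketch of that pattern with the register read stubbed out:

#include <errno.h>
#include <stdio.h>

#define RESETN_VAL 0x1u

/* Stub standing in for regmap_read(); can be forced to fail. */
static int fake_read(int fail, unsigned int *val)
{
	if (fail)
		return -EIO;
	*val = RESETN_VAL;
	return 0;
}

static int is_running(int read_fails)
{
	unsigned int val;

	if (fake_read(read_fails, &val))
		return 0;		/* failed read -> not running */
	return val & RESETN_VAL;
}

int main(void)
{
	printf("read ok:    running=%d\n", is_running(0));
	printf("read fails: running=%d\n", is_running(1));
	return 0;
}
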
diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c index 0a477d24cf76..bf46844dc387 100644 --- a/drivers/phy/phy-ti-pipe3.c +++ b/drivers/phy/phy-ti-pipe3.c | |||
| @@ -293,11 +293,18 @@ static int ti_pipe3_init(struct phy *x) | |||
| 293 | ret = ti_pipe3_dpll_wait_lock(phy); | 293 | ret = ti_pipe3_dpll_wait_lock(phy); |
| 294 | } | 294 | } |
| 295 | 295 | ||
| 296 | /* Program the DPLL only if not locked */ | 296 | /* SATA has issues if re-programmed when locked */ |
| 297 | val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS); | 297 | val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS); |
| 298 | if (!(val & PLL_LOCK)) | 298 | if ((val & PLL_LOCK) && of_device_is_compatible(phy->dev->of_node, |
| 299 | if (ti_pipe3_dpll_program(phy)) | 299 | "ti,phy-pipe3-sata")) |
| 300 | return -EINVAL; | 300 | return ret; |
| 301 | |||
| 302 | /* Program the DPLL */ | ||
| 303 | ret = ti_pipe3_dpll_program(phy); | ||
| 304 | if (ret) { | ||
| 305 | ti_pipe3_disable_clocks(phy); | ||
| 306 | return -EINVAL; | ||
| 307 | } | ||
| 301 | 308 | ||
| 302 | return ret; | 309 | return ret; |
| 303 | } | 310 | } |
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c index 6b6af6cba454..d9b10a39a2cf 100644 --- a/drivers/phy/phy-twl4030-usb.c +++ b/drivers/phy/phy-twl4030-usb.c | |||
| @@ -463,7 +463,8 @@ static int twl4030_phy_power_on(struct phy *phy) | |||
| 463 | twl4030_usb_set_mode(twl, twl->usb_mode); | 463 | twl4030_usb_set_mode(twl, twl->usb_mode); |
| 464 | if (twl->usb_mode == T2_USB_MODE_ULPI) | 464 | if (twl->usb_mode == T2_USB_MODE_ULPI) |
| 465 | twl4030_i2c_access(twl, 0); | 465 | twl4030_i2c_access(twl, 0); |
| 466 | schedule_delayed_work(&twl->id_workaround_work, 0); | 466 | twl->linkstat = MUSB_UNKNOWN; |
| 467 | schedule_delayed_work(&twl->id_workaround_work, HZ); | ||
| 467 | 468 | ||
| 468 | return 0; | 469 | return 0; |
| 469 | } | 470 | } |
| @@ -537,6 +538,7 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl) | |||
| 537 | struct twl4030_usb *twl = _twl; | 538 | struct twl4030_usb *twl = _twl; |
| 538 | enum musb_vbus_id_status status; | 539 | enum musb_vbus_id_status status; |
| 539 | bool status_changed = false; | 540 | bool status_changed = false; |
| 541 | int err; | ||
| 540 | 542 | ||
| 541 | status = twl4030_usb_linkstat(twl); | 543 | status = twl4030_usb_linkstat(twl); |
| 542 | 544 | ||
| @@ -567,7 +569,9 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl) | |||
| 567 | pm_runtime_mark_last_busy(twl->dev); | 569 | pm_runtime_mark_last_busy(twl->dev); |
| 568 | pm_runtime_put_autosuspend(twl->dev); | 570 | pm_runtime_put_autosuspend(twl->dev); |
| 569 | } | 571 | } |
| 570 | musb_mailbox(status); | 572 | err = musb_mailbox(status); |
| 573 | if (err) | ||
| 574 | twl->linkstat = MUSB_UNKNOWN; | ||
| 571 | } | 575 | } |
| 572 | 576 | ||
| 573 | /* don't schedule during sleep - irq works right then */ | 577 | /* don't schedule during sleep - irq works right then */ |
| @@ -595,7 +599,8 @@ static int twl4030_phy_init(struct phy *phy) | |||
| 595 | struct twl4030_usb *twl = phy_get_drvdata(phy); | 599 | struct twl4030_usb *twl = phy_get_drvdata(phy); |
| 596 | 600 | ||
| 597 | pm_runtime_get_sync(twl->dev); | 601 | pm_runtime_get_sync(twl->dev); |
| 598 | schedule_delayed_work(&twl->id_workaround_work, 0); | 602 | twl->linkstat = MUSB_UNKNOWN; |
| 603 | schedule_delayed_work(&twl->id_workaround_work, HZ); | ||
| 599 | pm_runtime_mark_last_busy(twl->dev); | 604 | pm_runtime_mark_last_busy(twl->dev); |
| 600 | pm_runtime_put_autosuspend(twl->dev); | 605 | pm_runtime_put_autosuspend(twl->dev); |
| 601 | 606 | ||
| @@ -763,7 +768,8 @@ static int twl4030_usb_remove(struct platform_device *pdev) | |||
| 763 | if (cable_present(twl->linkstat)) | 768 | if (cable_present(twl->linkstat)) |
| 764 | pm_runtime_put_noidle(twl->dev); | 769 | pm_runtime_put_noidle(twl->dev); |
| 765 | pm_runtime_mark_last_busy(twl->dev); | 770 | pm_runtime_mark_last_busy(twl->dev); |
| 766 | pm_runtime_put_sync_suspend(twl->dev); | 771 | pm_runtime_dont_use_autosuspend(&pdev->dev); |
| 772 | pm_runtime_put_sync(twl->dev); | ||
| 767 | pm_runtime_disable(twl->dev); | 773 | pm_runtime_disable(twl->dev); |
| 768 | 774 | ||
| 769 | /* autogate 60MHz ULPI clock, | 775 | /* autogate 60MHz ULPI clock, |
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index c06bb85c2839..3ec0025d19e7 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig | |||
| @@ -103,7 +103,6 @@ config DELL_SMBIOS | |||
| 103 | 103 | ||
| 104 | config DELL_LAPTOP | 104 | config DELL_LAPTOP |
| 105 | tristate "Dell Laptop Extras" | 105 | tristate "Dell Laptop Extras" |
| 106 | depends on X86 | ||
| 107 | depends on DELL_SMBIOS | 106 | depends on DELL_SMBIOS |
| 108 | depends on DMI | 107 | depends on DMI |
| 109 | depends on BACKLIGHT_CLASS_DEVICE | 108 | depends on BACKLIGHT_CLASS_DEVICE |
| @@ -505,7 +504,7 @@ config THINKPAD_ACPI_HOTKEY_POLL | |||
| 505 | 504 | ||
| 506 | config SENSORS_HDAPS | 505 | config SENSORS_HDAPS |
| 507 | tristate "Thinkpad Hard Drive Active Protection System (hdaps)" | 506 | tristate "Thinkpad Hard Drive Active Protection System (hdaps)" |
| 508 | depends on INPUT && X86 | 507 | depends on INPUT |
| 509 | select INPUT_POLLDEV | 508 | select INPUT_POLLDEV |
| 510 | default n | 509 | default n |
| 511 | help | 510 | help |
| @@ -749,7 +748,7 @@ config TOSHIBA_WMI | |||
| 749 | 748 | ||
| 750 | config ACPI_CMPC | 749 | config ACPI_CMPC |
| 751 | tristate "CMPC Laptop Extras" | 750 | tristate "CMPC Laptop Extras" |
| 752 | depends on X86 && ACPI | 751 | depends on ACPI |
| 753 | depends on RFKILL || RFKILL=n | 752 | depends on RFKILL || RFKILL=n |
| 754 | select INPUT | 753 | select INPUT |
| 755 | select BACKLIGHT_CLASS_DEVICE | 754 | select BACKLIGHT_CLASS_DEVICE |
| @@ -848,7 +847,7 @@ config INTEL_IMR | |||
| 848 | 847 | ||
| 849 | config INTEL_PMC_CORE | 848 | config INTEL_PMC_CORE |
| 850 | bool "Intel PMC Core driver" | 849 | bool "Intel PMC Core driver" |
| 851 | depends on X86 && PCI | 850 | depends on PCI |
| 852 | ---help--- | 851 | ---help--- |
| 853 | The Intel Platform Controller Hub for Intel Core SoCs provides access | 852 | The Intel Platform Controller Hub for Intel Core SoCs provides access |
| 854 | to Power Management Controller registers via a PCI interface. This | 853 | to Power Management Controller registers via a PCI interface. This |
| @@ -860,7 +859,7 @@ config INTEL_PMC_CORE | |||
| 860 | 859 | ||
| 861 | config IBM_RTL | 860 | config IBM_RTL |
| 862 | tristate "Device driver to enable PRTL support" | 861 | tristate "Device driver to enable PRTL support" |
| 863 | depends on X86 && PCI | 862 | depends on PCI |
| 864 | ---help--- | 863 | ---help--- |
| 865 | Enable support for IBM Premium Real Time Mode (PRTM). | 864 | Enable support for IBM Premium Real Time Mode (PRTM). |
| 866 | This module will allow you the enter and exit PRTM in the BIOS via | 865 | This module will allow you the enter and exit PRTM in the BIOS via |
| @@ -894,7 +893,6 @@ config XO15_EBOOK | |||
| 894 | 893 | ||
| 895 | config SAMSUNG_LAPTOP | 894 | config SAMSUNG_LAPTOP |
| 896 | tristate "Samsung Laptop driver" | 895 | tristate "Samsung Laptop driver" |
| 897 | depends on X86 | ||
| 898 | depends on RFKILL || RFKILL = n | 896 | depends on RFKILL || RFKILL = n |
| 899 | depends on ACPI_VIDEO || ACPI_VIDEO = n | 897 | depends on ACPI_VIDEO || ACPI_VIDEO = n |
| 900 | depends on BACKLIGHT_CLASS_DEVICE | 898 | depends on BACKLIGHT_CLASS_DEVICE |
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index 4a23fbc66b71..d1a091b93192 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c | |||
| @@ -567,6 +567,7 @@ static void ideapad_sysfs_exit(struct ideapad_private *priv) | |||
| 567 | static const struct key_entry ideapad_keymap[] = { | 567 | static const struct key_entry ideapad_keymap[] = { |
| 568 | { KE_KEY, 6, { KEY_SWITCHVIDEOMODE } }, | 568 | { KE_KEY, 6, { KEY_SWITCHVIDEOMODE } }, |
| 569 | { KE_KEY, 7, { KEY_CAMERA } }, | 569 | { KE_KEY, 7, { KEY_CAMERA } }, |
| 570 | { KE_KEY, 8, { KEY_MICMUTE } }, | ||
| 570 | { KE_KEY, 11, { KEY_F16 } }, | 571 | { KE_KEY, 11, { KEY_F16 } }, |
| 571 | { KE_KEY, 13, { KEY_WLAN } }, | 572 | { KE_KEY, 13, { KEY_WLAN } }, |
| 572 | { KE_KEY, 16, { KEY_PROG1 } }, | 573 | { KE_KEY, 16, { KEY_PROG1 } }, |
| @@ -809,6 +810,7 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data) | |||
| 809 | break; | 810 | break; |
| 810 | case 13: | 811 | case 13: |
| 811 | case 11: | 812 | case 11: |
| 813 | case 8: | ||
| 812 | case 7: | 814 | case 7: |
| 813 | case 6: | 815 | case 6: |
| 814 | ideapad_input_report(priv, vpc_bit); | 816 | ideapad_input_report(priv, vpc_bit); |
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index c3bfa1fe95bf..b65ce7519411 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c | |||
| @@ -2043,6 +2043,7 @@ static int hotkey_autosleep_ack; | |||
| 2043 | 2043 | ||
| 2044 | static u32 hotkey_orig_mask; /* events the BIOS had enabled */ | 2044 | static u32 hotkey_orig_mask; /* events the BIOS had enabled */ |
| 2045 | static u32 hotkey_all_mask; /* all events supported in fw */ | 2045 | static u32 hotkey_all_mask; /* all events supported in fw */ |
| 2046 | static u32 hotkey_adaptive_all_mask; /* all adaptive events supported in fw */ | ||
| 2046 | static u32 hotkey_reserved_mask; /* events better left disabled */ | 2047 | static u32 hotkey_reserved_mask; /* events better left disabled */ |
| 2047 | static u32 hotkey_driver_mask; /* events needed by the driver */ | 2048 | static u32 hotkey_driver_mask; /* events needed by the driver */ |
| 2048 | static u32 hotkey_user_mask; /* events visible to userspace */ | 2049 | static u32 hotkey_user_mask; /* events visible to userspace */ |
| @@ -2742,6 +2743,17 @@ static ssize_t hotkey_all_mask_show(struct device *dev, | |||
| 2742 | 2743 | ||
| 2743 | static DEVICE_ATTR_RO(hotkey_all_mask); | 2744 | static DEVICE_ATTR_RO(hotkey_all_mask); |
| 2744 | 2745 | ||
| 2746 | /* sysfs hotkey all_mask ----------------------------------------------- */ | ||
| 2747 | static ssize_t hotkey_adaptive_all_mask_show(struct device *dev, | ||
| 2748 | struct device_attribute *attr, | ||
| 2749 | char *buf) | ||
| 2750 | { | ||
| 2751 | return snprintf(buf, PAGE_SIZE, "0x%08x\n", | ||
| 2752 | hotkey_adaptive_all_mask | hotkey_source_mask); | ||
| 2753 | } | ||
| 2754 | |||
| 2755 | static DEVICE_ATTR_RO(hotkey_adaptive_all_mask); | ||
| 2756 | |||
| 2745 | /* sysfs hotkey recommended_mask --------------------------------------- */ | 2757 | /* sysfs hotkey recommended_mask --------------------------------------- */ |
| 2746 | static ssize_t hotkey_recommended_mask_show(struct device *dev, | 2758 | static ssize_t hotkey_recommended_mask_show(struct device *dev, |
| 2747 | struct device_attribute *attr, | 2759 | struct device_attribute *attr, |
| @@ -2985,6 +2997,7 @@ static struct attribute *hotkey_attributes[] __initdata = { | |||
| 2985 | &dev_attr_wakeup_hotunplug_complete.attr, | 2997 | &dev_attr_wakeup_hotunplug_complete.attr, |
| 2986 | &dev_attr_hotkey_mask.attr, | 2998 | &dev_attr_hotkey_mask.attr, |
| 2987 | &dev_attr_hotkey_all_mask.attr, | 2999 | &dev_attr_hotkey_all_mask.attr, |
| 3000 | &dev_attr_hotkey_adaptive_all_mask.attr, | ||
| 2988 | &dev_attr_hotkey_recommended_mask.attr, | 3001 | &dev_attr_hotkey_recommended_mask.attr, |
| 2989 | #ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL | 3002 | #ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL |
| 2990 | &dev_attr_hotkey_source_mask.attr, | 3003 | &dev_attr_hotkey_source_mask.attr, |
| @@ -3321,20 +3334,6 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) | |||
| 3321 | if (!tp_features.hotkey) | 3334 | if (!tp_features.hotkey) |
| 3322 | return 1; | 3335 | return 1; |
| 3323 | 3336 | ||
| 3324 | /* | ||
| 3325 | * Check if we have an adaptive keyboard, like on the | ||
| 3326 | * Lenovo Carbon X1 2014 (2nd Gen). | ||
| 3327 | */ | ||
| 3328 | if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) { | ||
| 3329 | if ((hkeyv >> 8) == 2) { | ||
| 3330 | tp_features.has_adaptive_kbd = true; | ||
| 3331 | res = sysfs_create_group(&tpacpi_pdev->dev.kobj, | ||
| 3332 | &adaptive_kbd_attr_group); | ||
| 3333 | if (res) | ||
| 3334 | goto err_exit; | ||
| 3335 | } | ||
| 3336 | } | ||
| 3337 | |||
| 3338 | quirks = tpacpi_check_quirks(tpacpi_hotkey_qtable, | 3337 | quirks = tpacpi_check_quirks(tpacpi_hotkey_qtable, |
| 3339 | ARRAY_SIZE(tpacpi_hotkey_qtable)); | 3338 | ARRAY_SIZE(tpacpi_hotkey_qtable)); |
| 3340 | 3339 | ||
| @@ -3357,30 +3356,70 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) | |||
| 3357 | A30, R30, R31, T20-22, X20-21, X22-24. Detected by checking | 3356 | A30, R30, R31, T20-22, X20-21, X22-24. Detected by checking |
| 3358 | for HKEY interface version 0x100 */ | 3357 | for HKEY interface version 0x100 */ |
| 3359 | if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) { | 3358 | if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) { |
| 3360 | if ((hkeyv >> 8) != 1) { | 3359 | vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY, |
| 3361 | pr_err("unknown version of the HKEY interface: 0x%x\n", | 3360 | "firmware HKEY interface version: 0x%x\n", |
| 3362 | hkeyv); | 3361 | hkeyv); |
| 3363 | pr_err("please report this to %s\n", TPACPI_MAIL); | 3362 | |
| 3364 | } else { | 3363 | switch (hkeyv >> 8) { |
| 3364 | case 1: | ||
| 3365 | /* | 3365 | /* |
| 3366 | * MHKV 0x100 in A31, R40, R40e, | 3366 | * MHKV 0x100 in A31, R40, R40e, |
| 3367 | * T4x, X31, and later | 3367 | * T4x, X31, and later |
| 3368 | */ | 3368 | */ |
| 3369 | vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY, | ||
| 3370 | "firmware HKEY interface version: 0x%x\n", | ||
| 3371 | hkeyv); | ||
| 3372 | 3369 | ||
| 3373 | /* Paranoia check AND init hotkey_all_mask */ | 3370 | /* Paranoia check AND init hotkey_all_mask */ |
| 3374 | if (!acpi_evalf(hkey_handle, &hotkey_all_mask, | 3371 | if (!acpi_evalf(hkey_handle, &hotkey_all_mask, |
| 3375 | "MHKA", "qd")) { | 3372 | "MHKA", "qd")) { |
| 3376 | pr_err("missing MHKA handler, " | 3373 | pr_err("missing MHKA handler, please report this to %s\n", |
| 3377 | "please report this to %s\n", | ||
| 3378 | TPACPI_MAIL); | 3374 | TPACPI_MAIL); |
| 3379 | /* Fallback: pre-init for FN+F3,F4,F12 */ | 3375 | /* Fallback: pre-init for FN+F3,F4,F12 */ |
| 3380 | hotkey_all_mask = 0x080cU; | 3376 | hotkey_all_mask = 0x080cU; |
| 3381 | } else { | 3377 | } else { |
| 3382 | tp_features.hotkey_mask = 1; | 3378 | tp_features.hotkey_mask = 1; |
| 3383 | } | 3379 | } |
| 3380 | break; | ||
| 3381 | |||
| 3382 | case 2: | ||
| 3383 | /* | ||
| 3384 | * MHKV 0x200 in X1, T460s, X260, T560, X1 Tablet (2016) | ||
| 3385 | */ | ||
| 3386 | |||
| 3387 | /* Paranoia check AND init hotkey_all_mask */ | ||
| 3388 | if (!acpi_evalf(hkey_handle, &hotkey_all_mask, | ||
| 3389 | "MHKA", "dd", 1)) { | ||
| 3390 | pr_err("missing MHKA handler, please report this to %s\n", | ||
| 3391 | TPACPI_MAIL); | ||
| 3392 | /* Fallback: pre-init for FN+F3,F4,F12 */ | ||
| 3393 | hotkey_all_mask = 0x080cU; | ||
| 3394 | } else { | ||
| 3395 | tp_features.hotkey_mask = 1; | ||
| 3396 | } | ||
| 3397 | |||
| 3398 | /* | ||
| 3399 | * Check if we have an adaptive keyboard, like on the | ||
| 3400 | * Lenovo Carbon X1 2014 (2nd Gen). | ||
| 3401 | */ | ||
| 3402 | if (acpi_evalf(hkey_handle, &hotkey_adaptive_all_mask, | ||
| 3403 | "MHKA", "dd", 2)) { | ||
| 3404 | if (hotkey_adaptive_all_mask != 0) { | ||
| 3405 | tp_features.has_adaptive_kbd = true; | ||
| 3406 | res = sysfs_create_group( | ||
| 3407 | &tpacpi_pdev->dev.kobj, | ||
| 3408 | &adaptive_kbd_attr_group); | ||
| 3409 | if (res) | ||
| 3410 | goto err_exit; | ||
| 3411 | } | ||
| 3412 | } else { | ||
| 3413 | tp_features.has_adaptive_kbd = false; | ||
| 3414 | hotkey_adaptive_all_mask = 0x0U; | ||
| 3415 | } | ||
| 3416 | break; | ||
| 3417 | |||
| 3418 | default: | ||
| 3419 | pr_err("unknown version of the HKEY interface: 0x%x\n", | ||
| 3420 | hkeyv); | ||
| 3421 | pr_err("please report this to %s\n", TPACPI_MAIL); | ||
| 3422 | break; | ||
| 3384 | } | 3423 | } |
| 3385 | } | 3424 | } |
| 3386 | 3425 | ||
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index dba3843c53b8..ed337a8c34ab 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c | |||
| @@ -457,7 +457,8 @@ int pwm_apply_state(struct pwm_device *pwm, struct pwm_state *state) | |||
| 457 | { | 457 | { |
| 458 | int err; | 458 | int err; |
| 459 | 459 | ||
| 460 | if (!pwm) | 460 | if (!pwm || !state || !state->period || |
| 461 | state->duty_cycle > state->period) | ||
| 461 | return -EINVAL; | 462 | return -EINVAL; |
| 462 | 463 | ||
| 463 | if (!memcmp(state, &pwm->state, sizeof(*state))) | 464 | if (!memcmp(state, &pwm->state, sizeof(*state))) |
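Note on the pwm/core.c hunk above: pwm_apply_state() now rejects a missing state, a zero period and a duty cycle longer than the period before anything reaches the driver. A stand-alone sketch of the same argument check; the struct and field names mirror the kernel ones, but this is not the kernel implementation:

#include <errno.h>
#include <stdio.h>

struct pwm_state {
	unsigned int period;		/* nanoseconds */
	unsigned int duty_cycle;	/* nanoseconds */
};

static int check_state(const struct pwm_state *state)
{
	if (!state || !state->period || state->duty_cycle > state->period)
		return -EINVAL;
	return 0;
}

int main(void)
{
	struct pwm_state ok  = { .period = 1000000, .duty_cycle = 250000 };
	struct pwm_state bad = { .period = 1000000, .duty_cycle = 2000000 };

	printf("valid state:   %d\n", check_state(&ok));	/* 0       */
	printf("invalid state: %d\n", check_state(&bad));	/* -EINVAL */
	return 0;
}
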
diff --git a/drivers/pwm/pwm-atmel-hlcdc.c b/drivers/pwm/pwm-atmel-hlcdc.c index f994c7eaf41c..14fc011faa32 100644 --- a/drivers/pwm/pwm-atmel-hlcdc.c +++ b/drivers/pwm/pwm-atmel-hlcdc.c | |||
| @@ -272,7 +272,7 @@ static int atmel_hlcdc_pwm_probe(struct platform_device *pdev) | |||
| 272 | chip->chip.of_pwm_n_cells = 3; | 272 | chip->chip.of_pwm_n_cells = 3; |
| 273 | chip->chip.can_sleep = 1; | 273 | chip->chip.can_sleep = 1; |
| 274 | 274 | ||
| 275 | ret = pwmchip_add(&chip->chip); | 275 | ret = pwmchip_add_with_polarity(&chip->chip, PWM_POLARITY_INVERSED); |
| 276 | if (ret) { | 276 | if (ret) { |
| 277 | clk_disable_unprepare(hlcdc->periph_clk); | 277 | clk_disable_unprepare(hlcdc->periph_clk); |
| 278 | return ret; | 278 | return ret; |
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c index d98599249a05..01695d48dd54 100644 --- a/drivers/pwm/sysfs.c +++ b/drivers/pwm/sysfs.c | |||
| @@ -152,7 +152,7 @@ static ssize_t enable_store(struct device *child, | |||
| 152 | goto unlock; | 152 | goto unlock; |
| 153 | } | 153 | } |
| 154 | 154 | ||
| 155 | pwm_apply_state(pwm, &state); | 155 | ret = pwm_apply_state(pwm, &state); |
| 156 | 156 | ||
| 157 | unlock: | 157 | unlock: |
| 158 | mutex_unlock(&export->lock); | 158 | mutex_unlock(&export->lock); |
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c index 56a17ec5b5ef..526bf23dcb49 100644 --- a/drivers/regulator/qcom_smd-regulator.c +++ b/drivers/regulator/qcom_smd-regulator.c | |||
| @@ -140,6 +140,19 @@ static const struct regulator_ops rpm_smps_ldo_ops = { | |||
| 140 | .enable = rpm_reg_enable, | 140 | .enable = rpm_reg_enable, |
| 141 | .disable = rpm_reg_disable, | 141 | .disable = rpm_reg_disable, |
| 142 | .is_enabled = rpm_reg_is_enabled, | 142 | .is_enabled = rpm_reg_is_enabled, |
| 143 | .list_voltage = regulator_list_voltage_linear_range, | ||
| 144 | |||
| 145 | .get_voltage = rpm_reg_get_voltage, | ||
| 146 | .set_voltage = rpm_reg_set_voltage, | ||
| 147 | |||
| 148 | .set_load = rpm_reg_set_load, | ||
| 149 | }; | ||
| 150 | |||
| 151 | static const struct regulator_ops rpm_smps_ldo_ops_fixed = { | ||
| 152 | .enable = rpm_reg_enable, | ||
| 153 | .disable = rpm_reg_disable, | ||
| 154 | .is_enabled = rpm_reg_is_enabled, | ||
| 155 | .list_voltage = regulator_list_voltage_linear_range, | ||
| 143 | 156 | ||
| 144 | .get_voltage = rpm_reg_get_voltage, | 157 | .get_voltage = rpm_reg_get_voltage, |
| 145 | .set_voltage = rpm_reg_set_voltage, | 158 | .set_voltage = rpm_reg_set_voltage, |
| @@ -247,7 +260,7 @@ static const struct regulator_desc pm8941_nldo = { | |||
| 247 | static const struct regulator_desc pm8941_lnldo = { | 260 | static const struct regulator_desc pm8941_lnldo = { |
| 248 | .fixed_uV = 1740000, | 261 | .fixed_uV = 1740000, |
| 249 | .n_voltages = 1, | 262 | .n_voltages = 1, |
| 250 | .ops = &rpm_smps_ldo_ops, | 263 | .ops = &rpm_smps_ldo_ops_fixed, |
| 251 | }; | 264 | }; |
| 252 | 265 | ||
| 253 | static const struct regulator_desc pm8941_switch = { | 266 | static const struct regulator_desc pm8941_switch = { |
diff --git a/drivers/regulator/tps51632-regulator.c b/drivers/regulator/tps51632-regulator.c index 572816e30095..c139890c1514 100644 --- a/drivers/regulator/tps51632-regulator.c +++ b/drivers/regulator/tps51632-regulator.c | |||
| @@ -94,11 +94,14 @@ static int tps51632_dcdc_set_ramp_delay(struct regulator_dev *rdev, | |||
| 94 | int ramp_delay) | 94 | int ramp_delay) |
| 95 | { | 95 | { |
| 96 | struct tps51632_chip *tps = rdev_get_drvdata(rdev); | 96 | struct tps51632_chip *tps = rdev_get_drvdata(rdev); |
| 97 | int bit = ramp_delay/6000; | 97 | int bit; |
| 98 | int ret; | 98 | int ret; |
| 99 | 99 | ||
| 100 | if (bit) | 100 | if (ramp_delay == 0) |
| 101 | bit--; | 101 | bit = 0; |
| 102 | else | ||
| 103 | bit = DIV_ROUND_UP(ramp_delay, 6000) - 1; | ||
| 104 | |||
| 102 | ret = regmap_write(tps->regmap, TPS51632_SLEW_REGS, BIT(bit)); | 105 | ret = regmap_write(tps->regmap, TPS51632_SLEW_REGS, BIT(bit)); |
| 103 | if (ret < 0) | 106 | if (ret < 0) |
| 104 | dev_err(tps->dev, "SLEW reg write failed, err %d\n", ret); | 107 | dev_err(tps->dev, "SLEW reg write failed, err %d\n", ret); |
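Note on the tps51632-regulator.c hunk above: the old computation rounded a requested ramp delay down to the previous 6000 step, while DIV_ROUND_UP now rounds it up, so a request that falls between steps selects the next-higher one; a zero request keeps bit 0. A quick comparison of the two formulas (the 6000 step size comes from the divisor in the driver, any register semantics beyond that are assumed; DIV_ROUND_UP below matches the kernel macro of the same name):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static int slew_bit_new(int ramp_delay)
{
	if (ramp_delay == 0)
		return 0;
	return DIV_ROUND_UP(ramp_delay, 6000) - 1;
}

static int slew_bit_old(int ramp_delay)
{
	int bit = ramp_delay / 6000;

	return bit ? bit - 1 : 0;
}

int main(void)
{
	int requests[] = { 0, 6000, 7000, 12000, 18000 };

	for (unsigned int i = 0; i < sizeof(requests) / sizeof(requests[0]); i++)
		printf("ramp %5d -> old bit %d, new bit %d\n",
		       requests[i], slew_bit_old(requests[i]),
		       slew_bit_new(requests[i]));
	return 0;
}
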
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c index d4c285688ce9..3ddc85e6efd6 100644 --- a/drivers/scsi/53c700.c +++ b/drivers/scsi/53c700.c | |||
| @@ -1122,7 +1122,7 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp, | |||
| 1122 | } else { | 1122 | } else { |
| 1123 | struct scsi_cmnd *SCp; | 1123 | struct scsi_cmnd *SCp; |
| 1124 | 1124 | ||
| 1125 | SCp = scsi_host_find_tag(SDp->host, SCSI_NO_TAG); | 1125 | SCp = SDp->current_cmnd; |
| 1126 | if(unlikely(SCp == NULL)) { | 1126 | if(unlikely(SCp == NULL)) { |
| 1127 | sdev_printk(KERN_ERR, SDp, | 1127 | sdev_printk(KERN_ERR, SDp, |
| 1128 | "no saved request for untagged cmd\n"); | 1128 | "no saved request for untagged cmd\n"); |
| @@ -1826,7 +1826,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *) | |||
| 1826 | slot->tag, slot); | 1826 | slot->tag, slot); |
| 1827 | } else { | 1827 | } else { |
| 1828 | slot->tag = SCSI_NO_TAG; | 1828 | slot->tag = SCSI_NO_TAG; |
| 1829 | /* must populate current_cmnd for scsi_host_find_tag to work */ | 1829 | /* save current command for reselection */ |
| 1830 | SCp->device->current_cmnd = SCp; | 1830 | SCp->device->current_cmnd = SCp; |
| 1831 | } | 1831 | } |
| 1832 | /* sanity check: some of the commands generated by the mid-layer | 1832 | /* sanity check: some of the commands generated by the mid-layer |
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index 3408578b08d6..ff41c310c900 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c | |||
| @@ -230,6 +230,7 @@ static struct { | |||
| 230 | {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, | 230 | {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, |
| 231 | {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC}, | 231 | {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC}, |
| 232 | {"Promise", "", NULL, BLIST_SPARSELUN}, | 232 | {"Promise", "", NULL, BLIST_SPARSELUN}, |
| 233 | {"QEMU", "QEMU CD-ROM", NULL, BLIST_SKIP_VPD_PAGES}, | ||
| 233 | {"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024}, | 234 | {"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024}, |
| 234 | {"SYNOLOGY", "iSCSI Storage", NULL, BLIST_MAX_1024}, | 235 | {"SYNOLOGY", "iSCSI Storage", NULL, BLIST_MAX_1024}, |
| 235 | {"QUANTUM", "XP34301", "1071", BLIST_NOTQ}, | 236 | {"QUANTUM", "XP34301", "1071", BLIST_NOTQ}, |
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index a8b610eaa0ca..106a6adbd6f1 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c | |||
| @@ -1128,7 +1128,6 @@ static int scsi_eh_action(struct scsi_cmnd *scmd, int rtn) | |||
| 1128 | */ | 1128 | */ |
| 1129 | void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q) | 1129 | void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q) |
| 1130 | { | 1130 | { |
| 1131 | scmd->device->host->host_failed--; | ||
| 1132 | scmd->eh_eflags = 0; | 1131 | scmd->eh_eflags = 0; |
| 1133 | list_move_tail(&scmd->eh_entry, done_q); | 1132 | list_move_tail(&scmd->eh_entry, done_q); |
| 1134 | } | 1133 | } |
| @@ -2227,6 +2226,9 @@ int scsi_error_handler(void *data) | |||
| 2227 | else | 2226 | else |
| 2228 | scsi_unjam_host(shost); | 2227 | scsi_unjam_host(shost); |
| 2229 | 2228 | ||
| 2229 | /* All scmds have been handled */ | ||
| 2230 | shost->host_failed = 0; | ||
| 2231 | |||
| 2230 | /* | 2232 | /* |
| 2231 | * Note - if the above fails completely, the action is to take | 2233 | * Note - if the above fails completely, the action is to take |
| 2232 | * individual devices offline and flush the queue of any | 2234 | * individual devices offline and flush the queue of any |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index f459dff30512..60bff78e9ead 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
| @@ -2867,10 +2867,10 @@ static int sd_revalidate_disk(struct gendisk *disk) | |||
| 2867 | if (sdkp->opt_xfer_blocks && | 2867 | if (sdkp->opt_xfer_blocks && |
| 2868 | sdkp->opt_xfer_blocks <= dev_max && | 2868 | sdkp->opt_xfer_blocks <= dev_max && |
| 2869 | sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS && | 2869 | sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS && |
| 2870 | sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_SIZE) | 2870 | logical_to_bytes(sdp, sdkp->opt_xfer_blocks) >= PAGE_SIZE) { |
| 2871 | rw_max = q->limits.io_opt = | 2871 | q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks); |
| 2872 | sdkp->opt_xfer_blocks * sdp->sector_size; | 2872 | rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks); |
| 2873 | else | 2873 | } else |
| 2874 | rw_max = BLK_DEF_MAX_SECTORS; | 2874 | rw_max = BLK_DEF_MAX_SECTORS; |
| 2875 | 2875 | ||
| 2876 | /* Combine with controller limits */ | 2876 | /* Combine with controller limits */ |
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index 654630bb7d0e..765a6f1ac1b7 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h | |||
| @@ -151,6 +151,11 @@ static inline sector_t logical_to_sectors(struct scsi_device *sdev, sector_t blo | |||
| 151 | return blocks << (ilog2(sdev->sector_size) - 9); | 151 | return blocks << (ilog2(sdev->sector_size) - 9); |
| 152 | } | 152 | } |
| 153 | 153 | ||
| 154 | static inline unsigned int logical_to_bytes(struct scsi_device *sdev, sector_t blocks) | ||
| 155 | { | ||
| 156 | return blocks * sdev->sector_size; | ||
| 157 | } | ||
| 158 | |||
| 154 | /* | 159 | /* |
| 155 | * A DIF-capable target device can be formatted with different | 160 | * A DIF-capable target device can be formatted with different |
| 156 | * protection schemes. Currently 0 through 3 are defined: | 161 | * protection schemes. Currently 0 through 3 are defined: |
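Note on the sd.c and sd.h hunks above: the optimal transfer size is now converted once to bytes for q->limits.io_opt and once to 512-byte sectors for rw_max through the two helpers, instead of the open-coded multiplication. A sketch of the two conversions with an assumed 4096-byte logical block size; the helpers are re-implemented here for userspace and ilog2() is a local stand-in:

#include <stdio.h>

static unsigned int ilog2(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static unsigned long long logical_to_sectors(unsigned int sector_size,
					     unsigned long long blocks)
{
	return blocks << (ilog2(sector_size) - 9);	/* 512-byte units */
}

static unsigned long long logical_to_bytes(unsigned int sector_size,
					   unsigned long long blocks)
{
	return blocks * sector_size;
}

int main(void)
{
	unsigned int sector_size = 4096;		/* assumed 4Kn drive */
	unsigned long long opt_xfer_blocks = 2048;

	printf("io_opt = %llu bytes\n",
	       logical_to_bytes(sector_size, opt_xfer_blocks));
	printf("rw_max = %llu sectors\n",
	       logical_to_sectors(sector_size, opt_xfer_blocks));
	return 0;
}
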
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c index bbfee53cfcf5..845e49a52430 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c | |||
| @@ -2521,12 +2521,13 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) | |||
| 2521 | return 0; | 2521 | return 0; |
| 2522 | 2522 | ||
| 2523 | failed: | 2523 | failed: |
| 2524 | if (ni) | 2524 | if (ni) { |
| 2525 | lnet_ni_decref(ni); | 2525 | lnet_ni_decref(ni); |
| 2526 | rej.ibr_cp.ibcp_queue_depth = kiblnd_msg_queue_size(version, ni); | ||
| 2527 | rej.ibr_cp.ibcp_max_frags = kiblnd_rdma_frags(version, ni); | ||
| 2528 | } | ||
| 2526 | 2529 | ||
| 2527 | rej.ibr_version = version; | 2530 | rej.ibr_version = version; |
| 2528 | rej.ibr_cp.ibcp_queue_depth = kiblnd_msg_queue_size(version, ni); | ||
| 2529 | rej.ibr_cp.ibcp_max_frags = kiblnd_rdma_frags(version, ni); | ||
| 2530 | kiblnd_reject(cmid, &rej); | 2531 | kiblnd_reject(cmid, &rej); |
| 2531 | 2532 | ||
| 2532 | return -ECONNREFUSED; | 2533 | return -ECONNREFUSED; |
diff --git a/drivers/staging/rtl8188eu/core/rtw_efuse.c b/drivers/staging/rtl8188eu/core/rtw_efuse.c index c17870cddb5b..fbce1f7e68ca 100644 --- a/drivers/staging/rtl8188eu/core/rtw_efuse.c +++ b/drivers/staging/rtl8188eu/core/rtw_efuse.c | |||
| @@ -102,7 +102,7 @@ efuse_phymap_to_logical(u8 *phymap, u16 _offset, u16 _size_byte, u8 *pbuf) | |||
| 102 | if (!efuseTbl) | 102 | if (!efuseTbl) |
| 103 | return; | 103 | return; |
| 104 | 104 | ||
| 105 | eFuseWord = (u16 **)rtw_malloc2d(EFUSE_MAX_SECTION_88E, EFUSE_MAX_WORD_UNIT, sizeof(*eFuseWord)); | 105 | eFuseWord = (u16 **)rtw_malloc2d(EFUSE_MAX_SECTION_88E, EFUSE_MAX_WORD_UNIT, sizeof(u16)); |
| 106 | if (!eFuseWord) { | 106 | if (!eFuseWord) { |
| 107 | DBG_88E("%s: alloc eFuseWord fail!\n", __func__); | 107 | DBG_88E("%s: alloc eFuseWord fail!\n", __func__); |
| 108 | goto eFuseWord_failed; | 108 | goto eFuseWord_failed; |
diff --git a/drivers/staging/rtl8188eu/hal/usb_halinit.c b/drivers/staging/rtl8188eu/hal/usb_halinit.c index 87ea3b844951..363f3a34ddce 100644 --- a/drivers/staging/rtl8188eu/hal/usb_halinit.c +++ b/drivers/staging/rtl8188eu/hal/usb_halinit.c | |||
| @@ -2072,7 +2072,8 @@ void rtl8188eu_set_hal_ops(struct adapter *adapt) | |||
| 2072 | { | 2072 | { |
| 2073 | struct hal_ops *halfunc = &adapt->HalFunc; | 2073 | struct hal_ops *halfunc = &adapt->HalFunc; |
| 2074 | 2074 | ||
| 2075 | adapt->HalData = kzalloc(sizeof(*adapt->HalData), GFP_KERNEL); | 2075 | |
| 2076 | adapt->HalData = kzalloc(sizeof(struct hal_data_8188e), GFP_KERNEL); | ||
| 2076 | if (!adapt->HalData) | 2077 | if (!adapt->HalData) |
| 2077 | DBG_88E("cant not alloc memory for HAL DATA\n"); | 2078 | DBG_88E("cant not alloc memory for HAL DATA\n"); |
| 2078 | 2079 | ||
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index 6ceac4f2d4b2..5b4b47ed948b 100644 --- a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c | |||
| @@ -857,14 +857,6 @@ __cpufreq_cooling_register(struct device_node *np, | |||
| 857 | goto free_power_table; | 857 | goto free_power_table; |
| 858 | } | 858 | } |
| 859 | 859 | ||
| 860 | snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d", | ||
| 861 | cpufreq_dev->id); | ||
| 862 | |||
| 863 | cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev, | ||
| 864 | &cpufreq_cooling_ops); | ||
| 865 | if (IS_ERR(cool_dev)) | ||
| 866 | goto remove_idr; | ||
| 867 | |||
| 868 | /* Fill freq-table in descending order of frequencies */ | 860 | /* Fill freq-table in descending order of frequencies */ |
| 869 | for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) { | 861 | for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) { |
| 870 | freq = find_next_max(table, freq); | 862 | freq = find_next_max(table, freq); |
| @@ -877,6 +869,14 @@ __cpufreq_cooling_register(struct device_node *np, | |||
| 877 | pr_debug("%s: freq:%u KHz\n", __func__, freq); | 869 | pr_debug("%s: freq:%u KHz\n", __func__, freq); |
| 878 | } | 870 | } |
| 879 | 871 | ||
| 872 | snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d", | ||
| 873 | cpufreq_dev->id); | ||
| 874 | |||
| 875 | cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev, | ||
| 876 | &cpufreq_cooling_ops); | ||
| 877 | if (IS_ERR(cool_dev)) | ||
| 878 | goto remove_idr; | ||
| 879 | |||
| 880 | cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0]; | 880 | cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0]; |
| 881 | cpufreq_dev->cool_dev = cool_dev; | 881 | cpufreq_dev->cool_dev = cool_dev; |
| 882 | 882 | ||
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 6dc810bce295..944a6dca0fcb 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c | |||
| @@ -44,6 +44,9 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
| 44 | /* Creative SB Audigy 2 NX */ | 44 | /* Creative SB Audigy 2 NX */ |
| 45 | { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME }, | 45 | { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME }, |
| 46 | 46 | ||
| 47 | /* USB3503 */ | ||
| 48 | { USB_DEVICE(0x0424, 0x3503), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
| 49 | |||
| 47 | /* Microsoft Wireless Laser Mouse 6000 Receiver */ | 50 | /* Microsoft Wireless Laser Mouse 6000 Receiver */ |
| 48 | { USB_DEVICE(0x045e, 0x00e1), .driver_info = USB_QUIRK_RESET_RESUME }, | 51 | { USB_DEVICE(0x045e, 0x00e1), .driver_info = USB_QUIRK_RESET_RESUME }, |
| 49 | 52 | ||
| @@ -173,6 +176,10 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
| 173 | /* MAYA44USB sound device */ | 176 | /* MAYA44USB sound device */ |
| 174 | { USB_DEVICE(0x0a92, 0x0091), .driver_info = USB_QUIRK_RESET_RESUME }, | 177 | { USB_DEVICE(0x0a92, 0x0091), .driver_info = USB_QUIRK_RESET_RESUME }, |
| 175 | 178 | ||
| 179 | /* ASUS Base Station(T100) */ | ||
| 180 | { USB_DEVICE(0x0b05, 0x17e0), .driver_info = | ||
| 181 | USB_QUIRK_IGNORE_REMOTE_WAKEUP }, | ||
| 182 | |||
| 176 | /* Action Semiconductor flash disk */ | 183 | /* Action Semiconductor flash disk */ |
| 177 | { USB_DEVICE(0x10d6, 0x2200), .driver_info = | 184 | { USB_DEVICE(0x10d6, 0x2200), .driver_info = |
| 178 | USB_QUIRK_STRING_FETCH_255 }, | 185 | USB_QUIRK_STRING_FETCH_255 }, |
| @@ -188,26 +195,22 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
| 188 | { USB_DEVICE(0x1908, 0x1315), .driver_info = | 195 | { USB_DEVICE(0x1908, 0x1315), .driver_info = |
| 189 | USB_QUIRK_HONOR_BNUMINTERFACES }, | 196 | USB_QUIRK_HONOR_BNUMINTERFACES }, |
| 190 | 197 | ||
| 191 | /* INTEL VALUE SSD */ | ||
| 192 | { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
| 193 | |||
| 194 | /* USB3503 */ | ||
| 195 | { USB_DEVICE(0x0424, 0x3503), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
| 196 | |||
| 197 | /* ASUS Base Station(T100) */ | ||
| 198 | { USB_DEVICE(0x0b05, 0x17e0), .driver_info = | ||
| 199 | USB_QUIRK_IGNORE_REMOTE_WAKEUP }, | ||
| 200 | |||
| 201 | /* Protocol and OTG Electrical Test Device */ | 198 | /* Protocol and OTG Electrical Test Device */ |
| 202 | { USB_DEVICE(0x1a0a, 0x0200), .driver_info = | 199 | { USB_DEVICE(0x1a0a, 0x0200), .driver_info = |
| 203 | USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, | 200 | USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, |
| 204 | 201 | ||
| 202 | /* Acer C120 LED Projector */ | ||
| 203 | { USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM }, | ||
| 204 | |||
| 205 | /* Blackmagic Design Intensity Shuttle */ | 205 | /* Blackmagic Design Intensity Shuttle */ |
| 206 | { USB_DEVICE(0x1edb, 0xbd3b), .driver_info = USB_QUIRK_NO_LPM }, | 206 | { USB_DEVICE(0x1edb, 0xbd3b), .driver_info = USB_QUIRK_NO_LPM }, |
| 207 | 207 | ||
| 208 | /* Blackmagic Design UltraStudio SDI */ | 208 | /* Blackmagic Design UltraStudio SDI */ |
| 209 | { USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM }, | 209 | { USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM }, |
| 210 | 210 | ||
| 211 | /* INTEL VALUE SSD */ | ||
| 212 | { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
| 213 | |||
| 211 | { } /* terminating entry must be last */ | 214 | { } /* terminating entry must be last */ |
| 212 | }; | 215 | }; |
| 213 | 216 | ||
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h index 3c58d633ce80..dec0b21fc626 100644 --- a/drivers/usb/dwc2/core.h +++ b/drivers/usb/dwc2/core.h | |||
| @@ -64,6 +64,17 @@ | |||
| 64 | DWC2_TRACE_SCHEDULER_VB(pr_fmt("%s: SCH: " fmt), \ | 64 | DWC2_TRACE_SCHEDULER_VB(pr_fmt("%s: SCH: " fmt), \ |
| 65 | dev_name(hsotg->dev), ##__VA_ARGS__) | 65 | dev_name(hsotg->dev), ##__VA_ARGS__) |
| 66 | 66 | ||
| 67 | #ifdef CONFIG_MIPS | ||
| 68 | /* | ||
| 69 | * There are some MIPS machines that can run in either big-endian | ||
| 70 | * or little-endian mode and that use the dwc2 register without | ||
| 71 | * a byteswap in both ways. | ||
| 72 | * Unlike other architectures, MIPS apparently does not require a | ||
| 73 | * barrier before the __raw_writel() to synchronize with DMA but does | ||
| 74 | * require the barrier after the __raw_writel() to serialize a set of | ||
| 75 | * writes. This set of operations was added specifically for MIPS and | ||
| 76 | * should only be used there. | ||
| 77 | */ | ||
| 67 | static inline u32 dwc2_readl(const void __iomem *addr) | 78 | static inline u32 dwc2_readl(const void __iomem *addr) |
| 68 | { | 79 | { |
| 69 | u32 value = __raw_readl(addr); | 80 | u32 value = __raw_readl(addr); |
| @@ -90,6 +101,22 @@ static inline void dwc2_writel(u32 value, void __iomem *addr) | |||
| 90 | pr_info("INFO:: wrote %08x to %p\n", value, addr); | 101 | pr_info("INFO:: wrote %08x to %p\n", value, addr); |
| 91 | #endif | 102 | #endif |
| 92 | } | 103 | } |
| 104 | #else | ||
| 105 | /* Normal architectures just use readl/write */ | ||
| 106 | static inline u32 dwc2_readl(const void __iomem *addr) | ||
| 107 | { | ||
| 108 | return readl(addr); | ||
| 109 | } | ||
| 110 | |||
| 111 | static inline void dwc2_writel(u32 value, void __iomem *addr) | ||
| 112 | { | ||
| 113 | writel(value, addr); | ||
| 114 | |||
| 115 | #ifdef DWC2_LOG_WRITES | ||
| 116 | pr_info("info:: wrote %08x to %p\n", value, addr); | ||
| 117 | #endif | ||
| 118 | } | ||
| 119 | #endif | ||
| 93 | 120 | ||
| 94 | /* Maximum number of Endpoints/HostChannels */ | 121 | /* Maximum number of Endpoints/HostChannels */ |
| 95 | #define MAX_EPS_CHANNELS 16 | 122 | #define MAX_EPS_CHANNELS 16 |
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 4c5e3005e1dc..26cf09d0fe3c 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c | |||
| @@ -1018,7 +1018,7 @@ static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg, | |||
| 1018 | return 1; | 1018 | return 1; |
| 1019 | } | 1019 | } |
| 1020 | 1020 | ||
| 1021 | static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value); | 1021 | static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now); |
| 1022 | 1022 | ||
| 1023 | /** | 1023 | /** |
| 1024 | * get_ep_head - return the first request on the endpoint | 1024 | * get_ep_head - return the first request on the endpoint |
| @@ -1094,7 +1094,7 @@ static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg, | |||
| 1094 | case USB_ENDPOINT_HALT: | 1094 | case USB_ENDPOINT_HALT: |
| 1095 | halted = ep->halted; | 1095 | halted = ep->halted; |
| 1096 | 1096 | ||
| 1097 | dwc2_hsotg_ep_sethalt(&ep->ep, set); | 1097 | dwc2_hsotg_ep_sethalt(&ep->ep, set, true); |
| 1098 | 1098 | ||
| 1099 | ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0); | 1099 | ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0); |
| 1100 | if (ret) { | 1100 | if (ret) { |
| @@ -2948,8 +2948,13 @@ static int dwc2_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req) | |||
| 2948 | * dwc2_hsotg_ep_sethalt - set halt on a given endpoint | 2948 | * dwc2_hsotg_ep_sethalt - set halt on a given endpoint |
| 2949 | * @ep: The endpoint to set halt. | 2949 | * @ep: The endpoint to set halt. |
| 2950 | * @value: Set or unset the halt. | 2950 | * @value: Set or unset the halt. |
| 2951 | * @now: If true, stall the endpoint now. Otherwise return -EAGAIN if | ||
| 2952 | * the endpoint is busy processing requests. | ||
| 2953 | * | ||
| 2954 | * We need to stall the endpoint immediately if request comes from set_feature | ||
| 2955 | * protocol command handler. | ||
| 2951 | */ | 2956 | */ |
| 2952 | static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value) | 2957 | static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now) |
| 2953 | { | 2958 | { |
| 2954 | struct dwc2_hsotg_ep *hs_ep = our_ep(ep); | 2959 | struct dwc2_hsotg_ep *hs_ep = our_ep(ep); |
| 2955 | struct dwc2_hsotg *hs = hs_ep->parent; | 2960 | struct dwc2_hsotg *hs = hs_ep->parent; |
| @@ -2969,6 +2974,17 @@ static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value) | |||
| 2969 | return 0; | 2974 | return 0; |
| 2970 | } | 2975 | } |
| 2971 | 2976 | ||
| 2977 | if (hs_ep->isochronous) { | ||
| 2978 | dev_err(hs->dev, "%s is Isochronous Endpoint\n", ep->name); | ||
| 2979 | return -EINVAL; | ||
| 2980 | } | ||
| 2981 | |||
| 2982 | if (!now && value && !list_empty(&hs_ep->queue)) { | ||
| 2983 | dev_dbg(hs->dev, "%s request is pending, cannot halt\n", | ||
| 2984 | ep->name); | ||
| 2985 | return -EAGAIN; | ||
| 2986 | } | ||
| 2987 | |||
| 2972 | if (hs_ep->dir_in) { | 2988 | if (hs_ep->dir_in) { |
| 2973 | epreg = DIEPCTL(index); | 2989 | epreg = DIEPCTL(index); |
| 2974 | epctl = dwc2_readl(hs->regs + epreg); | 2990 | epctl = dwc2_readl(hs->regs + epreg); |
| @@ -3020,7 +3036,7 @@ static int dwc2_hsotg_ep_sethalt_lock(struct usb_ep *ep, int value) | |||
| 3020 | int ret = 0; | 3036 | int ret = 0; |
| 3021 | 3037 | ||
| 3022 | spin_lock_irqsave(&hs->lock, flags); | 3038 | spin_lock_irqsave(&hs->lock, flags); |
| 3023 | ret = dwc2_hsotg_ep_sethalt(ep, value); | 3039 | ret = dwc2_hsotg_ep_sethalt(ep, value, false); |
| 3024 | spin_unlock_irqrestore(&hs->lock, flags); | 3040 | spin_unlock_irqrestore(&hs->lock, flags); |
| 3025 | 3041 | ||
| 3026 | return ret; | 3042 | return ret; |
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index 7ddf9449a063..654050684f4f 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h | |||
| @@ -402,6 +402,7 @@ | |||
| 402 | #define DWC3_DEPCMD_GET_RSC_IDX(x) (((x) >> DWC3_DEPCMD_PARAM_SHIFT) & 0x7f) | 402 | #define DWC3_DEPCMD_GET_RSC_IDX(x) (((x) >> DWC3_DEPCMD_PARAM_SHIFT) & 0x7f) |
| 403 | #define DWC3_DEPCMD_STATUS(x) (((x) >> 12) & 0x0F) | 403 | #define DWC3_DEPCMD_STATUS(x) (((x) >> 12) & 0x0F) |
| 404 | #define DWC3_DEPCMD_HIPRI_FORCERM (1 << 11) | 404 | #define DWC3_DEPCMD_HIPRI_FORCERM (1 << 11) |
| 405 | #define DWC3_DEPCMD_CLEARPENDIN (1 << 11) | ||
| 405 | #define DWC3_DEPCMD_CMDACT (1 << 10) | 406 | #define DWC3_DEPCMD_CMDACT (1 << 10) |
| 406 | #define DWC3_DEPCMD_CMDIOC (1 << 8) | 407 | #define DWC3_DEPCMD_CMDIOC (1 << 8) |
| 407 | 408 | ||
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c index dd5cb5577dca..2f1fb7e7aa54 100644 --- a/drivers/usb/dwc3/dwc3-exynos.c +++ b/drivers/usb/dwc3/dwc3-exynos.c | |||
| @@ -128,12 +128,6 @@ static int dwc3_exynos_probe(struct platform_device *pdev) | |||
| 128 | 128 | ||
| 129 | platform_set_drvdata(pdev, exynos); | 129 | platform_set_drvdata(pdev, exynos); |
| 130 | 130 | ||
| 131 | ret = dwc3_exynos_register_phys(exynos); | ||
| 132 | if (ret) { | ||
| 133 | dev_err(dev, "couldn't register PHYs\n"); | ||
| 134 | return ret; | ||
| 135 | } | ||
| 136 | |||
| 137 | exynos->dev = dev; | 131 | exynos->dev = dev; |
| 138 | 132 | ||
| 139 | exynos->clk = devm_clk_get(dev, "usbdrd30"); | 133 | exynos->clk = devm_clk_get(dev, "usbdrd30"); |
| @@ -183,20 +177,29 @@ static int dwc3_exynos_probe(struct platform_device *pdev) | |||
| 183 | goto err3; | 177 | goto err3; |
| 184 | } | 178 | } |
| 185 | 179 | ||
| 180 | ret = dwc3_exynos_register_phys(exynos); | ||
| 181 | if (ret) { | ||
| 182 | dev_err(dev, "couldn't register PHYs\n"); | ||
| 183 | goto err4; | ||
| 184 | } | ||
| 185 | |||
| 186 | if (node) { | 186 | if (node) { |
| 187 | ret = of_platform_populate(node, NULL, NULL, dev); | 187 | ret = of_platform_populate(node, NULL, NULL, dev); |
| 188 | if (ret) { | 188 | if (ret) { |
| 189 | dev_err(dev, "failed to add dwc3 core\n"); | 189 | dev_err(dev, "failed to add dwc3 core\n"); |
| 190 | goto err4; | 190 | goto err5; |
| 191 | } | 191 | } |
| 192 | } else { | 192 | } else { |
| 193 | dev_err(dev, "no device node, failed to add dwc3 core\n"); | 193 | dev_err(dev, "no device node, failed to add dwc3 core\n"); |
| 194 | ret = -ENODEV; | 194 | ret = -ENODEV; |
| 195 | goto err4; | 195 | goto err5; |
| 196 | } | 196 | } |
| 197 | 197 | ||
| 198 | return 0; | 198 | return 0; |
| 199 | 199 | ||
| 200 | err5: | ||
| 201 | platform_device_unregister(exynos->usb2_phy); | ||
| 202 | platform_device_unregister(exynos->usb3_phy); | ||
| 200 | err4: | 203 | err4: |
| 201 | regulator_disable(exynos->vdd10); | 204 | regulator_disable(exynos->vdd10); |
| 202 | err3: | 205 | err3: |
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c index 5c0adb9c6fb2..50d6ae6f88bc 100644 --- a/drivers/usb/dwc3/dwc3-st.c +++ b/drivers/usb/dwc3/dwc3-st.c | |||
| @@ -129,12 +129,18 @@ static int st_dwc3_drd_init(struct st_dwc3 *dwc3_data) | |||
| 129 | switch (dwc3_data->dr_mode) { | 129 | switch (dwc3_data->dr_mode) { |
| 130 | case USB_DR_MODE_PERIPHERAL: | 130 | case USB_DR_MODE_PERIPHERAL: |
| 131 | 131 | ||
| 132 | val &= ~(USB3_FORCE_VBUSVALID | USB3_DELAY_VBUSVALID | 132 | val &= ~(USB3_DELAY_VBUSVALID |
| 133 | | USB3_SEL_FORCE_OPMODE | USB3_FORCE_OPMODE(0x3) | 133 | | USB3_SEL_FORCE_OPMODE | USB3_FORCE_OPMODE(0x3) |
| 134 | | USB3_SEL_FORCE_DPPULLDOWN2 | USB3_FORCE_DPPULLDOWN2 | 134 | | USB3_SEL_FORCE_DPPULLDOWN2 | USB3_FORCE_DPPULLDOWN2 |
| 135 | | USB3_SEL_FORCE_DMPULLDOWN2 | USB3_FORCE_DMPULLDOWN2); | 135 | | USB3_SEL_FORCE_DMPULLDOWN2 | USB3_FORCE_DMPULLDOWN2); |
| 136 | 136 | ||
| 137 | val |= USB3_DEVICE_NOT_HOST; | 137 | /* |
| 138 | * USB3_PORT2_FORCE_VBUSVALID When '1' and when | ||
| 139 | * USB3_PORT2_DEVICE_NOT_HOST = 1, forces VBUSVLDEXT2 input | ||
| 140 | * of the pico PHY to 1. | ||
| 141 | */ | ||
| 142 | |||
| 143 | val |= USB3_DEVICE_NOT_HOST | USB3_FORCE_VBUSVALID; | ||
| 138 | break; | 144 | break; |
| 139 | 145 | ||
| 140 | case USB_DR_MODE_HOST: | 146 | case USB_DR_MODE_HOST: |
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 9a7d0bd15dc3..07248ff1be5c 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c | |||
| @@ -347,6 +347,28 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep, | |||
| 347 | return ret; | 347 | return ret; |
| 348 | } | 348 | } |
| 349 | 349 | ||
| 350 | static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep) | ||
| 351 | { | ||
| 352 | struct dwc3 *dwc = dep->dwc; | ||
| 353 | struct dwc3_gadget_ep_cmd_params params; | ||
| 354 | u32 cmd = DWC3_DEPCMD_CLEARSTALL; | ||
| 355 | |||
| 356 | /* | ||
| 357 | * As of core revision 2.60a the recommended programming model | ||
| 358 | * is to set the ClearPendIN bit when issuing a Clear Stall EP | ||
| 359 | * command for IN endpoints. This is to prevent an issue where | ||
| 360 | * some (non-compliant) hosts may not send ACK TPs for pending | ||
| 361 | * IN transfers due to a mishandled error condition. Synopsys | ||
| 362 | * STAR 9000614252. | ||
| 363 | */ | ||
| 364 | if (dep->direction && (dwc->revision >= DWC3_REVISION_260A)) | ||
| 365 | cmd |= DWC3_DEPCMD_CLEARPENDIN; | ||
| 366 | |||
| 367 | memset(¶ms, 0, sizeof(params)); | ||
| 368 | |||
| 369 | return dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, ¶ms); | ||
| 370 | } | ||
| 371 | |||
| 350 | static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep, | 372 | static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep, |
| 351 | struct dwc3_trb *trb) | 373 | struct dwc3_trb *trb) |
| 352 | { | 374 | { |
| @@ -1314,8 +1336,7 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol) | |||
| 1314 | else | 1336 | else |
| 1315 | dep->flags |= DWC3_EP_STALL; | 1337 | dep->flags |= DWC3_EP_STALL; |
| 1316 | } else { | 1338 | } else { |
| 1317 | ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, | 1339 | ret = dwc3_send_clear_stall_ep_cmd(dep); |
| 1318 | DWC3_DEPCMD_CLEARSTALL, ¶ms); | ||
| 1319 | if (ret) | 1340 | if (ret) |
| 1320 | dev_err(dwc->dev, "failed to clear STALL on %s\n", | 1341 | dev_err(dwc->dev, "failed to clear STALL on %s\n", |
| 1321 | dep->name); | 1342 | dep->name); |
| @@ -2247,7 +2268,6 @@ static void dwc3_clear_stall_all_ep(struct dwc3 *dwc) | |||
| 2247 | 2268 | ||
| 2248 | for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) { | 2269 | for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) { |
| 2249 | struct dwc3_ep *dep; | 2270 | struct dwc3_ep *dep; |
| 2250 | struct dwc3_gadget_ep_cmd_params params; | ||
| 2251 | int ret; | 2271 | int ret; |
| 2252 | 2272 | ||
| 2253 | dep = dwc->eps[epnum]; | 2273 | dep = dwc->eps[epnum]; |
| @@ -2259,9 +2279,7 @@ static void dwc3_clear_stall_all_ep(struct dwc3 *dwc) | |||
| 2259 | 2279 | ||
| 2260 | dep->flags &= ~DWC3_EP_STALL; | 2280 | dep->flags &= ~DWC3_EP_STALL; |
| 2261 | 2281 | ||
| 2262 | memset(¶ms, 0, sizeof(params)); | 2282 | ret = dwc3_send_clear_stall_ep_cmd(dep); |
| 2263 | ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, | ||
| 2264 | DWC3_DEPCMD_CLEARSTALL, ¶ms); | ||
| 2265 | WARN_ON_ONCE(ret); | 2283 | WARN_ON_ONCE(ret); |
| 2266 | } | 2284 | } |
| 2267 | } | 2285 | } |
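Note on the dwc3 hunks above: dwc3_send_clear_stall_ep_cmd() folds the Synopsys STAR 9000614252 workaround into one place, ORing DWC3_DEPCMD_CLEARPENDIN into the Clear Stall command for IN endpoints on cores at revision 2.60a or later. A sketch of the command-word composition follows; CLEARPENDIN is bit 11 as in the core.h hunk, while the CLEARSTALL opcode and the revision constant below are placeholder values assumed for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DWC3_DEPCMD_CLEARSTALL		0x05		/* assumed opcode */
#define DWC3_DEPCMD_CLEARPENDIN		(1u << 11)
#define DWC3_REVISION_260A		0x5533260au	/* assumed value  */

static uint32_t clear_stall_cmd(bool dir_in, uint32_t revision)
{
	uint32_t cmd = DWC3_DEPCMD_CLEARSTALL;

	if (dir_in && revision >= DWC3_REVISION_260A)
		cmd |= DWC3_DEPCMD_CLEARPENDIN;
	return cmd;
}

int main(void)
{
	printf("IN  ep on 2.60a: 0x%08x\n",
	       clear_stall_cmd(true, DWC3_REVISION_260A));
	printf("OUT ep on 2.60a: 0x%08x\n",
	       clear_stall_cmd(false, DWC3_REVISION_260A));
	return 0;
}
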
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index d67de0d22a2b..eb648485a58c 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c | |||
| @@ -1868,14 +1868,19 @@ unknown: | |||
| 1868 | } | 1868 | } |
| 1869 | break; | 1869 | break; |
| 1870 | } | 1870 | } |
| 1871 | req->length = value; | 1871 | |
| 1872 | req->context = cdev; | 1872 | if (value >= 0) { |
| 1873 | req->zero = value < w_length; | 1873 | req->length = value; |
| 1874 | value = composite_ep0_queue(cdev, req, GFP_ATOMIC); | 1874 | req->context = cdev; |
| 1875 | if (value < 0) { | 1875 | req->zero = value < w_length; |
| 1876 | DBG(cdev, "ep_queue --> %d\n", value); | 1876 | value = composite_ep0_queue(cdev, req, |
| 1877 | req->status = 0; | 1877 | GFP_ATOMIC); |
| 1878 | composite_setup_complete(gadget->ep0, req); | 1878 | if (value < 0) { |
| 1879 | DBG(cdev, "ep_queue --> %d\n", value); | ||
| 1880 | req->status = 0; | ||
| 1881 | composite_setup_complete(gadget->ep0, | ||
| 1882 | req); | ||
| 1883 | } | ||
| 1879 | } | 1884 | } |
| 1880 | return value; | 1885 | return value; |
| 1881 | } | 1886 | } |
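Note on the composite.c hunk above: the ep0 reply is now only queued when the preceding setup handling returned a non-negative length; an error value is no longer copied into req->length and queued anyway. A minimal control-flow sketch with the queueing stubbed out (names here are illustrative, not the gadget API):

#include <stdio.h>

struct usb_request { unsigned int length; int zero; };

/* Stub standing in for the ep0 queue call. */
static int ep0_queue(struct usb_request *req)
{
	printf("queued %u byte(s)\n", req->length);
	return 0;
}

static int handle_setup(int value, unsigned int w_length)
{
	struct usb_request req = { 0 };

	if (value >= 0) {		/* only queue successful replies */
		req.length = value;
		req.zero = (unsigned int)value < w_length;
		value = ep0_queue(&req);
	}
	return value;
}

int main(void)
{
	printf("-> %d\n", handle_setup(8, 64));		/* queued        */
	printf("-> %d\n", handle_setup(-22, 64));	/* skipped, -22  */
	return 0;
}
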
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index b6f60ca8a035..70cf3477f951 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c | |||
| @@ -1401,6 +1401,7 @@ static const struct usb_gadget_driver configfs_driver_template = { | |||
| 1401 | .owner = THIS_MODULE, | 1401 | .owner = THIS_MODULE, |
| 1402 | .name = "configfs-gadget", | 1402 | .name = "configfs-gadget", |
| 1403 | }, | 1403 | }, |
| 1404 | .match_existing_only = 1, | ||
| 1404 | }; | 1405 | }; |
| 1405 | 1406 | ||
| 1406 | static struct config_group *gadgets_make( | 1407 | static struct config_group *gadgets_make( |
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 73515d54e1cc..cc33d2667408 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c | |||
| @@ -2051,7 +2051,7 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type, | |||
| 2051 | 2051 | ||
| 2052 | if (len < sizeof(*d) || | 2052 | if (len < sizeof(*d) || |
| 2053 | d->bFirstInterfaceNumber >= ffs->interfaces_count || | 2053 | d->bFirstInterfaceNumber >= ffs->interfaces_count || |
| 2054 | d->Reserved1) | 2054 | !d->Reserved1) |
| 2055 | return -EINVAL; | 2055 | return -EINVAL; |
| 2056 | for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i) | 2056 | for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i) |
| 2057 | if (d->Reserved2[i]) | 2057 | if (d->Reserved2[i]) |
| @@ -2729,6 +2729,7 @@ static int _ffs_func_bind(struct usb_configuration *c, | |||
| 2729 | func->ffs->ss_descs_count; | 2729 | func->ffs->ss_descs_count; |
| 2730 | 2730 | ||
| 2731 | int fs_len, hs_len, ss_len, ret, i; | 2731 | int fs_len, hs_len, ss_len, ret, i; |
| 2732 | struct ffs_ep *eps_ptr; | ||
| 2732 | 2733 | ||
| 2733 | /* Make it a single chunk, less management later on */ | 2734 | /* Make it a single chunk, less management later on */ |
| 2734 | vla_group(d); | 2735 | vla_group(d); |
| @@ -2777,12 +2778,9 @@ static int _ffs_func_bind(struct usb_configuration *c, | |||
| 2777 | ffs->raw_descs_length); | 2778 | ffs->raw_descs_length); |
| 2778 | 2779 | ||
| 2779 | memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz); | 2780 | memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz); |
| 2780 | for (ret = ffs->eps_count; ret; --ret) { | 2781 | eps_ptr = vla_ptr(vlabuf, d, eps); |
| 2781 | struct ffs_ep *ptr; | 2782 | for (i = 0; i < ffs->eps_count; i++) |
| 2782 | 2783 | eps_ptr[i].num = -1; | |
| 2783 | ptr = vla_ptr(vlabuf, d, eps); | ||
| 2784 | ptr[ret].num = -1; | ||
| 2785 | } | ||
| 2786 | 2784 | ||
| 2787 | /* Save pointers | 2785 | /* Save pointers |
| 2788 | * d_eps == vlabuf, func->eps used to kfree vlabuf later | 2786 | * d_eps == vlabuf, func->eps used to kfree vlabuf later |
| @@ -2851,7 +2849,7 @@ static int _ffs_func_bind(struct usb_configuration *c, | |||
| 2851 | goto error; | 2849 | goto error; |
| 2852 | 2850 | ||
| 2853 | func->function.os_desc_table = vla_ptr(vlabuf, d, os_desc_table); | 2851 | func->function.os_desc_table = vla_ptr(vlabuf, d, os_desc_table); |
| 2854 | if (c->cdev->use_os_string) | 2852 | if (c->cdev->use_os_string) { |
| 2855 | for (i = 0; i < ffs->interfaces_count; ++i) { | 2853 | for (i = 0; i < ffs->interfaces_count; ++i) { |
| 2856 | struct usb_os_desc *desc; | 2854 | struct usb_os_desc *desc; |
| 2857 | 2855 | ||
| @@ -2862,13 +2860,15 @@ static int _ffs_func_bind(struct usb_configuration *c, | |||
| 2862 | vla_ptr(vlabuf, d, ext_compat) + i * 16; | 2860 | vla_ptr(vlabuf, d, ext_compat) + i * 16; |
| 2863 | INIT_LIST_HEAD(&desc->ext_prop); | 2861 | INIT_LIST_HEAD(&desc->ext_prop); |
| 2864 | } | 2862 | } |
| 2865 | ret = ffs_do_os_descs(ffs->ms_os_descs_count, | 2863 | ret = ffs_do_os_descs(ffs->ms_os_descs_count, |
| 2866 | vla_ptr(vlabuf, d, raw_descs) + | 2864 | vla_ptr(vlabuf, d, raw_descs) + |
| 2867 | fs_len + hs_len + ss_len, | 2865 | fs_len + hs_len + ss_len, |
| 2868 | d_raw_descs__sz - fs_len - hs_len - ss_len, | 2866 | d_raw_descs__sz - fs_len - hs_len - |
| 2869 | __ffs_func_bind_do_os_desc, func); | 2867 | ss_len, |
| 2870 | if (unlikely(ret < 0)) | 2868 | __ffs_func_bind_do_os_desc, func); |
| 2871 | goto error; | 2869 | if (unlikely(ret < 0)) |
| 2870 | goto error; | ||
| 2871 | } | ||
| 2872 | func->function.os_desc_n = | 2872 | func->function.os_desc_n = |
| 2873 | c->cdev->use_os_string ? ffs->interfaces_count : 0; | 2873 | c->cdev->use_os_string ? ffs->interfaces_count : 0; |
| 2874 | 2874 | ||
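The endpoint-initialisation loop replaced in the first hunk of this file had two defects that the forward index fixes: it never touched element 0, and it wrote one element past the end of the array, since the old index ran from eps_count down to 1. A small standalone program makes the skipped element visible (the structure and array size are illustrative; the out-of-bounds write is clamped here and only described in the comment):

	#include <stdio.h>

	#define EPS_COUNT 4

	struct toy_ep { int num; };

	int main(void)
	{
		struct toy_ep eps[EPS_COUNT] = { { 0 } };
		int i, ret;

		/* Old style: counted eps_count..1, so eps[0] was skipped and
		 * the real loop also wrote eps[eps_count], one past the end.
		 * Clamped to EPS_COUNT - 1 here to stay in bounds. */
		for (ret = EPS_COUNT - 1; ret; --ret)
			eps[ret].num = -1;

		printf("old-style loop leaves eps[0].num = %d\n", eps[0].num);

		/* New style: plain forward index covers exactly 0..EPS_COUNT-1. */
		for (i = 0; i < EPS_COUNT; i++)
			eps[i].num = -1;

		printf("new-style loop sets  eps[0].num = %d\n", eps[0].num);
		return 0;
	}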
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c index c45104e3a64b..64706a789580 100644 --- a/drivers/usb/gadget/function/f_printer.c +++ b/drivers/usb/gadget/function/f_printer.c | |||
| @@ -161,14 +161,6 @@ static struct usb_endpoint_descriptor hs_ep_out_desc = { | |||
| 161 | .wMaxPacketSize = cpu_to_le16(512) | 161 | .wMaxPacketSize = cpu_to_le16(512) |
| 162 | }; | 162 | }; |
| 163 | 163 | ||
| 164 | static struct usb_qualifier_descriptor dev_qualifier = { | ||
| 165 | .bLength = sizeof(dev_qualifier), | ||
| 166 | .bDescriptorType = USB_DT_DEVICE_QUALIFIER, | ||
| 167 | .bcdUSB = cpu_to_le16(0x0200), | ||
| 168 | .bDeviceClass = USB_CLASS_PRINTER, | ||
| 169 | .bNumConfigurations = 1 | ||
| 170 | }; | ||
| 171 | |||
| 172 | static struct usb_descriptor_header *hs_printer_function[] = { | 164 | static struct usb_descriptor_header *hs_printer_function[] = { |
| 173 | (struct usb_descriptor_header *) &intf_desc, | 165 | (struct usb_descriptor_header *) &intf_desc, |
| 174 | (struct usb_descriptor_header *) &hs_ep_in_desc, | 166 | (struct usb_descriptor_header *) &hs_ep_in_desc, |
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c index 35fe3c80cfc0..197f73386fac 100644 --- a/drivers/usb/gadget/function/f_tcm.c +++ b/drivers/usb/gadget/function/f_tcm.c | |||
| @@ -1445,16 +1445,18 @@ static void usbg_drop_tpg(struct se_portal_group *se_tpg) | |||
| 1445 | for (i = 0; i < TPG_INSTANCES; ++i) | 1445 | for (i = 0; i < TPG_INSTANCES; ++i) |
| 1446 | if (tpg_instances[i].tpg == tpg) | 1446 | if (tpg_instances[i].tpg == tpg) |
| 1447 | break; | 1447 | break; |
| 1448 | if (i < TPG_INSTANCES) | 1448 | if (i < TPG_INSTANCES) { |
| 1449 | tpg_instances[i].tpg = NULL; | 1449 | tpg_instances[i].tpg = NULL; |
| 1450 | opts = container_of(tpg_instances[i].func_inst, | 1450 | opts = container_of(tpg_instances[i].func_inst, |
| 1451 | struct f_tcm_opts, func_inst); | 1451 | struct f_tcm_opts, func_inst); |
| 1452 | mutex_lock(&opts->dep_lock); | 1452 | mutex_lock(&opts->dep_lock); |
| 1453 | if (opts->has_dep) | 1453 | if (opts->has_dep) |
| 1454 | module_put(opts->dependent); | 1454 | module_put(opts->dependent); |
| 1455 | else | 1455 | else |
| 1456 | configfs_undepend_item_unlocked(&opts->func_inst.group.cg_item); | 1456 | configfs_undepend_item_unlocked( |
| 1457 | mutex_unlock(&opts->dep_lock); | 1457 | &opts->func_inst.group.cg_item); |
| 1458 | mutex_unlock(&opts->dep_lock); | ||
| 1459 | } | ||
| 1458 | mutex_unlock(&tpg_instances_lock); | 1460 | mutex_unlock(&tpg_instances_lock); |
| 1459 | 1461 | ||
| 1460 | kfree(tpg); | 1462 | kfree(tpg); |
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c index 186d4b162524..cd214ec8a601 100644 --- a/drivers/usb/gadget/function/f_uac2.c +++ b/drivers/usb/gadget/function/f_uac2.c | |||
| @@ -598,18 +598,6 @@ static struct usb_gadget_strings *fn_strings[] = { | |||
| 598 | NULL, | 598 | NULL, |
| 599 | }; | 599 | }; |
| 600 | 600 | ||
| 601 | static struct usb_qualifier_descriptor devqual_desc = { | ||
| 602 | .bLength = sizeof devqual_desc, | ||
| 603 | .bDescriptorType = USB_DT_DEVICE_QUALIFIER, | ||
| 604 | |||
| 605 | .bcdUSB = cpu_to_le16(0x200), | ||
| 606 | .bDeviceClass = USB_CLASS_MISC, | ||
| 607 | .bDeviceSubClass = 0x02, | ||
| 608 | .bDeviceProtocol = 0x01, | ||
| 609 | .bNumConfigurations = 1, | ||
| 610 | .bRESERVED = 0, | ||
| 611 | }; | ||
| 612 | |||
| 613 | static struct usb_interface_assoc_descriptor iad_desc = { | 601 | static struct usb_interface_assoc_descriptor iad_desc = { |
| 614 | .bLength = sizeof iad_desc, | 602 | .bLength = sizeof iad_desc, |
| 615 | .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION, | 603 | .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION, |
| @@ -1292,6 +1280,7 @@ in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr) | |||
| 1292 | 1280 | ||
| 1293 | if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) { | 1281 | if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) { |
| 1294 | struct cntrl_cur_lay3 c; | 1282 | struct cntrl_cur_lay3 c; |
| 1283 | memset(&c, 0, sizeof(struct cntrl_cur_lay3)); | ||
| 1295 | 1284 | ||
| 1296 | if (entity_id == USB_IN_CLK_ID) | 1285 | if (entity_id == USB_IN_CLK_ID) |
| 1297 | c.dCUR = p_srate; | 1286 | c.dCUR = p_srate; |
diff --git a/drivers/usb/gadget/function/storage_common.c b/drivers/usb/gadget/function/storage_common.c index d62683017cf3..990df221c629 100644 --- a/drivers/usb/gadget/function/storage_common.c +++ b/drivers/usb/gadget/function/storage_common.c | |||
| @@ -83,9 +83,7 @@ EXPORT_SYMBOL_GPL(fsg_fs_function); | |||
| 83 | * USB 2.0 devices need to expose both high speed and full speed | 83 | * USB 2.0 devices need to expose both high speed and full speed |
| 84 | * descriptors, unless they only run at full speed. | 84 | * descriptors, unless they only run at full speed. |
| 85 | * | 85 | * |
| 86 | * That means alternate endpoint descriptors (bigger packets) | 86 | * That means alternate endpoint descriptors (bigger packets). |
| 87 | * and a "device qualifier" ... plus more construction options | ||
| 88 | * for the configuration descriptor. | ||
| 89 | */ | 87 | */ |
| 90 | struct usb_endpoint_descriptor fsg_hs_bulk_in_desc = { | 88 | struct usb_endpoint_descriptor fsg_hs_bulk_in_desc = { |
| 91 | .bLength = USB_DT_ENDPOINT_SIZE, | 89 | .bLength = USB_DT_ENDPOINT_SIZE, |
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index e64479f882a5..aa3707bdebb4 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c | |||
| @@ -938,8 +938,11 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr) | |||
| 938 | struct usb_ep *ep = dev->gadget->ep0; | 938 | struct usb_ep *ep = dev->gadget->ep0; |
| 939 | struct usb_request *req = dev->req; | 939 | struct usb_request *req = dev->req; |
| 940 | 940 | ||
| 941 | if ((retval = setup_req (ep, req, 0)) == 0) | 941 | if ((retval = setup_req (ep, req, 0)) == 0) { |
| 942 | retval = usb_ep_queue (ep, req, GFP_ATOMIC); | 942 | spin_unlock_irq (&dev->lock); |
| 943 | retval = usb_ep_queue (ep, req, GFP_KERNEL); | ||
| 944 | spin_lock_irq (&dev->lock); | ||
| 945 | } | ||
| 943 | dev->state = STATE_DEV_CONNECTED; | 946 | dev->state = STATE_DEV_CONNECTED; |
| 944 | 947 | ||
| 945 | /* assume that was SET_CONFIGURATION */ | 948 | /* assume that was SET_CONFIGURATION */ |
| @@ -1457,8 +1460,11 @@ delegate: | |||
| 1457 | w_length); | 1460 | w_length); |
| 1458 | if (value < 0) | 1461 | if (value < 0) |
| 1459 | break; | 1462 | break; |
| 1463 | |||
| 1464 | spin_unlock (&dev->lock); | ||
| 1460 | value = usb_ep_queue (gadget->ep0, dev->req, | 1465 | value = usb_ep_queue (gadget->ep0, dev->req, |
| 1461 | GFP_ATOMIC); | 1466 | GFP_KERNEL); |
| 1467 | spin_lock (&dev->lock); | ||
| 1462 | if (value < 0) { | 1468 | if (value < 0) { |
| 1463 | clean_req (gadget->ep0, dev->req); | 1469 | clean_req (gadget->ep0, dev->req); |
| 1464 | break; | 1470 | break; |
| @@ -1481,11 +1487,14 @@ delegate: | |||
| 1481 | if (value >= 0 && dev->state != STATE_DEV_SETUP) { | 1487 | if (value >= 0 && dev->state != STATE_DEV_SETUP) { |
| 1482 | req->length = value; | 1488 | req->length = value; |
| 1483 | req->zero = value < w_length; | 1489 | req->zero = value < w_length; |
| 1484 | value = usb_ep_queue (gadget->ep0, req, GFP_ATOMIC); | 1490 | |
| 1491 | spin_unlock (&dev->lock); | ||
| 1492 | value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL); | ||
| 1485 | if (value < 0) { | 1493 | if (value < 0) { |
| 1486 | DBG (dev, "ep_queue --> %d\n", value); | 1494 | DBG (dev, "ep_queue --> %d\n", value); |
| 1487 | req->status = 0; | 1495 | req->status = 0; |
| 1488 | } | 1496 | } |
| 1497 | return value; | ||
| 1489 | } | 1498 | } |
| 1490 | 1499 | ||
| 1491 | /* device stalls when value < 0 */ | 1500 | /* device stalls when value < 0 */ |
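All three hunks in this file follow the same pattern: usb_ep_queue() may now sleep because it is called with GFP_KERNEL, so the driver's spinlock is dropped around the call and re-taken afterwards (the plain spin_unlock()/spin_lock() variants in the setup path, the _irq variants in ep0_read()). A condensed sketch of the pattern, not buildable on its own, with dev, ep and req standing for the gadgetfs state already visible above:

	/* Release the driver lock across a call that may sleep, then
	 * re-acquire it before touching driver state again. Any state read
	 * before the unlock must be revalidated afterwards if another path
	 * can change it while the lock is dropped. */
	spin_unlock_irq(&dev->lock);
	value = usb_ep_queue(ep, req, GFP_KERNEL);
	spin_lock_irq(&dev->lock);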
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c index 6e8300d6a737..e1b2dcebdc2e 100644 --- a/drivers/usb/gadget/udc/udc-core.c +++ b/drivers/usb/gadget/udc/udc-core.c | |||
| @@ -603,11 +603,15 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver) | |||
| 603 | } | 603 | } |
| 604 | } | 604 | } |
| 605 | 605 | ||
| 606 | list_add_tail(&driver->pending, &gadget_driver_pending_list); | 606 | if (!driver->match_existing_only) { |
| 607 | pr_info("udc-core: couldn't find an available UDC - added [%s] to list of pending drivers\n", | 607 | list_add_tail(&driver->pending, &gadget_driver_pending_list); |
| 608 | driver->function); | 608 | pr_info("udc-core: couldn't find an available UDC - added [%s] to list of pending drivers\n", |
| 609 | driver->function); | ||
| 610 | ret = 0; | ||
| 611 | } | ||
| 612 | |||
| 609 | mutex_unlock(&udc_lock); | 613 | mutex_unlock(&udc_lock); |
| 610 | return 0; | 614 | return ret; |
| 611 | found: | 615 | found: |
| 612 | ret = udc_bind_to_driver(udc, driver); | 616 | ret = udc_bind_to_driver(udc, driver); |
| 613 | mutex_unlock(&udc_lock); | 617 | mutex_unlock(&udc_lock); |
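Together with the configfs change above (.match_existing_only = 1), this makes usb_gadget_probe_driver() fail immediately when no UDC is available instead of parking the driver on the pending list; plain assignment of ret inside the `if` implies ret already holds the error (presumably -ENODEV) at this point. An illustrative, incomplete fragment of how a gadget driver would opt out of deferred binding (the name is made up and the bind/unbind/setup callbacks are omitted):

	/* Illustrative template fragment only. */
	static const struct usb_gadget_driver example_driver_template = {
		.max_speed		= USB_SPEED_SUPER,
		.driver = {
			.owner		= THIS_MODULE,
			.name		= "example-gadget",	/* made-up name */
		},
		/* Refuse deferred binding: probe returns an error right away
		 * when no UDC is currently available. */
		.match_existing_only	= 1,
	};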
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index ae1b6e69eb96..a962b89b65a6 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c | |||
| @@ -368,6 +368,15 @@ static void ehci_shutdown(struct usb_hcd *hcd) | |||
| 368 | { | 368 | { |
| 369 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); | 369 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); |
| 370 | 370 | ||
| 371 | /** | ||
| 372 | * Protect the system from crashing at system shutdown in cases where | ||
| 373 | * the USB host has not yet been added by the OTG controller driver. | ||
| 374 | * Since ehci_setup() has not run yet, do not access registers or | ||
| 375 | * variables initialized in ehci_setup(). | ||

| 376 | */ | ||
| 377 | if (!ehci->sbrn) | ||
| 378 | return; | ||
| 379 | |||
| 371 | spin_lock_irq(&ehci->lock); | 380 | spin_lock_irq(&ehci->lock); |
| 372 | ehci->shutdown = true; | 381 | ehci->shutdown = true; |
| 373 | ehci->rh_state = EHCI_RH_STOPPING; | 382 | ehci->rh_state = EHCI_RH_STOPPING; |
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index ffc90295a95f..74f62d68f013 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c | |||
| @@ -872,15 +872,23 @@ int ehci_hub_control( | |||
| 872 | ) { | 872 | ) { |
| 873 | struct ehci_hcd *ehci = hcd_to_ehci (hcd); | 873 | struct ehci_hcd *ehci = hcd_to_ehci (hcd); |
| 874 | int ports = HCS_N_PORTS (ehci->hcs_params); | 874 | int ports = HCS_N_PORTS (ehci->hcs_params); |
| 875 | u32 __iomem *status_reg = &ehci->regs->port_status[ | 875 | u32 __iomem *status_reg, *hostpc_reg; |
| 876 | (wIndex & 0xff) - 1]; | ||
| 877 | u32 __iomem *hostpc_reg = &ehci->regs->hostpc[(wIndex & 0xff) - 1]; | ||
| 878 | u32 temp, temp1, status; | 876 | u32 temp, temp1, status; |
| 879 | unsigned long flags; | 877 | unsigned long flags; |
| 880 | int retval = 0; | 878 | int retval = 0; |
| 881 | unsigned selector; | 879 | unsigned selector; |
| 882 | 880 | ||
| 883 | /* | 881 | /* |
| 882 | * Avoid underflow while calculating (wIndex & 0xff) - 1. | ||
| 883 | * The compiler might deduce that wIndex can never be 0 and then | ||
| 884 | * optimize away the tests for !wIndex below. | ||
| 885 | */ | ||
| 886 | temp = wIndex & 0xff; | ||
| 887 | temp -= (temp > 0); | ||
| 888 | status_reg = &ehci->regs->port_status[temp]; | ||
| 889 | hostpc_reg = &ehci->regs->hostpc[temp]; | ||
| 890 | |||
| 891 | /* | ||
| 884 | * FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR. | 892 | * FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR. |
| 885 | * HCS_INDICATOR may say we can change LEDs to off/amber/green. | 893 | * HCS_INDICATOR may say we can change LEDs to off/amber/green. |
| 886 | * (track current state ourselves) ... blink for diagnostics, | 894 | * (track current state ourselves) ... blink for diagnostics, |
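The replacement line `temp -= (temp > 0)` is the branch-free way of writing "subtract one, but leave zero at zero", which turns the one-based wIndex into an array index without handing the compiler a comparison it could use to assume wIndex is never 0. A standalone check of the expression:

	#include <stdio.h>

	int main(void)
	{
		/* Same expression as in ehci_hub_control(): decrement the
		 * one-based port number to a zero-based index, but keep 0 at 0
		 * so the later !wIndex checks stay meaningful. */
		for (unsigned wIndex = 0; wIndex <= 3; wIndex++) {
			unsigned temp = wIndex & 0xff;

			temp -= (temp > 0);
			printf("wIndex=%u -> port array index %u\n", wIndex, temp);
		}
		return 0;
	}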
diff --git a/drivers/usb/host/ehci-msm.c b/drivers/usb/host/ehci-msm.c index d3afc89d00f5..2f8d3af811ce 100644 --- a/drivers/usb/host/ehci-msm.c +++ b/drivers/usb/host/ehci-msm.c | |||
| @@ -179,22 +179,32 @@ static int ehci_msm_remove(struct platform_device *pdev) | |||
| 179 | static int ehci_msm_pm_suspend(struct device *dev) | 179 | static int ehci_msm_pm_suspend(struct device *dev) |
| 180 | { | 180 | { |
| 181 | struct usb_hcd *hcd = dev_get_drvdata(dev); | 181 | struct usb_hcd *hcd = dev_get_drvdata(dev); |
| 182 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); | ||
| 182 | bool do_wakeup = device_may_wakeup(dev); | 183 | bool do_wakeup = device_may_wakeup(dev); |
| 183 | 184 | ||
| 184 | dev_dbg(dev, "ehci-msm PM suspend\n"); | 185 | dev_dbg(dev, "ehci-msm PM suspend\n"); |
| 185 | 186 | ||
| 186 | return ehci_suspend(hcd, do_wakeup); | 187 | /* Only call ehci_suspend if ehci_setup has been done */ |
| 188 | if (ehci->sbrn) | ||
| 189 | return ehci_suspend(hcd, do_wakeup); | ||
| 190 | |||
| 191 | return 0; | ||
| 187 | } | 192 | } |
| 188 | 193 | ||
| 189 | static int ehci_msm_pm_resume(struct device *dev) | 194 | static int ehci_msm_pm_resume(struct device *dev) |
| 190 | { | 195 | { |
| 191 | struct usb_hcd *hcd = dev_get_drvdata(dev); | 196 | struct usb_hcd *hcd = dev_get_drvdata(dev); |
| 197 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); | ||
| 192 | 198 | ||
| 193 | dev_dbg(dev, "ehci-msm PM resume\n"); | 199 | dev_dbg(dev, "ehci-msm PM resume\n"); |
| 194 | ehci_resume(hcd, false); | 200 | |
| 201 | /* Only call ehci_resume if ehci_setup has been done */ | ||
| 202 | if (ehci->sbrn) | ||
| 203 | ehci_resume(hcd, false); | ||
| 195 | 204 | ||
| 196 | return 0; | 205 | return 0; |
| 197 | } | 206 | } |
| 207 | |||
| 198 | #else | 208 | #else |
| 199 | #define ehci_msm_pm_suspend NULL | 209 | #define ehci_msm_pm_suspend NULL |
| 200 | #define ehci_msm_pm_resume NULL | 210 | #define ehci_msm_pm_resume NULL |
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c index 4031b372008e..9a3d7db5be57 100644 --- a/drivers/usb/host/ehci-tegra.c +++ b/drivers/usb/host/ehci-tegra.c | |||
| @@ -81,15 +81,23 @@ static int tegra_reset_usb_controller(struct platform_device *pdev) | |||
| 81 | struct usb_hcd *hcd = platform_get_drvdata(pdev); | 81 | struct usb_hcd *hcd = platform_get_drvdata(pdev); |
| 82 | struct tegra_ehci_hcd *tegra = | 82 | struct tegra_ehci_hcd *tegra = |
| 83 | (struct tegra_ehci_hcd *)hcd_to_ehci(hcd)->priv; | 83 | (struct tegra_ehci_hcd *)hcd_to_ehci(hcd)->priv; |
| 84 | bool has_utmi_pad_registers = false; | ||
| 84 | 85 | ||
| 85 | phy_np = of_parse_phandle(pdev->dev.of_node, "nvidia,phy", 0); | 86 | phy_np = of_parse_phandle(pdev->dev.of_node, "nvidia,phy", 0); |
| 86 | if (!phy_np) | 87 | if (!phy_np) |
| 87 | return -ENOENT; | 88 | return -ENOENT; |
| 88 | 89 | ||
| 90 | if (of_property_read_bool(phy_np, "nvidia,has-utmi-pad-registers")) | ||
| 91 | has_utmi_pad_registers = true; | ||
| 92 | |||
| 89 | if (!usb1_reset_attempted) { | 93 | if (!usb1_reset_attempted) { |
| 90 | struct reset_control *usb1_reset; | 94 | struct reset_control *usb1_reset; |
| 91 | 95 | ||
| 92 | usb1_reset = of_reset_control_get(phy_np, "usb"); | 96 | if (!has_utmi_pad_registers) |
| 97 | usb1_reset = of_reset_control_get(phy_np, "utmi-pads"); | ||
| 98 | else | ||
| 99 | usb1_reset = tegra->rst; | ||
| 100 | |||
| 93 | if (IS_ERR(usb1_reset)) { | 101 | if (IS_ERR(usb1_reset)) { |
| 94 | dev_warn(&pdev->dev, | 102 | dev_warn(&pdev->dev, |
| 95 | "can't get utmi-pads reset from the PHY\n"); | 103 | "can't get utmi-pads reset from the PHY\n"); |
| @@ -99,13 +107,15 @@ static int tegra_reset_usb_controller(struct platform_device *pdev) | |||
| 99 | reset_control_assert(usb1_reset); | 107 | reset_control_assert(usb1_reset); |
| 100 | udelay(1); | 108 | udelay(1); |
| 101 | reset_control_deassert(usb1_reset); | 109 | reset_control_deassert(usb1_reset); |
| 110 | |||
| 111 | if (!has_utmi_pad_registers) | ||
| 112 | reset_control_put(usb1_reset); | ||
| 102 | } | 113 | } |
| 103 | 114 | ||
| 104 | reset_control_put(usb1_reset); | ||
| 105 | usb1_reset_attempted = true; | 115 | usb1_reset_attempted = true; |
| 106 | } | 116 | } |
| 107 | 117 | ||
| 108 | if (!of_property_read_bool(phy_np, "nvidia,has-utmi-pad-registers")) { | 118 | if (!has_utmi_pad_registers) { |
| 109 | reset_control_assert(tegra->rst); | 119 | reset_control_assert(tegra->rst); |
| 110 | udelay(1); | 120 | udelay(1); |
| 111 | reset_control_deassert(tegra->rst); | 121 | reset_control_deassert(tegra->rst); |
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c index d029bbe9eb36..641fed609911 100644 --- a/drivers/usb/host/ohci-q.c +++ b/drivers/usb/host/ohci-q.c | |||
| @@ -183,7 +183,6 @@ static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed) | |||
| 183 | { | 183 | { |
| 184 | int branch; | 184 | int branch; |
| 185 | 185 | ||
| 186 | ed->state = ED_OPER; | ||
| 187 | ed->ed_prev = NULL; | 186 | ed->ed_prev = NULL; |
| 188 | ed->ed_next = NULL; | 187 | ed->ed_next = NULL; |
| 189 | ed->hwNextED = 0; | 188 | ed->hwNextED = 0; |
| @@ -259,6 +258,8 @@ static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed) | |||
| 259 | /* the HC may not see the schedule updates yet, but if it does | 258 | /* the HC may not see the schedule updates yet, but if it does |
| 260 | * then they'll be properly ordered. | 259 | * then they'll be properly ordered. |
| 261 | */ | 260 | */ |
| 261 | |||
| 262 | ed->state = ED_OPER; | ||
| 262 | return 0; | 263 | return 0; |
| 263 | } | 264 | } |
| 264 | 265 | ||
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 48672fac7ff3..c10972fcc8e4 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c | |||
| @@ -37,6 +37,7 @@ | |||
| 37 | /* Device for a quirk */ | 37 | /* Device for a quirk */ |
| 38 | #define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73 | 38 | #define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73 |
| 39 | #define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000 | 39 | #define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000 |
| 40 | #define PCI_DEVICE_ID_FRESCO_LOGIC_FL1009 0x1009 | ||
| 40 | #define PCI_DEVICE_ID_FRESCO_LOGIC_FL1400 0x1400 | 41 | #define PCI_DEVICE_ID_FRESCO_LOGIC_FL1400 0x1400 |
| 41 | 42 | ||
| 42 | #define PCI_VENDOR_ID_ETRON 0x1b6f | 43 | #define PCI_VENDOR_ID_ETRON 0x1b6f |
| @@ -114,6 +115,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) | |||
| 114 | xhci->quirks |= XHCI_TRUST_TX_LENGTH; | 115 | xhci->quirks |= XHCI_TRUST_TX_LENGTH; |
| 115 | } | 116 | } |
| 116 | 117 | ||
| 118 | if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC && | ||
| 119 | pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1009) | ||
| 120 | xhci->quirks |= XHCI_BROKEN_STREAMS; | ||
| 121 | |||
| 117 | if (pdev->vendor == PCI_VENDOR_ID_NEC) | 122 | if (pdev->vendor == PCI_VENDOR_ID_NEC) |
| 118 | xhci->quirks |= XHCI_NEC_HOST; | 123 | xhci->quirks |= XHCI_NEC_HOST; |
| 119 | 124 | ||
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 676ea458148b..1f3f981fe7f8 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c | |||
| @@ -196,6 +196,9 @@ static int xhci_plat_probe(struct platform_device *pdev) | |||
| 196 | ret = clk_prepare_enable(clk); | 196 | ret = clk_prepare_enable(clk); |
| 197 | if (ret) | 197 | if (ret) |
| 198 | goto put_hcd; | 198 | goto put_hcd; |
| 199 | } else if (PTR_ERR(clk) == -EPROBE_DEFER) { | ||
| 200 | ret = -EPROBE_DEFER; | ||
| 201 | goto put_hcd; | ||
| 199 | } | 202 | } |
| 200 | 203 | ||
| 201 | xhci = hcd_to_xhci(hcd); | 204 | xhci = hcd_to_xhci(hcd); |
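The extra branch propagates probe deferral from devm_clk_get(): any other error is still treated as "no clock, carry on", but -EPROBE_DEFER must reach the driver core so the probe is retried once the clock provider shows up. A reduced sketch of that optional-clock idiom, using the names already visible in the hunk:

	/* Sketch of the optional-clock pattern used above. */
	clk = devm_clk_get(&pdev->dev, NULL);
	if (!IS_ERR(clk)) {
		ret = clk_prepare_enable(clk);
		if (ret)
			goto put_hcd;
	} else if (PTR_ERR(clk) == -EPROBE_DEFER) {
		/* The clock exists but its provider is not ready yet:
		 * bail out and let the driver core retry the probe later. */
		ret = -EPROBE_DEFER;
		goto put_hcd;
	}
	/* Any other error: treat the clock as absent and continue. */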
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 52deae4b7eac..d7d502578d79 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
| @@ -290,6 +290,14 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci) | |||
| 290 | 290 | ||
| 291 | temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); | 291 | temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); |
| 292 | xhci->cmd_ring_state = CMD_RING_STATE_ABORTED; | 292 | xhci->cmd_ring_state = CMD_RING_STATE_ABORTED; |
| 293 | |||
| 294 | /* | ||
| 295 | * Writing the CMD_RING_ABORT bit should cause a cmd completion event, | ||
| 296 | * however on some host hw the CMD_RING_RUNNING bit is correctly cleared | ||
| 297 | * but the completion event is never sent. Use the cmd timeout timer to | ||
| 298 | * handle those cases. Use twice the time to cover the bit polling retry. | ||
| 299 | */ | ||
| 300 | mod_timer(&xhci->cmd_timer, jiffies + (2 * XHCI_CMD_DEFAULT_TIMEOUT)); | ||
| 293 | xhci_write_64(xhci, temp_64 | CMD_RING_ABORT, | 301 | xhci_write_64(xhci, temp_64 | CMD_RING_ABORT, |
| 294 | &xhci->op_regs->cmd_ring); | 302 | &xhci->op_regs->cmd_ring); |
| 295 | 303 | ||
| @@ -314,6 +322,7 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci) | |||
| 314 | 322 | ||
| 315 | xhci_err(xhci, "Stopped the command ring failed, " | 323 | xhci_err(xhci, "Stopped the command ring failed, " |
| 316 | "maybe the host is dead\n"); | 324 | "maybe the host is dead\n"); |
| 325 | del_timer(&xhci->cmd_timer); | ||
| 317 | xhci->xhc_state |= XHCI_STATE_DYING; | 326 | xhci->xhc_state |= XHCI_STATE_DYING; |
| 318 | xhci_quiesce(xhci); | 327 | xhci_quiesce(xhci); |
| 319 | xhci_halt(xhci); | 328 | xhci_halt(xhci); |
| @@ -1246,22 +1255,21 @@ void xhci_handle_command_timeout(unsigned long data) | |||
| 1246 | int ret; | 1255 | int ret; |
| 1247 | unsigned long flags; | 1256 | unsigned long flags; |
| 1248 | u64 hw_ring_state; | 1257 | u64 hw_ring_state; |
| 1249 | struct xhci_command *cur_cmd = NULL; | 1258 | bool second_timeout = false; |
| 1250 | xhci = (struct xhci_hcd *) data; | 1259 | xhci = (struct xhci_hcd *) data; |
| 1251 | 1260 | ||
| 1252 | /* mark this command to be cancelled */ | 1261 | /* mark this command to be cancelled */ |
| 1253 | spin_lock_irqsave(&xhci->lock, flags); | 1262 | spin_lock_irqsave(&xhci->lock, flags); |
| 1254 | if (xhci->current_cmd) { | 1263 | if (xhci->current_cmd) { |
| 1255 | cur_cmd = xhci->current_cmd; | 1264 | if (xhci->current_cmd->status == COMP_CMD_ABORT) |
| 1256 | cur_cmd->status = COMP_CMD_ABORT; | 1265 | second_timeout = true; |
| 1266 | xhci->current_cmd->status = COMP_CMD_ABORT; | ||
| 1257 | } | 1267 | } |
| 1258 | 1268 | ||
| 1259 | |||
| 1260 | /* Make sure command ring is running before aborting it */ | 1269 | /* Make sure command ring is running before aborting it */ |
| 1261 | hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); | 1270 | hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); |
| 1262 | if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) && | 1271 | if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) && |
| 1263 | (hw_ring_state & CMD_RING_RUNNING)) { | 1272 | (hw_ring_state & CMD_RING_RUNNING)) { |
| 1264 | |||
| 1265 | spin_unlock_irqrestore(&xhci->lock, flags); | 1273 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 1266 | xhci_dbg(xhci, "Command timeout\n"); | 1274 | xhci_dbg(xhci, "Command timeout\n"); |
| 1267 | ret = xhci_abort_cmd_ring(xhci); | 1275 | ret = xhci_abort_cmd_ring(xhci); |
| @@ -1273,6 +1281,15 @@ void xhci_handle_command_timeout(unsigned long data) | |||
| 1273 | } | 1281 | } |
| 1274 | return; | 1282 | return; |
| 1275 | } | 1283 | } |
| 1284 | |||
| 1285 | /* command ring failed to restart, or host removed. Bail out */ | ||
| 1286 | if (second_timeout || xhci->xhc_state & XHCI_STATE_REMOVING) { | ||
| 1287 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
| 1288 | xhci_dbg(xhci, "command timed out twice, ring start fail?\n"); | ||
| 1289 | xhci_cleanup_command_queue(xhci); | ||
| 1290 | return; | ||
| 1291 | } | ||
| 1292 | |||
| 1276 | /* command timeout on stopped ring, ring can't be aborted */ | 1293 | /* command timeout on stopped ring, ring can't be aborted */ |
| 1277 | xhci_dbg(xhci, "Command timeout on stopped ring\n"); | 1294 | xhci_dbg(xhci, "Command timeout on stopped ring\n"); |
| 1278 | xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd); | 1295 | xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd); |
| @@ -2721,7 +2738,8 @@ hw_died: | |||
| 2721 | writel(irq_pending, &xhci->ir_set->irq_pending); | 2738 | writel(irq_pending, &xhci->ir_set->irq_pending); |
| 2722 | } | 2739 | } |
| 2723 | 2740 | ||
| 2724 | if (xhci->xhc_state & XHCI_STATE_DYING) { | 2741 | if (xhci->xhc_state & XHCI_STATE_DYING || |
| 2742 | xhci->xhc_state & XHCI_STATE_HALTED) { | ||
| 2725 | xhci_dbg(xhci, "xHCI dying, ignoring interrupt. " | 2743 | xhci_dbg(xhci, "xHCI dying, ignoring interrupt. " |
| 2726 | "Shouldn't IRQs be disabled?\n"); | 2744 | "Shouldn't IRQs be disabled?\n"); |
| 2727 | /* Clear the event handler busy flag (RW1C); | 2745 | /* Clear the event handler busy flag (RW1C); |
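The abort path now arms the command watchdog itself, because some hosts clear CMD_RING_RUNNING but never deliver the completion event for CMD_RING_ABORT; if the watchdog then fires again with the current command already marked COMP_CMD_ABORT, the handler concludes the ring failed to restart and cleans the whole queue. A compressed sketch of that timer dance, stitched together from the symbols in the hunks above (not a drop-in):

	/* Arm a guard timer before requesting the abort ... */
	mod_timer(&xhci->cmd_timer, jiffies + 2 * XHCI_CMD_DEFAULT_TIMEOUT);
	xhci_write_64(xhci, temp_64 | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);

	/* ... and in the timeout handler, detect a repeat timeout by the
	 * status the first timeout already stamped on the command. */
	if (xhci->current_cmd && xhci->current_cmd->status == COMP_CMD_ABORT)
		second_timeout = true;	/* abort was already requested once */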
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index fa7e1ef36cd9..f2f9518c53ab 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
| @@ -685,20 +685,23 @@ void xhci_stop(struct usb_hcd *hcd) | |||
| 685 | u32 temp; | 685 | u32 temp; |
| 686 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | 686 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
| 687 | 687 | ||
| 688 | if (xhci->xhc_state & XHCI_STATE_HALTED) | ||
| 689 | return; | ||
| 690 | |||
| 691 | mutex_lock(&xhci->mutex); | 688 | mutex_lock(&xhci->mutex); |
| 692 | spin_lock_irq(&xhci->lock); | ||
| 693 | xhci->xhc_state |= XHCI_STATE_HALTED; | ||
| 694 | xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; | ||
| 695 | 689 | ||
| 696 | /* Make sure the xHC is halted for a USB3 roothub | 690 | if (!(xhci->xhc_state & XHCI_STATE_HALTED)) { |
| 697 | * (xhci_stop() could be called as part of failed init). | 691 | spin_lock_irq(&xhci->lock); |
| 698 | */ | 692 | |
| 699 | xhci_halt(xhci); | 693 | xhci->xhc_state |= XHCI_STATE_HALTED; |
| 700 | xhci_reset(xhci); | 694 | xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; |
| 701 | spin_unlock_irq(&xhci->lock); | 695 | xhci_halt(xhci); |
| 696 | xhci_reset(xhci); | ||
| 697 | |||
| 698 | spin_unlock_irq(&xhci->lock); | ||
| 699 | } | ||
| 700 | |||
| 701 | if (!usb_hcd_is_primary_hcd(hcd)) { | ||
| 702 | mutex_unlock(&xhci->mutex); | ||
| 703 | return; | ||
| 704 | } | ||
| 702 | 705 | ||
| 703 | xhci_cleanup_msix(xhci); | 706 | xhci_cleanup_msix(xhci); |
| 704 | 707 | ||
| @@ -4886,7 +4889,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) | |||
| 4886 | xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2); | 4889 | xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2); |
| 4887 | xhci_print_registers(xhci); | 4890 | xhci_print_registers(xhci); |
| 4888 | 4891 | ||
| 4889 | xhci->quirks = quirks; | 4892 | xhci->quirks |= quirks; |
| 4890 | 4893 | ||
| 4891 | get_quirks(dev, xhci); | 4894 | get_quirks(dev, xhci); |
| 4892 | 4895 | ||
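Switching `xhci->quirks = quirks` to `|=` matters because quirk bits may already be set before xhci_gen_setup() runs (for instance by platform glue); plain assignment silently discarded them. A two-case standalone illustration of the difference:

	#include <stdio.h>

	#define QUIRK_A (1u << 0)	/* set earlier, e.g. by platform glue */
	#define QUIRK_B (1u << 1)	/* passed into the setup function     */

	int main(void)
	{
		unsigned int quirks = QUIRK_A;

		quirks = QUIRK_B;	/* old: earlier bit lost  -> 0x2 */
		printf("assign: %#x\n", quirks);

		quirks = QUIRK_A;
		quirks |= QUIRK_B;	/* new: bits accumulate   -> 0x3 */
		printf("or-in : %#x\n", quirks);
		return 0;
	}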
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 39fd95833eb8..f824336def5c 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c | |||
| @@ -1090,29 +1090,6 @@ void musb_stop(struct musb *musb) | |||
| 1090 | musb_platform_try_idle(musb, 0); | 1090 | musb_platform_try_idle(musb, 0); |
| 1091 | } | 1091 | } |
| 1092 | 1092 | ||
| 1093 | static void musb_shutdown(struct platform_device *pdev) | ||
| 1094 | { | ||
| 1095 | struct musb *musb = dev_to_musb(&pdev->dev); | ||
| 1096 | unsigned long flags; | ||
| 1097 | |||
| 1098 | pm_runtime_get_sync(musb->controller); | ||
| 1099 | |||
| 1100 | musb_host_cleanup(musb); | ||
| 1101 | musb_gadget_cleanup(musb); | ||
| 1102 | |||
| 1103 | spin_lock_irqsave(&musb->lock, flags); | ||
| 1104 | musb_platform_disable(musb); | ||
| 1105 | musb_generic_disable(musb); | ||
| 1106 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 1107 | |||
| 1108 | musb_writeb(musb->mregs, MUSB_DEVCTL, 0); | ||
| 1109 | musb_platform_exit(musb); | ||
| 1110 | |||
| 1111 | pm_runtime_put(musb->controller); | ||
| 1112 | /* FIXME power down */ | ||
| 1113 | } | ||
| 1114 | |||
| 1115 | |||
| 1116 | /*-------------------------------------------------------------------------*/ | 1093 | /*-------------------------------------------------------------------------*/ |
| 1117 | 1094 | ||
| 1118 | /* | 1095 | /* |
| @@ -1702,7 +1679,7 @@ EXPORT_SYMBOL_GPL(musb_dma_completion); | |||
| 1702 | #define use_dma 0 | 1679 | #define use_dma 0 |
| 1703 | #endif | 1680 | #endif |
| 1704 | 1681 | ||
| 1705 | static void (*musb_phy_callback)(enum musb_vbus_id_status status); | 1682 | static int (*musb_phy_callback)(enum musb_vbus_id_status status); |
| 1706 | 1683 | ||
| 1707 | /* | 1684 | /* |
| 1708 | * musb_mailbox - optional phy notifier function | 1685 | * musb_mailbox - optional phy notifier function |
| @@ -1711,11 +1688,12 @@ static void (*musb_phy_callback)(enum musb_vbus_id_status status); | |||
| 1711 | * Optionally gets called from the USB PHY. Note that the USB PHY must be | 1688 | * Optionally gets called from the USB PHY. Note that the USB PHY must be |
| 1712 | * disabled at the point the phy_callback is registered or unregistered. | 1689 | * disabled at the point the phy_callback is registered or unregistered. |
| 1713 | */ | 1690 | */ |
| 1714 | void musb_mailbox(enum musb_vbus_id_status status) | 1691 | int musb_mailbox(enum musb_vbus_id_status status) |
| 1715 | { | 1692 | { |
| 1716 | if (musb_phy_callback) | 1693 | if (musb_phy_callback) |
| 1717 | musb_phy_callback(status); | 1694 | return musb_phy_callback(status); |
| 1718 | 1695 | ||
| 1696 | return -ENODEV; | ||
| 1719 | }; | 1697 | }; |
| 1720 | EXPORT_SYMBOL_GPL(musb_mailbox); | 1698 | EXPORT_SYMBOL_GPL(musb_mailbox); |
| 1721 | 1699 | ||
| @@ -2028,11 +2006,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) | |||
| 2028 | musb_readl = musb_default_readl; | 2006 | musb_readl = musb_default_readl; |
| 2029 | musb_writel = musb_default_writel; | 2007 | musb_writel = musb_default_writel; |
| 2030 | 2008 | ||
| 2031 | /* We need musb_read/write functions initialized for PM */ | ||
| 2032 | pm_runtime_use_autosuspend(musb->controller); | ||
| 2033 | pm_runtime_set_autosuspend_delay(musb->controller, 200); | ||
| 2034 | pm_runtime_enable(musb->controller); | ||
| 2035 | |||
| 2036 | /* The musb_platform_init() call: | 2009 | /* The musb_platform_init() call: |
| 2037 | * - adjusts musb->mregs | 2010 | * - adjusts musb->mregs |
| 2038 | * - sets the musb->isr | 2011 | * - sets the musb->isr |
| @@ -2134,6 +2107,16 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) | |||
| 2134 | if (musb->ops->phy_callback) | 2107 | if (musb->ops->phy_callback) |
| 2135 | musb_phy_callback = musb->ops->phy_callback; | 2108 | musb_phy_callback = musb->ops->phy_callback; |
| 2136 | 2109 | ||
| 2110 | /* | ||
| 2111 | * We need musb_read/write functions initialized for PM. | ||
| 2112 | * Note that at least 2430 glue needs autosuspend delay | ||
| 2113 | * somewhere above 300 ms for the hardware to idle properly | ||
| 2114 | * after disconnecting the cable in host mode. Let's use | ||
| 2115 | * 500 ms for some margin. | ||
| 2116 | */ | ||
| 2117 | pm_runtime_use_autosuspend(musb->controller); | ||
| 2118 | pm_runtime_set_autosuspend_delay(musb->controller, 500); | ||
| 2119 | pm_runtime_enable(musb->controller); | ||
| 2137 | pm_runtime_get_sync(musb->controller); | 2120 | pm_runtime_get_sync(musb->controller); |
| 2138 | 2121 | ||
| 2139 | status = usb_phy_init(musb->xceiv); | 2122 | status = usb_phy_init(musb->xceiv); |
| @@ -2237,13 +2220,8 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) | |||
| 2237 | if (status) | 2220 | if (status) |
| 2238 | goto fail5; | 2221 | goto fail5; |
| 2239 | 2222 | ||
| 2240 | pm_runtime_put(musb->controller); | 2223 | pm_runtime_mark_last_busy(musb->controller); |
| 2241 | 2224 | pm_runtime_put_autosuspend(musb->controller); | |
| 2242 | /* | ||
| 2243 | * For why this is currently needed, see commit 3e43a0725637 | ||
| 2244 | * ("usb: musb: core: add pm_runtime_irq_safe()") | ||
| 2245 | */ | ||
| 2246 | pm_runtime_irq_safe(musb->controller); | ||
| 2247 | 2225 | ||
| 2248 | return 0; | 2226 | return 0; |
| 2249 | 2227 | ||
| @@ -2265,7 +2243,9 @@ fail2_5: | |||
| 2265 | usb_phy_shutdown(musb->xceiv); | 2243 | usb_phy_shutdown(musb->xceiv); |
| 2266 | 2244 | ||
| 2267 | err_usb_phy_init: | 2245 | err_usb_phy_init: |
| 2246 | pm_runtime_dont_use_autosuspend(musb->controller); | ||
| 2268 | pm_runtime_put_sync(musb->controller); | 2247 | pm_runtime_put_sync(musb->controller); |
| 2248 | pm_runtime_disable(musb->controller); | ||
| 2269 | 2249 | ||
| 2270 | fail2: | 2250 | fail2: |
| 2271 | if (musb->irq_wake) | 2251 | if (musb->irq_wake) |
| @@ -2273,7 +2253,6 @@ fail2: | |||
| 2273 | musb_platform_exit(musb); | 2253 | musb_platform_exit(musb); |
| 2274 | 2254 | ||
| 2275 | fail1: | 2255 | fail1: |
| 2276 | pm_runtime_disable(musb->controller); | ||
| 2277 | dev_err(musb->controller, | 2256 | dev_err(musb->controller, |
| 2278 | "musb_init_controller failed with status %d\n", status); | 2257 | "musb_init_controller failed with status %d\n", status); |
| 2279 | 2258 | ||
| @@ -2312,6 +2291,7 @@ static int musb_remove(struct platform_device *pdev) | |||
| 2312 | { | 2291 | { |
| 2313 | struct device *dev = &pdev->dev; | 2292 | struct device *dev = &pdev->dev; |
| 2314 | struct musb *musb = dev_to_musb(dev); | 2293 | struct musb *musb = dev_to_musb(dev); |
| 2294 | unsigned long flags; | ||
| 2315 | 2295 | ||
| 2316 | /* this gets called on rmmod. | 2296 | /* this gets called on rmmod. |
| 2317 | * - Host mode: host may still be active | 2297 | * - Host mode: host may still be active |
| @@ -2319,17 +2299,26 @@ static int musb_remove(struct platform_device *pdev) | |||
| 2319 | * - OTG mode: both roles are deactivated (or never-activated) | 2299 | * - OTG mode: both roles are deactivated (or never-activated) |
| 2320 | */ | 2300 | */ |
| 2321 | musb_exit_debugfs(musb); | 2301 | musb_exit_debugfs(musb); |
| 2322 | musb_shutdown(pdev); | ||
| 2323 | musb_phy_callback = NULL; | ||
| 2324 | |||
| 2325 | if (musb->dma_controller) | ||
| 2326 | musb_dma_controller_destroy(musb->dma_controller); | ||
| 2327 | |||
| 2328 | usb_phy_shutdown(musb->xceiv); | ||
| 2329 | 2302 | ||
| 2330 | cancel_work_sync(&musb->irq_work); | 2303 | cancel_work_sync(&musb->irq_work); |
| 2331 | cancel_delayed_work_sync(&musb->finish_resume_work); | 2304 | cancel_delayed_work_sync(&musb->finish_resume_work); |
| 2332 | cancel_delayed_work_sync(&musb->deassert_reset_work); | 2305 | cancel_delayed_work_sync(&musb->deassert_reset_work); |
| 2306 | pm_runtime_get_sync(musb->controller); | ||
| 2307 | musb_host_cleanup(musb); | ||
| 2308 | musb_gadget_cleanup(musb); | ||
| 2309 | spin_lock_irqsave(&musb->lock, flags); | ||
| 2310 | musb_platform_disable(musb); | ||
| 2311 | musb_generic_disable(musb); | ||
| 2312 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 2313 | musb_writeb(musb->mregs, MUSB_DEVCTL, 0); | ||
| 2314 | pm_runtime_dont_use_autosuspend(musb->controller); | ||
| 2315 | pm_runtime_put_sync(musb->controller); | ||
| 2316 | pm_runtime_disable(musb->controller); | ||
| 2317 | musb_platform_exit(musb); | ||
| 2318 | musb_phy_callback = NULL; | ||
| 2319 | if (musb->dma_controller) | ||
| 2320 | musb_dma_controller_destroy(musb->dma_controller); | ||
| 2321 | usb_phy_shutdown(musb->xceiv); | ||
| 2333 | musb_free(musb); | 2322 | musb_free(musb); |
| 2334 | device_init_wakeup(dev, 0); | 2323 | device_init_wakeup(dev, 0); |
| 2335 | return 0; | 2324 | return 0; |
| @@ -2429,7 +2418,8 @@ static void musb_restore_context(struct musb *musb) | |||
| 2429 | musb_writew(musb_base, MUSB_INTRTXE, musb->intrtxe); | 2418 | musb_writew(musb_base, MUSB_INTRTXE, musb->intrtxe); |
| 2430 | musb_writew(musb_base, MUSB_INTRRXE, musb->intrrxe); | 2419 | musb_writew(musb_base, MUSB_INTRRXE, musb->intrrxe); |
| 2431 | musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe); | 2420 | musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe); |
| 2432 | musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl); | 2421 | if (musb->context.devctl & MUSB_DEVCTL_SESSION) |
| 2422 | musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl); | ||
| 2433 | 2423 | ||
| 2434 | for (i = 0; i < musb->config->num_eps; ++i) { | 2424 | for (i = 0; i < musb->config->num_eps; ++i) { |
| 2435 | struct musb_hw_ep *hw_ep; | 2425 | struct musb_hw_ep *hw_ep; |
| @@ -2612,7 +2602,6 @@ static struct platform_driver musb_driver = { | |||
| 2612 | }, | 2602 | }, |
| 2613 | .probe = musb_probe, | 2603 | .probe = musb_probe, |
| 2614 | .remove = musb_remove, | 2604 | .remove = musb_remove, |
| 2615 | .shutdown = musb_shutdown, | ||
| 2616 | }; | 2605 | }; |
| 2617 | 2606 | ||
| 2618 | module_platform_driver(musb_driver); | 2607 | module_platform_driver(musb_driver); |
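The runtime-PM rework in this file follows one pattern throughout: enable autosuspend with a 500 ms delay once the callbacks are safe to run, and replace pm_runtime_put() on frequently hit paths with mark_last_busy plus put_autosuspend so the controller gets that grace period before idling. A minimal sketch of the generic pattern outside of musb (the bare `dev` pointer and the 500 ms value are the assumptions here):

	/* Generic runtime-PM autosuspend pattern mirrored by these changes. */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 500);	/* ms of idle grace time */
	pm_runtime_enable(dev);

	pm_runtime_get_sync(dev);		/* wake and hold the device    */
	/* ... do work that needs the hardware powered ... */
	pm_runtime_mark_last_busy(dev);		/* restart the idle timer      */
	pm_runtime_put_autosuspend(dev);	/* drop the ref, suspend later */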
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h index b6afe9e43305..b55a776b03eb 100644 --- a/drivers/usb/musb/musb_core.h +++ b/drivers/usb/musb/musb_core.h | |||
| @@ -215,7 +215,7 @@ struct musb_platform_ops { | |||
| 215 | dma_addr_t *dma_addr, u32 *len); | 215 | dma_addr_t *dma_addr, u32 *len); |
| 216 | void (*pre_root_reset_end)(struct musb *musb); | 216 | void (*pre_root_reset_end)(struct musb *musb); |
| 217 | void (*post_root_reset_end)(struct musb *musb); | 217 | void (*post_root_reset_end)(struct musb *musb); |
| 218 | void (*phy_callback)(enum musb_vbus_id_status status); | 218 | int (*phy_callback)(enum musb_vbus_id_status status); |
| 219 | }; | 219 | }; |
| 220 | 220 | ||
| 221 | /* | 221 | /* |
| @@ -312,6 +312,7 @@ struct musb { | |||
| 312 | struct work_struct irq_work; | 312 | struct work_struct irq_work; |
| 313 | struct delayed_work deassert_reset_work; | 313 | struct delayed_work deassert_reset_work; |
| 314 | struct delayed_work finish_resume_work; | 314 | struct delayed_work finish_resume_work; |
| 315 | struct delayed_work gadget_work; | ||
| 315 | u16 hwvers; | 316 | u16 hwvers; |
| 316 | 317 | ||
| 317 | u16 intrrxe; | 318 | u16 intrrxe; |
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index 152865b36522..af2a3a7addf9 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c | |||
| @@ -1656,6 +1656,20 @@ static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA) | |||
| 1656 | return usb_phy_set_power(musb->xceiv, mA); | 1656 | return usb_phy_set_power(musb->xceiv, mA); |
| 1657 | } | 1657 | } |
| 1658 | 1658 | ||
| 1659 | static void musb_gadget_work(struct work_struct *work) | ||
| 1660 | { | ||
| 1661 | struct musb *musb; | ||
| 1662 | unsigned long flags; | ||
| 1663 | |||
| 1664 | musb = container_of(work, struct musb, gadget_work.work); | ||
| 1665 | pm_runtime_get_sync(musb->controller); | ||
| 1666 | spin_lock_irqsave(&musb->lock, flags); | ||
| 1667 | musb_pullup(musb, musb->softconnect); | ||
| 1668 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 1669 | pm_runtime_mark_last_busy(musb->controller); | ||
| 1670 | pm_runtime_put_autosuspend(musb->controller); | ||
| 1671 | } | ||
| 1672 | |||
| 1659 | static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on) | 1673 | static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on) |
| 1660 | { | 1674 | { |
| 1661 | struct musb *musb = gadget_to_musb(gadget); | 1675 | struct musb *musb = gadget_to_musb(gadget); |
| @@ -1663,20 +1677,16 @@ static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on) | |||
| 1663 | 1677 | ||
| 1664 | is_on = !!is_on; | 1678 | is_on = !!is_on; |
| 1665 | 1679 | ||
| 1666 | pm_runtime_get_sync(musb->controller); | ||
| 1667 | |||
| 1668 | /* NOTE: this assumes we are sensing vbus; we'd rather | 1680 | /* NOTE: this assumes we are sensing vbus; we'd rather |
| 1669 | * not pullup unless the B-session is active. | 1681 | * not pullup unless the B-session is active. |
| 1670 | */ | 1682 | */ |
| 1671 | spin_lock_irqsave(&musb->lock, flags); | 1683 | spin_lock_irqsave(&musb->lock, flags); |
| 1672 | if (is_on != musb->softconnect) { | 1684 | if (is_on != musb->softconnect) { |
| 1673 | musb->softconnect = is_on; | 1685 | musb->softconnect = is_on; |
| 1674 | musb_pullup(musb, is_on); | 1686 | schedule_delayed_work(&musb->gadget_work, 0); |
| 1675 | } | 1687 | } |
| 1676 | spin_unlock_irqrestore(&musb->lock, flags); | 1688 | spin_unlock_irqrestore(&musb->lock, flags); |
| 1677 | 1689 | ||
| 1678 | pm_runtime_put(musb->controller); | ||
| 1679 | |||
| 1680 | return 0; | 1690 | return 0; |
| 1681 | } | 1691 | } |
| 1682 | 1692 | ||
| @@ -1845,7 +1855,7 @@ int musb_gadget_setup(struct musb *musb) | |||
| 1845 | #elif IS_ENABLED(CONFIG_USB_MUSB_GADGET) | 1855 | #elif IS_ENABLED(CONFIG_USB_MUSB_GADGET) |
| 1846 | musb->g.is_otg = 0; | 1856 | musb->g.is_otg = 0; |
| 1847 | #endif | 1857 | #endif |
| 1848 | 1858 | INIT_DELAYED_WORK(&musb->gadget_work, musb_gadget_work); | |
| 1849 | musb_g_init_endpoints(musb); | 1859 | musb_g_init_endpoints(musb); |
| 1850 | 1860 | ||
| 1851 | musb->is_active = 0; | 1861 | musb->is_active = 0; |
| @@ -1866,6 +1876,8 @@ void musb_gadget_cleanup(struct musb *musb) | |||
| 1866 | { | 1876 | { |
| 1867 | if (musb->port_mode == MUSB_PORT_MODE_HOST) | 1877 | if (musb->port_mode == MUSB_PORT_MODE_HOST) |
| 1868 | return; | 1878 | return; |
| 1879 | |||
| 1880 | cancel_delayed_work_sync(&musb->gadget_work); | ||
| 1869 | usb_del_gadget_udc(&musb->g); | 1881 | usb_del_gadget_udc(&musb->g); |
| 1870 | } | 1882 | } |
| 1871 | 1883 | ||
| @@ -1914,8 +1926,8 @@ static int musb_gadget_start(struct usb_gadget *g, | |||
| 1914 | if (musb->xceiv->last_event == USB_EVENT_ID) | 1926 | if (musb->xceiv->last_event == USB_EVENT_ID) |
| 1915 | musb_platform_set_vbus(musb, 1); | 1927 | musb_platform_set_vbus(musb, 1); |
| 1916 | 1928 | ||
| 1917 | if (musb->xceiv->last_event == USB_EVENT_NONE) | 1929 | pm_runtime_mark_last_busy(musb->controller); |
| 1918 | pm_runtime_put(musb->controller); | 1930 | pm_runtime_put_autosuspend(musb->controller); |
| 1919 | 1931 | ||
| 1920 | return 0; | 1932 | return 0; |
| 1921 | 1933 | ||
| @@ -1934,8 +1946,7 @@ static int musb_gadget_stop(struct usb_gadget *g) | |||
| 1934 | struct musb *musb = gadget_to_musb(g); | 1946 | struct musb *musb = gadget_to_musb(g); |
| 1935 | unsigned long flags; | 1947 | unsigned long flags; |
| 1936 | 1948 | ||
| 1937 | if (musb->xceiv->last_event == USB_EVENT_NONE) | 1949 | pm_runtime_get_sync(musb->controller); |
| 1938 | pm_runtime_get_sync(musb->controller); | ||
| 1939 | 1950 | ||
| 1940 | /* | 1951 | /* |
| 1941 | * REVISIT always use otg_set_peripheral() here too; | 1952 | * REVISIT always use otg_set_peripheral() here too; |
| @@ -1963,7 +1974,8 @@ static int musb_gadget_stop(struct usb_gadget *g) | |||
| 1963 | * that currently misbehaves. | 1974 | * that currently misbehaves. |
| 1964 | */ | 1975 | */ |
| 1965 | 1976 | ||
| 1966 | pm_runtime_put(musb->controller); | 1977 | pm_runtime_mark_last_busy(musb->controller); |
| 1978 | pm_runtime_put_autosuspend(musb->controller); | ||
| 1967 | 1979 | ||
| 1968 | return 0; | 1980 | return 0; |
| 1969 | } | 1981 | } |
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 2f8ad7f1f482..d227a71d85e1 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c | |||
| @@ -434,7 +434,13 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb, | |||
| 434 | } | 434 | } |
| 435 | } | 435 | } |
| 436 | 436 | ||
| 437 | if (qh != NULL && qh->is_ready) { | 437 | /* |
| 438 | * The pipe must be broken if the current urb->status is set, so don't | ||
| 439 | * start the next urb. | ||
| 440 | * TODO: to minimize the risk of regression, only check urb->status | ||
| 441 | * for RX, until we have a test case to understand the behavior of TX. | ||
| 442 | */ | ||
| 443 | if ((!status || !is_in) && qh && qh->is_ready) { | ||
| 438 | dev_dbg(musb->controller, "... next ep%d %cX urb %p\n", | 444 | dev_dbg(musb->controller, "... next ep%d %cX urb %p\n", |
| 439 | hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh)); | 445 | hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh)); |
| 440 | musb_start_urb(musb, is_in, qh); | 446 | musb_start_urb(musb, is_in, qh); |
| @@ -594,14 +600,13 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum) | |||
| 594 | musb_writew(ep->regs, MUSB_TXCSR, 0); | 600 | musb_writew(ep->regs, MUSB_TXCSR, 0); |
| 595 | 601 | ||
| 596 | /* scrub all previous state, clearing toggle */ | 602 | /* scrub all previous state, clearing toggle */ |
| 597 | } else { | ||
| 598 | csr = musb_readw(ep->regs, MUSB_RXCSR); | ||
| 599 | if (csr & MUSB_RXCSR_RXPKTRDY) | ||
| 600 | WARNING("rx%d, packet/%d ready?\n", ep->epnum, | ||
| 601 | musb_readw(ep->regs, MUSB_RXCOUNT)); | ||
| 602 | |||
| 603 | musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG); | ||
| 604 | } | 603 | } |
| 604 | csr = musb_readw(ep->regs, MUSB_RXCSR); | ||
| 605 | if (csr & MUSB_RXCSR_RXPKTRDY) | ||
| 606 | WARNING("rx%d, packet/%d ready?\n", ep->epnum, | ||
| 607 | musb_readw(ep->regs, MUSB_RXCOUNT)); | ||
| 608 | |||
| 609 | musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG); | ||
| 605 | 610 | ||
| 606 | /* target addr and (for multipoint) hub addr/port */ | 611 | /* target addr and (for multipoint) hub addr/port */ |
| 607 | if (musb->is_multipoint) { | 612 | if (musb->is_multipoint) { |
| @@ -627,7 +632,7 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum) | |||
| 627 | ep->rx_reinit = 0; | 632 | ep->rx_reinit = 0; |
| 628 | } | 633 | } |
| 629 | 634 | ||
| 630 | static int musb_tx_dma_set_mode_mentor(struct dma_controller *dma, | 635 | static void musb_tx_dma_set_mode_mentor(struct dma_controller *dma, |
| 631 | struct musb_hw_ep *hw_ep, struct musb_qh *qh, | 636 | struct musb_hw_ep *hw_ep, struct musb_qh *qh, |
| 632 | struct urb *urb, u32 offset, | 637 | struct urb *urb, u32 offset, |
| 633 | u32 *length, u8 *mode) | 638 | u32 *length, u8 *mode) |
| @@ -664,23 +669,18 @@ static int musb_tx_dma_set_mode_mentor(struct dma_controller *dma, | |||
| 664 | } | 669 | } |
| 665 | channel->desired_mode = *mode; | 670 | channel->desired_mode = *mode; |
| 666 | musb_writew(epio, MUSB_TXCSR, csr); | 671 | musb_writew(epio, MUSB_TXCSR, csr); |
| 667 | |||
| 668 | return 0; | ||
| 669 | } | 672 | } |
| 670 | 673 | ||
| 671 | static int musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma, | 674 | static void musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma, |
| 672 | struct musb_hw_ep *hw_ep, | 675 | struct musb_hw_ep *hw_ep, |
| 673 | struct musb_qh *qh, | 676 | struct musb_qh *qh, |
| 674 | struct urb *urb, | 677 | struct urb *urb, |
| 675 | u32 offset, | 678 | u32 offset, |
| 676 | u32 *length, | 679 | u32 *length, |
| 677 | u8 *mode) | 680 | u8 *mode) |
| 678 | { | 681 | { |
| 679 | struct dma_channel *channel = hw_ep->tx_channel; | 682 | struct dma_channel *channel = hw_ep->tx_channel; |
| 680 | 683 | ||
| 681 | if (!is_cppi_enabled(hw_ep->musb) && !tusb_dma_omap(hw_ep->musb)) | ||
| 682 | return -ENODEV; | ||
| 683 | |||
| 684 | channel->actual_len = 0; | 684 | channel->actual_len = 0; |
| 685 | 685 | ||
| 686 | /* | 686 | /* |
| @@ -688,8 +688,6 @@ static int musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma, | |||
| 688 | * to identify the zero-length-final-packet case. | 688 | * to identify the zero-length-final-packet case. |
| 689 | */ | 689 | */ |
| 690 | *mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0; | 690 | *mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0; |
| 691 | |||
| 692 | return 0; | ||
| 693 | } | 691 | } |
| 694 | 692 | ||
| 695 | static bool musb_tx_dma_program(struct dma_controller *dma, | 693 | static bool musb_tx_dma_program(struct dma_controller *dma, |
| @@ -699,15 +697,14 @@ static bool musb_tx_dma_program(struct dma_controller *dma, | |||
| 699 | struct dma_channel *channel = hw_ep->tx_channel; | 697 | struct dma_channel *channel = hw_ep->tx_channel; |
| 700 | u16 pkt_size = qh->maxpacket; | 698 | u16 pkt_size = qh->maxpacket; |
| 701 | u8 mode; | 699 | u8 mode; |
| 702 | int res; | ||
| 703 | 700 | ||
| 704 | if (musb_dma_inventra(hw_ep->musb) || musb_dma_ux500(hw_ep->musb)) | 701 | if (musb_dma_inventra(hw_ep->musb) || musb_dma_ux500(hw_ep->musb)) |
| 705 | res = musb_tx_dma_set_mode_mentor(dma, hw_ep, qh, urb, | 702 | musb_tx_dma_set_mode_mentor(dma, hw_ep, qh, urb, offset, |
| 706 | offset, &length, &mode); | 703 | &length, &mode); |
| 704 | else if (is_cppi_enabled(hw_ep->musb) || tusb_dma_omap(hw_ep->musb)) | ||
| 705 | musb_tx_dma_set_mode_cppi_tusb(dma, hw_ep, qh, urb, offset, | ||
| 706 | &length, &mode); | ||
| 707 | else | 707 | else |
| 708 | res = musb_tx_dma_set_mode_cppi_tusb(dma, hw_ep, qh, urb, | ||
| 709 | offset, &length, &mode); | ||
| 710 | if (res) | ||
| 711 | return false; | 708 | return false; |
| 712 | 709 | ||
| 713 | qh->segsize = length; | 710 | qh->segsize = length; |
| @@ -995,9 +992,15 @@ static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep, | |||
| 995 | if (is_in) { | 992 | if (is_in) { |
| 996 | dma = is_dma_capable() ? ep->rx_channel : NULL; | 993 | dma = is_dma_capable() ? ep->rx_channel : NULL; |
| 997 | 994 | ||
| 998 | /* clear nak timeout bit */ | 995 | /* |
| 996 | * Need to stop the transaction by clearing REQPKT first, | ||
| 997 | * then the NAK Timeout bit; see the MUSBMHDRC USB 2.0 HIGH-SPEED | ||
| 998 | * DUAL-ROLE CONTROLLER Programmer's Guide, section 9.2.2. | ||
| 999 | */ | ||
| 999 | rx_csr = musb_readw(epio, MUSB_RXCSR); | 1000 | rx_csr = musb_readw(epio, MUSB_RXCSR); |
| 1000 | rx_csr |= MUSB_RXCSR_H_WZC_BITS; | 1001 | rx_csr |= MUSB_RXCSR_H_WZC_BITS; |
| 1002 | rx_csr &= ~MUSB_RXCSR_H_REQPKT; | ||
| 1003 | musb_writew(epio, MUSB_RXCSR, rx_csr); | ||
| 1001 | rx_csr &= ~MUSB_RXCSR_DATAERROR; | 1004 | rx_csr &= ~MUSB_RXCSR_DATAERROR; |
| 1002 | musb_writew(epio, MUSB_RXCSR, rx_csr); | 1005 | musb_writew(epio, MUSB_RXCSR, rx_csr); |
| 1003 | 1006 | ||
| @@ -1551,7 +1554,7 @@ static int musb_rx_dma_iso_cppi41(struct dma_controller *dma, | |||
| 1551 | struct urb *urb, | 1554 | struct urb *urb, |
| 1552 | size_t len) | 1555 | size_t len) |
| 1553 | { | 1556 | { |
| 1554 | struct dma_channel *channel = hw_ep->tx_channel; | 1557 | struct dma_channel *channel = hw_ep->rx_channel; |
| 1555 | void __iomem *epio = hw_ep->regs; | 1558 | void __iomem *epio = hw_ep->regs; |
| 1556 | dma_addr_t *buf; | 1559 | dma_addr_t *buf; |
| 1557 | u32 length, res; | 1560 | u32 length, res; |
| @@ -1870,6 +1873,9 @@ void musb_host_rx(struct musb *musb, u8 epnum) | |||
| 1870 | status = -EPROTO; | 1873 | status = -EPROTO; |
| 1871 | musb_writeb(epio, MUSB_RXINTERVAL, 0); | 1874 | musb_writeb(epio, MUSB_RXINTERVAL, 0); |
| 1872 | 1875 | ||
| 1876 | rx_csr &= ~MUSB_RXCSR_H_ERROR; | ||
| 1877 | musb_writew(epio, MUSB_RXCSR, rx_csr); | ||
| 1878 | |||
| 1873 | } else if (rx_csr & MUSB_RXCSR_DATAERROR) { | 1879 | } else if (rx_csr & MUSB_RXCSR_DATAERROR) { |
| 1874 | 1880 | ||
| 1875 | if (USB_ENDPOINT_XFER_ISOC != qh->type) { | 1881 | if (USB_ENDPOINT_XFER_ISOC != qh->type) { |
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c index c84e0322c108..0b4cec940386 100644 --- a/drivers/usb/musb/omap2430.c +++ b/drivers/usb/musb/omap2430.c | |||
| @@ -49,97 +49,14 @@ struct omap2430_glue { | |||
| 49 | enum musb_vbus_id_status status; | 49 | enum musb_vbus_id_status status; |
| 50 | struct work_struct omap_musb_mailbox_work; | 50 | struct work_struct omap_musb_mailbox_work; |
| 51 | struct device *control_otghs; | 51 | struct device *control_otghs; |
| 52 | bool cable_connected; | ||
| 53 | bool enabled; | ||
| 54 | bool powered; | ||
| 52 | }; | 55 | }; |
| 53 | #define glue_to_musb(g) platform_get_drvdata(g->musb) | 56 | #define glue_to_musb(g) platform_get_drvdata(g->musb) |
| 54 | 57 | ||
| 55 | static struct omap2430_glue *_glue; | 58 | static struct omap2430_glue *_glue; |
| 56 | 59 | ||
| 57 | static struct timer_list musb_idle_timer; | ||
| 58 | |||
| 59 | static void musb_do_idle(unsigned long _musb) | ||
| 60 | { | ||
| 61 | struct musb *musb = (void *)_musb; | ||
| 62 | unsigned long flags; | ||
| 63 | u8 power; | ||
| 64 | u8 devctl; | ||
| 65 | |||
| 66 | spin_lock_irqsave(&musb->lock, flags); | ||
| 67 | |||
| 68 | switch (musb->xceiv->otg->state) { | ||
| 69 | case OTG_STATE_A_WAIT_BCON: | ||
| 70 | |||
| 71 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | ||
| 72 | if (devctl & MUSB_DEVCTL_BDEVICE) { | ||
| 73 | musb->xceiv->otg->state = OTG_STATE_B_IDLE; | ||
| 74 | MUSB_DEV_MODE(musb); | ||
| 75 | } else { | ||
| 76 | musb->xceiv->otg->state = OTG_STATE_A_IDLE; | ||
| 77 | MUSB_HST_MODE(musb); | ||
| 78 | } | ||
| 79 | break; | ||
| 80 | case OTG_STATE_A_SUSPEND: | ||
| 81 | /* finish RESUME signaling? */ | ||
| 82 | if (musb->port1_status & MUSB_PORT_STAT_RESUME) { | ||
| 83 | power = musb_readb(musb->mregs, MUSB_POWER); | ||
| 84 | power &= ~MUSB_POWER_RESUME; | ||
| 85 | dev_dbg(musb->controller, "root port resume stopped, power %02x\n", power); | ||
| 86 | musb_writeb(musb->mregs, MUSB_POWER, power); | ||
| 87 | musb->is_active = 1; | ||
| 88 | musb->port1_status &= ~(USB_PORT_STAT_SUSPEND | ||
| 89 | | MUSB_PORT_STAT_RESUME); | ||
| 90 | musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16; | ||
| 91 | usb_hcd_poll_rh_status(musb->hcd); | ||
| 92 | /* NOTE: it might really be A_WAIT_BCON ... */ | ||
| 93 | musb->xceiv->otg->state = OTG_STATE_A_HOST; | ||
| 94 | } | ||
| 95 | break; | ||
| 96 | case OTG_STATE_A_HOST: | ||
| 97 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | ||
| 98 | if (devctl & MUSB_DEVCTL_BDEVICE) | ||
| 99 | musb->xceiv->otg->state = OTG_STATE_B_IDLE; | ||
| 100 | else | ||
| 101 | musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON; | ||
| 102 | default: | ||
| 103 | break; | ||
| 104 | } | ||
| 105 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 106 | } | ||
| 107 | |||
| 108 | |||
| 109 | static void omap2430_musb_try_idle(struct musb *musb, unsigned long timeout) | ||
| 110 | { | ||
| 111 | unsigned long default_timeout = jiffies + msecs_to_jiffies(3); | ||
| 112 | static unsigned long last_timer; | ||
| 113 | |||
| 114 | if (timeout == 0) | ||
| 115 | timeout = default_timeout; | ||
| 116 | |||
| 117 | /* Never idle if active, or when VBUS timeout is not set as host */ | ||
| 118 | if (musb->is_active || ((musb->a_wait_bcon == 0) | ||
| 119 | && (musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON))) { | ||
| 120 | dev_dbg(musb->controller, "%s active, deleting timer\n", | ||
| 121 | usb_otg_state_string(musb->xceiv->otg->state)); | ||
| 122 | del_timer(&musb_idle_timer); | ||
| 123 | last_timer = jiffies; | ||
| 124 | return; | ||
| 125 | } | ||
| 126 | |||
| 127 | if (time_after(last_timer, timeout)) { | ||
| 128 | if (!timer_pending(&musb_idle_timer)) | ||
| 129 | last_timer = timeout; | ||
| 130 | else { | ||
| 131 | dev_dbg(musb->controller, "Longer idle timer already pending, ignoring\n"); | ||
| 132 | return; | ||
| 133 | } | ||
| 134 | } | ||
| 135 | last_timer = timeout; | ||
| 136 | |||
| 137 | dev_dbg(musb->controller, "%s inactive, for idle timer for %lu ms\n", | ||
| 138 | usb_otg_state_string(musb->xceiv->otg->state), | ||
| 139 | (unsigned long)jiffies_to_msecs(timeout - jiffies)); | ||
| 140 | mod_timer(&musb_idle_timer, timeout); | ||
| 141 | } | ||
| 142 | |||
| 143 | static void omap2430_musb_set_vbus(struct musb *musb, int is_on) | 60 | static void omap2430_musb_set_vbus(struct musb *musb, int is_on) |
| 144 | { | 61 | { |
| 145 | struct usb_otg *otg = musb->xceiv->otg; | 62 | struct usb_otg *otg = musb->xceiv->otg; |
| @@ -205,16 +122,6 @@ static void omap2430_musb_set_vbus(struct musb *musb, int is_on) | |||
| 205 | musb_readb(musb->mregs, MUSB_DEVCTL)); | 122 | musb_readb(musb->mregs, MUSB_DEVCTL)); |
| 206 | } | 123 | } |
| 207 | 124 | ||
| 208 | static int omap2430_musb_set_mode(struct musb *musb, u8 musb_mode) | ||
| 209 | { | ||
| 210 | u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | ||
| 211 | |||
| 212 | devctl |= MUSB_DEVCTL_SESSION; | ||
| 213 | musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); | ||
| 214 | |||
| 215 | return 0; | ||
| 216 | } | ||
| 217 | |||
| 218 | static inline void omap2430_low_level_exit(struct musb *musb) | 125 | static inline void omap2430_low_level_exit(struct musb *musb) |
| 219 | { | 126 | { |
| 220 | u32 l; | 127 | u32 l; |
| @@ -234,22 +141,63 @@ static inline void omap2430_low_level_init(struct musb *musb) | |||
| 234 | musb_writel(musb->mregs, OTG_FORCESTDBY, l); | 141 | musb_writel(musb->mregs, OTG_FORCESTDBY, l); |
| 235 | } | 142 | } |
| 236 | 143 | ||
| 237 | static void omap2430_musb_mailbox(enum musb_vbus_id_status status) | 144 | /* |
| 145 | * We can get multiple cable events so we need to keep track | ||
| 146 | * of the power state. Only keep power enabled if USB cable is | ||
| 147 | * connected and a gadget is started. | ||
| 148 | */ | ||
| 149 | static void omap2430_set_power(struct musb *musb, bool enabled, bool cable) | ||
| 150 | { | ||
| 151 | struct device *dev = musb->controller; | ||
| 152 | struct omap2430_glue *glue = dev_get_drvdata(dev->parent); | ||
| 153 | bool power_up; | ||
| 154 | int res; | ||
| 155 | |||
| 156 | if (glue->enabled != enabled) | ||
| 157 | glue->enabled = enabled; | ||
| 158 | |||
| 159 | if (glue->cable_connected != cable) | ||
| 160 | glue->cable_connected = cable; | ||
| 161 | |||
| 162 | power_up = glue->enabled && glue->cable_connected; | ||
| 163 | if (power_up == glue->powered) { | ||
| 164 | dev_warn(musb->controller, "power state already %i\n", | ||
| 165 | power_up); | ||
| 166 | return; | ||
| 167 | } | ||
| 168 | |||
| 169 | glue->powered = power_up; | ||
| 170 | |||
| 171 | if (power_up) { | ||
| 172 | res = pm_runtime_get_sync(musb->controller); | ||
| 173 | if (res < 0) { | ||
| 174 | dev_err(musb->controller, "could not enable: %i", res); | ||
| 175 | glue->powered = false; | ||
| 176 | } | ||
| 177 | } else { | ||
| 178 | pm_runtime_mark_last_busy(musb->controller); | ||
| 179 | pm_runtime_put_autosuspend(musb->controller); | ||
| 180 | } | ||
| 181 | } | ||
| 182 | |||
| 183 | static int omap2430_musb_mailbox(enum musb_vbus_id_status status) | ||
| 238 | { | 184 | { |
| 239 | struct omap2430_glue *glue = _glue; | 185 | struct omap2430_glue *glue = _glue; |
| 240 | 186 | ||
| 241 | if (!glue) { | 187 | if (!glue) { |
| 242 | pr_err("%s: musb core is not yet initialized\n", __func__); | 188 | pr_err("%s: musb core is not yet initialized\n", __func__); |
| 243 | return; | 189 | return -EPROBE_DEFER; |
| 244 | } | 190 | } |
| 245 | glue->status = status; | 191 | glue->status = status; |
| 246 | 192 | ||
| 247 | if (!glue_to_musb(glue)) { | 193 | if (!glue_to_musb(glue)) { |
| 248 | pr_err("%s: musb core is not yet ready\n", __func__); | 194 | pr_err("%s: musb core is not yet ready\n", __func__); |
| 249 | return; | 195 | return -EPROBE_DEFER; |
| 250 | } | 196 | } |
| 251 | 197 | ||
| 252 | schedule_work(&glue->omap_musb_mailbox_work); | 198 | schedule_work(&glue->omap_musb_mailbox_work); |
| 199 | |||
| 200 | return 0; | ||
| 253 | } | 201 | } |
| 254 | 202 | ||
| 255 | static void omap_musb_set_mailbox(struct omap2430_glue *glue) | 203 | static void omap_musb_set_mailbox(struct omap2430_glue *glue) |
| @@ -259,6 +207,13 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue) | |||
| 259 | struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev); | 207 | struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev); |
| 260 | struct omap_musb_board_data *data = pdata->board_data; | 208 | struct omap_musb_board_data *data = pdata->board_data; |
| 261 | struct usb_otg *otg = musb->xceiv->otg; | 209 | struct usb_otg *otg = musb->xceiv->otg; |
| 210 | bool cable_connected; | ||
| 211 | |||
| 212 | cable_connected = ((glue->status == MUSB_ID_GROUND) || | ||
| 213 | (glue->status == MUSB_VBUS_VALID)); | ||
| 214 | |||
| 215 | if (cable_connected) | ||
| 216 | omap2430_set_power(musb, glue->enabled, cable_connected); | ||
| 262 | 217 | ||
| 263 | switch (glue->status) { | 218 | switch (glue->status) { |
| 264 | case MUSB_ID_GROUND: | 219 | case MUSB_ID_GROUND: |
| @@ -268,7 +223,6 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue) | |||
| 268 | musb->xceiv->otg->state = OTG_STATE_A_IDLE; | 223 | musb->xceiv->otg->state = OTG_STATE_A_IDLE; |
| 269 | musb->xceiv->last_event = USB_EVENT_ID; | 224 | musb->xceiv->last_event = USB_EVENT_ID; |
| 270 | if (musb->gadget_driver) { | 225 | if (musb->gadget_driver) { |
| 271 | pm_runtime_get_sync(dev); | ||
| 272 | omap_control_usb_set_mode(glue->control_otghs, | 226 | omap_control_usb_set_mode(glue->control_otghs, |
| 273 | USB_MODE_HOST); | 227 | USB_MODE_HOST); |
| 274 | omap2430_musb_set_vbus(musb, 1); | 228 | omap2430_musb_set_vbus(musb, 1); |
| @@ -281,8 +235,6 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue) | |||
| 281 | otg->default_a = false; | 235 | otg->default_a = false; |
| 282 | musb->xceiv->otg->state = OTG_STATE_B_IDLE; | 236 | musb->xceiv->otg->state = OTG_STATE_B_IDLE; |
| 283 | musb->xceiv->last_event = USB_EVENT_VBUS; | 237 | musb->xceiv->last_event = USB_EVENT_VBUS; |
| 284 | if (musb->gadget_driver) | ||
| 285 | pm_runtime_get_sync(dev); | ||
| 286 | omap_control_usb_set_mode(glue->control_otghs, USB_MODE_DEVICE); | 238 | omap_control_usb_set_mode(glue->control_otghs, USB_MODE_DEVICE); |
| 287 | break; | 239 | break; |
| 288 | 240 | ||
| @@ -291,11 +243,8 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue) | |||
| 291 | dev_dbg(dev, "VBUS Disconnect\n"); | 243 | dev_dbg(dev, "VBUS Disconnect\n"); |
| 292 | 244 | ||
| 293 | musb->xceiv->last_event = USB_EVENT_NONE; | 245 | musb->xceiv->last_event = USB_EVENT_NONE; |
| 294 | if (musb->gadget_driver) { | 246 | if (musb->gadget_driver) |
| 295 | omap2430_musb_set_vbus(musb, 0); | 247 | omap2430_musb_set_vbus(musb, 0); |
| 296 | pm_runtime_mark_last_busy(dev); | ||
| 297 | pm_runtime_put_autosuspend(dev); | ||
| 298 | } | ||
| 299 | 248 | ||
| 300 | if (data->interface_type == MUSB_INTERFACE_UTMI) | 249 | if (data->interface_type == MUSB_INTERFACE_UTMI) |
| 301 | otg_set_vbus(musb->xceiv->otg, 0); | 250 | otg_set_vbus(musb->xceiv->otg, 0); |
| @@ -307,6 +256,9 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue) | |||
| 307 | dev_dbg(dev, "ID float\n"); | 256 | dev_dbg(dev, "ID float\n"); |
| 308 | } | 257 | } |
| 309 | 258 | ||
| 259 | if (!cable_connected) | ||
| 260 | omap2430_set_power(musb, glue->enabled, cable_connected); | ||
| 261 | |||
| 310 | atomic_notifier_call_chain(&musb->xceiv->notifier, | 262 | atomic_notifier_call_chain(&musb->xceiv->notifier, |
| 311 | musb->xceiv->last_event, NULL); | 263 | musb->xceiv->last_event, NULL); |
| 312 | } | 264 | } |
| @@ -316,13 +268,8 @@ static void omap_musb_mailbox_work(struct work_struct *mailbox_work) | |||
| 316 | { | 268 | { |
| 317 | struct omap2430_glue *glue = container_of(mailbox_work, | 269 | struct omap2430_glue *glue = container_of(mailbox_work, |
| 318 | struct omap2430_glue, omap_musb_mailbox_work); | 270 | struct omap2430_glue, omap_musb_mailbox_work); |
| 319 | struct musb *musb = glue_to_musb(glue); | ||
| 320 | struct device *dev = musb->controller; | ||
| 321 | 271 | ||
| 322 | pm_runtime_get_sync(dev); | ||
| 323 | omap_musb_set_mailbox(glue); | 272 | omap_musb_set_mailbox(glue); |
| 324 | pm_runtime_mark_last_busy(dev); | ||
| 325 | pm_runtime_put_autosuspend(dev); | ||
| 326 | } | 273 | } |
| 327 | 274 | ||
| 328 | static irqreturn_t omap2430_musb_interrupt(int irq, void *__hci) | 275 | static irqreturn_t omap2430_musb_interrupt(int irq, void *__hci) |
| @@ -389,23 +336,7 @@ static int omap2430_musb_init(struct musb *musb) | |||
| 389 | return PTR_ERR(musb->phy); | 336 | return PTR_ERR(musb->phy); |
| 390 | } | 337 | } |
| 391 | musb->isr = omap2430_musb_interrupt; | 338 | musb->isr = omap2430_musb_interrupt; |
| 392 | 339 | phy_init(musb->phy); | |
| 393 | /* | ||
| 394 | * Enable runtime PM for musb parent (this driver). We can't | ||
| 395 | * do it earlier as struct musb is not yet allocated and we | ||
| 396 | * need to touch the musb registers for runtime PM. | ||
| 397 | */ | ||
| 398 | pm_runtime_enable(glue->dev); | ||
| 399 | status = pm_runtime_get_sync(glue->dev); | ||
| 400 | if (status < 0) | ||
| 401 | goto err1; | ||
| 402 | |||
| 403 | status = pm_runtime_get_sync(dev); | ||
| 404 | if (status < 0) { | ||
| 405 | dev_err(dev, "pm_runtime_get_sync FAILED %d\n", status); | ||
| 406 | pm_runtime_put_sync(glue->dev); | ||
| 407 | goto err1; | ||
| 408 | } | ||
| 409 | 340 | ||
| 410 | l = musb_readl(musb->mregs, OTG_INTERFSEL); | 341 | l = musb_readl(musb->mregs, OTG_INTERFSEL); |
| 411 | 342 | ||
| @@ -427,20 +358,10 @@ static int omap2430_musb_init(struct musb *musb) | |||
| 427 | musb_readl(musb->mregs, OTG_INTERFSEL), | 358 | musb_readl(musb->mregs, OTG_INTERFSEL), |
| 428 | musb_readl(musb->mregs, OTG_SIMENABLE)); | 359 | musb_readl(musb->mregs, OTG_SIMENABLE)); |
| 429 | 360 | ||
| 430 | setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb); | ||
| 431 | |||
| 432 | if (glue->status != MUSB_UNKNOWN) | 361 | if (glue->status != MUSB_UNKNOWN) |
| 433 | omap_musb_set_mailbox(glue); | 362 | omap_musb_set_mailbox(glue); |
| 434 | 363 | ||
| 435 | phy_init(musb->phy); | ||
| 436 | phy_power_on(musb->phy); | ||
| 437 | |||
| 438 | pm_runtime_put_noidle(musb->controller); | ||
| 439 | pm_runtime_put_noidle(glue->dev); | ||
| 440 | return 0; | 364 | return 0; |
| 441 | |||
| 442 | err1: | ||
| 443 | return status; | ||
| 444 | } | 365 | } |
| 445 | 366 | ||
| 446 | static void omap2430_musb_enable(struct musb *musb) | 367 | static void omap2430_musb_enable(struct musb *musb) |
| @@ -452,6 +373,11 @@ static void omap2430_musb_enable(struct musb *musb) | |||
| 452 | struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev); | 373 | struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev); |
| 453 | struct omap_musb_board_data *data = pdata->board_data; | 374 | struct omap_musb_board_data *data = pdata->board_data; |
| 454 | 375 | ||
| 376 | if (!WARN_ON(!musb->phy)) | ||
| 377 | phy_power_on(musb->phy); | ||
| 378 | |||
| 379 | omap2430_set_power(musb, true, glue->cable_connected); | ||
| 380 | |||
| 455 | switch (glue->status) { | 381 | switch (glue->status) { |
| 456 | 382 | ||
| 457 | case MUSB_ID_GROUND: | 383 | case MUSB_ID_GROUND: |
| @@ -487,18 +413,25 @@ static void omap2430_musb_disable(struct musb *musb) | |||
| 487 | struct device *dev = musb->controller; | 413 | struct device *dev = musb->controller; |
| 488 | struct omap2430_glue *glue = dev_get_drvdata(dev->parent); | 414 | struct omap2430_glue *glue = dev_get_drvdata(dev->parent); |
| 489 | 415 | ||
| 416 | if (!WARN_ON(!musb->phy)) | ||
| 417 | phy_power_off(musb->phy); | ||
| 418 | |||
| 490 | if (glue->status != MUSB_UNKNOWN) | 419 | if (glue->status != MUSB_UNKNOWN) |
| 491 | omap_control_usb_set_mode(glue->control_otghs, | 420 | omap_control_usb_set_mode(glue->control_otghs, |
| 492 | USB_MODE_DISCONNECT); | 421 | USB_MODE_DISCONNECT); |
| 422 | |||
| 423 | omap2430_set_power(musb, false, glue->cable_connected); | ||
| 493 | } | 424 | } |
| 494 | 425 | ||
| 495 | static int omap2430_musb_exit(struct musb *musb) | 426 | static int omap2430_musb_exit(struct musb *musb) |
| 496 | { | 427 | { |
| 497 | del_timer_sync(&musb_idle_timer); | 428 | struct device *dev = musb->controller; |
| 429 | struct omap2430_glue *glue = dev_get_drvdata(dev->parent); | ||
| 498 | 430 | ||
| 499 | omap2430_low_level_exit(musb); | 431 | omap2430_low_level_exit(musb); |
| 500 | phy_power_off(musb->phy); | ||
| 501 | phy_exit(musb->phy); | 432 | phy_exit(musb->phy); |
| 433 | musb->phy = NULL; | ||
| 434 | cancel_work_sync(&glue->omap_musb_mailbox_work); | ||
| 502 | 435 | ||
| 503 | return 0; | 436 | return 0; |
| 504 | } | 437 | } |
| @@ -512,9 +445,6 @@ static const struct musb_platform_ops omap2430_ops = { | |||
| 512 | .init = omap2430_musb_init, | 445 | .init = omap2430_musb_init, |
| 513 | .exit = omap2430_musb_exit, | 446 | .exit = omap2430_musb_exit, |
| 514 | 447 | ||
| 515 | .set_mode = omap2430_musb_set_mode, | ||
| 516 | .try_idle = omap2430_musb_try_idle, | ||
| 517 | |||
| 518 | .set_vbus = omap2430_musb_set_vbus, | 448 | .set_vbus = omap2430_musb_set_vbus, |
| 519 | 449 | ||
| 520 | .enable = omap2430_musb_enable, | 450 | .enable = omap2430_musb_enable, |
| @@ -639,11 +569,9 @@ static int omap2430_probe(struct platform_device *pdev) | |||
| 639 | goto err2; | 569 | goto err2; |
| 640 | } | 570 | } |
| 641 | 571 | ||
| 642 | /* | 572 | pm_runtime_enable(glue->dev); |
| 643 | * Note that we cannot enable PM runtime yet for this | 573 | pm_runtime_use_autosuspend(glue->dev); |
| 644 | * driver as we need struct musb initialized first. | 574 | pm_runtime_set_autosuspend_delay(glue->dev, 500); |
| 645 | * See omap2430_musb_init above. | ||
| 646 | */ | ||
| 647 | 575 | ||
| 648 | ret = platform_device_add(musb); | 576 | ret = platform_device_add(musb); |
| 649 | if (ret) { | 577 | if (ret) { |
| @@ -662,12 +590,14 @@ err0: | |||
| 662 | 590 | ||
| 663 | static int omap2430_remove(struct platform_device *pdev) | 591 | static int omap2430_remove(struct platform_device *pdev) |
| 664 | { | 592 | { |
| 665 | struct omap2430_glue *glue = platform_get_drvdata(pdev); | 593 | struct omap2430_glue *glue = platform_get_drvdata(pdev); |
| 594 | struct musb *musb = glue_to_musb(glue); | ||
| 666 | 595 | ||
| 667 | pm_runtime_get_sync(glue->dev); | 596 | pm_runtime_get_sync(glue->dev); |
| 668 | cancel_work_sync(&glue->omap_musb_mailbox_work); | ||
| 669 | platform_device_unregister(glue->musb); | 597 | platform_device_unregister(glue->musb); |
| 598 | omap2430_set_power(musb, false, false); | ||
| 670 | pm_runtime_put_sync(glue->dev); | 599 | pm_runtime_put_sync(glue->dev); |
| 600 | pm_runtime_dont_use_autosuspend(glue->dev); | ||
| 671 | pm_runtime_disable(glue->dev); | 601 | pm_runtime_disable(glue->dev); |
| 672 | 602 | ||
| 673 | return 0; | 603 | return 0; |
| @@ -680,12 +610,13 @@ static int omap2430_runtime_suspend(struct device *dev) | |||
| 680 | struct omap2430_glue *glue = dev_get_drvdata(dev); | 610 | struct omap2430_glue *glue = dev_get_drvdata(dev); |
| 681 | struct musb *musb = glue_to_musb(glue); | 611 | struct musb *musb = glue_to_musb(glue); |
| 682 | 612 | ||
| 683 | if (musb) { | 613 | if (!musb) |
| 684 | musb->context.otg_interfsel = musb_readl(musb->mregs, | 614 | return 0; |
| 685 | OTG_INTERFSEL); | ||
| 686 | 615 | ||
| 687 | omap2430_low_level_exit(musb); | 616 | musb->context.otg_interfsel = musb_readl(musb->mregs, |
| 688 | } | 617 | OTG_INTERFSEL); |
| 618 | |||
| 619 | omap2430_low_level_exit(musb); | ||
| 689 | 620 | ||
| 690 | return 0; | 621 | return 0; |
| 691 | } | 622 | } |
| @@ -696,7 +627,7 @@ static int omap2430_runtime_resume(struct device *dev) | |||
| 696 | struct musb *musb = glue_to_musb(glue); | 627 | struct musb *musb = glue_to_musb(glue); |
| 697 | 628 | ||
| 698 | if (!musb) | 629 | if (!musb) |
| 699 | return -EPROBE_DEFER; | 630 | return 0; |
| 700 | 631 | ||
| 701 | omap2430_low_level_init(musb); | 632 | omap2430_low_level_init(musb); |
| 702 | musb_writel(musb->mregs, OTG_INTERFSEL, | 633 | musb_writel(musb->mregs, OTG_INTERFSEL, |
| @@ -738,18 +669,8 @@ static struct platform_driver omap2430_driver = { | |||
| 738 | }, | 669 | }, |
| 739 | }; | 670 | }; |
| 740 | 671 | ||
| 672 | module_platform_driver(omap2430_driver); | ||
| 673 | |||
| 741 | MODULE_DESCRIPTION("OMAP2PLUS MUSB Glue Layer"); | 674 | MODULE_DESCRIPTION("OMAP2PLUS MUSB Glue Layer"); |
| 742 | MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>"); | 675 | MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>"); |
| 743 | MODULE_LICENSE("GPL v2"); | 676 | MODULE_LICENSE("GPL v2"); |
| 744 | |||
| 745 | static int __init omap2430_init(void) | ||
| 746 | { | ||
| 747 | return platform_driver_register(&omap2430_driver); | ||
| 748 | } | ||
| 749 | subsys_initcall(omap2430_init); | ||
| 750 | |||
| 751 | static void __exit omap2430_exit(void) | ||
| 752 | { | ||
| 753 | platform_driver_unregister(&omap2430_driver); | ||
| 754 | } | ||
| 755 | module_exit(omap2430_exit); | ||
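
The omap2430.c rework above replaces the open-coded idle timer and the scattered pm_runtime_get_sync()/put_autosuspend() calls with a single omap2430_set_power() helper: the controller holds exactly one runtime-PM reference, and only while both a gadget is enabled and a cable is connected; probe now enables runtime PM up front with a 500 ms autosuspend delay, and the mailbox callback reports -EPROBE_DEFER instead of silently returning when the glue or musb core is not ready. A reduced sketch of the gating logic, using the glue fields from the hunk (the real code also warns on a redundant transition and checks the pm_runtime_get_sync() return value):

/* Sketch: hold one runtime-PM reference while enabled && cable_connected. */
static void glue_set_power(struct device *dev, struct omap2430_glue *glue,
			   bool enabled, bool cable)
{
	bool power_up;

	glue->enabled = enabled;
	glue->cable_connected = cable;

	power_up = glue->enabled && glue->cable_connected;
	if (power_up == glue->powered)
		return;				/* nothing to change, keep get/put balanced */

	glue->powered = power_up;
	if (power_up) {
		pm_runtime_get_sync(dev);	/* wake and pin the controller */
	} else {
		pm_runtime_mark_last_busy(dev);	/* arm the autosuspend set up in probe */
		pm_runtime_put_autosuspend(dev);
	}
}
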
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c index fdab4232cfbf..76500515dd8b 100644 --- a/drivers/usb/musb/sunxi.c +++ b/drivers/usb/musb/sunxi.c | |||
| @@ -80,7 +80,8 @@ static struct musb *sunxi_musb; | |||
| 80 | 80 | ||
| 81 | struct sunxi_glue { | 81 | struct sunxi_glue { |
| 82 | struct device *dev; | 82 | struct device *dev; |
| 83 | struct platform_device *musb; | 83 | struct musb *musb; |
| 84 | struct platform_device *musb_pdev; | ||
| 84 | struct clk *clk; | 85 | struct clk *clk; |
| 85 | struct reset_control *rst; | 86 | struct reset_control *rst; |
| 86 | struct phy *phy; | 87 | struct phy *phy; |
| @@ -102,7 +103,7 @@ static void sunxi_musb_work(struct work_struct *work) | |||
| 102 | return; | 103 | return; |
| 103 | 104 | ||
| 104 | if (test_and_clear_bit(SUNXI_MUSB_FL_HOSTMODE_PEND, &glue->flags)) { | 105 | if (test_and_clear_bit(SUNXI_MUSB_FL_HOSTMODE_PEND, &glue->flags)) { |
| 105 | struct musb *musb = platform_get_drvdata(glue->musb); | 106 | struct musb *musb = glue->musb; |
| 106 | unsigned long flags; | 107 | unsigned long flags; |
| 107 | u8 devctl; | 108 | u8 devctl; |
| 108 | 109 | ||
| @@ -112,7 +113,7 @@ static void sunxi_musb_work(struct work_struct *work) | |||
| 112 | if (test_bit(SUNXI_MUSB_FL_HOSTMODE, &glue->flags)) { | 113 | if (test_bit(SUNXI_MUSB_FL_HOSTMODE, &glue->flags)) { |
| 113 | set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags); | 114 | set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags); |
| 114 | musb->xceiv->otg->default_a = 1; | 115 | musb->xceiv->otg->default_a = 1; |
| 115 | musb->xceiv->otg->state = OTG_STATE_A_IDLE; | 116 | musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE; |
| 116 | MUSB_HST_MODE(musb); | 117 | MUSB_HST_MODE(musb); |
| 117 | devctl |= MUSB_DEVCTL_SESSION; | 118 | devctl |= MUSB_DEVCTL_SESSION; |
| 118 | } else { | 119 | } else { |
| @@ -145,10 +146,12 @@ static void sunxi_musb_set_vbus(struct musb *musb, int is_on) | |||
| 145 | { | 146 | { |
| 146 | struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent); | 147 | struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent); |
| 147 | 148 | ||
| 148 | if (is_on) | 149 | if (is_on) { |
| 149 | set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags); | 150 | set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags); |
| 150 | else | 151 | musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE; |
| 152 | } else { | ||
| 151 | clear_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags); | 153 | clear_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags); |
| 154 | } | ||
| 152 | 155 | ||
| 153 | schedule_work(&glue->work); | 156 | schedule_work(&glue->work); |
| 154 | } | 157 | } |
| @@ -264,15 +267,6 @@ static int sunxi_musb_init(struct musb *musb) | |||
| 264 | if (ret) | 267 | if (ret) |
| 265 | goto error_unregister_notifier; | 268 | goto error_unregister_notifier; |
| 266 | 269 | ||
| 267 | if (musb->port_mode == MUSB_PORT_MODE_HOST) { | ||
| 268 | ret = phy_power_on(glue->phy); | ||
| 269 | if (ret) | ||
| 270 | goto error_phy_exit; | ||
| 271 | set_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags); | ||
| 272 | /* Stop musb work from turning vbus off again */ | ||
| 273 | set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags); | ||
| 274 | } | ||
| 275 | |||
| 276 | musb->isr = sunxi_musb_interrupt; | 270 | musb->isr = sunxi_musb_interrupt; |
| 277 | 271 | ||
| 278 | /* Stop the musb-core from doing runtime pm (not supported on sunxi) */ | 272 | /* Stop the musb-core from doing runtime pm (not supported on sunxi) */ |
| @@ -280,8 +274,6 @@ static int sunxi_musb_init(struct musb *musb) | |||
| 280 | 274 | ||
| 281 | return 0; | 275 | return 0; |
| 282 | 276 | ||
| 283 | error_phy_exit: | ||
| 284 | phy_exit(glue->phy); | ||
| 285 | error_unregister_notifier: | 277 | error_unregister_notifier: |
| 286 | if (musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE) | 278 | if (musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE) |
| 287 | extcon_unregister_notifier(glue->extcon, EXTCON_USB_HOST, | 279 | extcon_unregister_notifier(glue->extcon, EXTCON_USB_HOST, |
| @@ -323,10 +315,31 @@ static int sunxi_musb_exit(struct musb *musb) | |||
| 323 | return 0; | 315 | return 0; |
| 324 | } | 316 | } |
| 325 | 317 | ||
| 318 | static int sunxi_set_mode(struct musb *musb, u8 mode) | ||
| 319 | { | ||
| 320 | struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent); | ||
| 321 | int ret; | ||
| 322 | |||
| 323 | if (mode == MUSB_HOST) { | ||
| 324 | ret = phy_power_on(glue->phy); | ||
| 325 | if (ret) | ||
| 326 | return ret; | ||
| 327 | |||
| 328 | set_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags); | ||
| 329 | /* Stop musb work from turning vbus off again */ | ||
| 330 | set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags); | ||
| 331 | musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE; | ||
| 332 | } | ||
| 333 | |||
| 334 | return 0; | ||
| 335 | } | ||
| 336 | |||
| 326 | static void sunxi_musb_enable(struct musb *musb) | 337 | static void sunxi_musb_enable(struct musb *musb) |
| 327 | { | 338 | { |
| 328 | struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent); | 339 | struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent); |
| 329 | 340 | ||
| 341 | glue->musb = musb; | ||
| 342 | |||
| 330 | /* musb_core does not call us in a balanced manner */ | 343 | /* musb_core does not call us in a balanced manner */ |
| 331 | if (test_and_set_bit(SUNXI_MUSB_FL_ENABLED, &glue->flags)) | 344 | if (test_and_set_bit(SUNXI_MUSB_FL_ENABLED, &glue->flags)) |
| 332 | return; | 345 | return; |
| @@ -569,6 +582,7 @@ static const struct musb_platform_ops sunxi_musb_ops = { | |||
| 569 | .exit = sunxi_musb_exit, | 582 | .exit = sunxi_musb_exit, |
| 570 | .enable = sunxi_musb_enable, | 583 | .enable = sunxi_musb_enable, |
| 571 | .disable = sunxi_musb_disable, | 584 | .disable = sunxi_musb_disable, |
| 585 | .set_mode = sunxi_set_mode, | ||
| 572 | .fifo_offset = sunxi_musb_fifo_offset, | 586 | .fifo_offset = sunxi_musb_fifo_offset, |
| 573 | .ep_offset = sunxi_musb_ep_offset, | 587 | .ep_offset = sunxi_musb_ep_offset, |
| 574 | .busctl_offset = sunxi_musb_busctl_offset, | 588 | .busctl_offset = sunxi_musb_busctl_offset, |
| @@ -721,9 +735,9 @@ static int sunxi_musb_probe(struct platform_device *pdev) | |||
| 721 | pinfo.data = &pdata; | 735 | pinfo.data = &pdata; |
| 722 | pinfo.size_data = sizeof(pdata); | 736 | pinfo.size_data = sizeof(pdata); |
| 723 | 737 | ||
| 724 | glue->musb = platform_device_register_full(&pinfo); | 738 | glue->musb_pdev = platform_device_register_full(&pinfo); |
| 725 | if (IS_ERR(glue->musb)) { | 739 | if (IS_ERR(glue->musb_pdev)) { |
| 726 | ret = PTR_ERR(glue->musb); | 740 | ret = PTR_ERR(glue->musb_pdev); |
| 727 | dev_err(&pdev->dev, "Error registering musb dev: %d\n", ret); | 741 | dev_err(&pdev->dev, "Error registering musb dev: %d\n", ret); |
| 728 | goto err_unregister_usb_phy; | 742 | goto err_unregister_usb_phy; |
| 729 | } | 743 | } |
| @@ -740,7 +754,7 @@ static int sunxi_musb_remove(struct platform_device *pdev) | |||
| 740 | struct sunxi_glue *glue = platform_get_drvdata(pdev); | 754 | struct sunxi_glue *glue = platform_get_drvdata(pdev); |
| 741 | struct platform_device *usb_phy = glue->usb_phy; | 755 | struct platform_device *usb_phy = glue->usb_phy; |
| 742 | 756 | ||
| 743 | platform_device_unregister(glue->musb); /* Frees glue ! */ | 757 | platform_device_unregister(glue->musb_pdev); |
| 744 | usb_phy_generic_unregister(usb_phy); | 758 | usb_phy_generic_unregister(usb_phy); |
| 745 | 759 | ||
| 746 | return 0; | 760 | return 0; |
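
On sunxi the host-mode PHY power-up moves out of sunxi_musb_init() and into a new .set_mode hook, so the PHY is powered only once the core actually switches to host mode, and the VBUS flag is set at the same time so the deferred work does not immediately turn VBUS back off; the glue also keeps separate pointers for the musb core (glue->musb, filled in from enable) and the registered platform device (glue->musb_pdev). A trimmed sketch of the hook, using the flags and phy calls shown in the hunk:

/* Sketch: power the PHY only when the core requests host mode. */
static int glue_set_mode(struct musb *musb, u8 mode)
{
	struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
	int ret;

	if (mode != MUSB_HOST)
		return 0;			/* nothing extra for peripheral mode here */

	ret = phy_power_on(glue->phy);		/* bring the PHY up for host operation */
	if (ret)
		return ret;

	set_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags);
	/* keep the deferred work from turning VBUS off again */
	set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
	musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;

	return 0;
}
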
diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c index 24e2b3cf1867..a72e8d670adc 100644 --- a/drivers/usb/phy/phy-twl6030-usb.c +++ b/drivers/usb/phy/phy-twl6030-usb.c | |||
| @@ -97,6 +97,9 @@ struct twl6030_usb { | |||
| 97 | 97 | ||
| 98 | struct regulator *usb3v3; | 98 | struct regulator *usb3v3; |
| 99 | 99 | ||
| 100 | /* used to check initial cable status after probe */ | ||
| 101 | struct delayed_work get_status_work; | ||
| 102 | |||
| 100 | /* used to set vbus, in atomic path */ | 103 | /* used to set vbus, in atomic path */ |
| 101 | struct work_struct set_vbus_work; | 104 | struct work_struct set_vbus_work; |
| 102 | 105 | ||
| @@ -227,12 +230,16 @@ static irqreturn_t twl6030_usb_irq(int irq, void *_twl) | |||
| 227 | twl->asleep = 1; | 230 | twl->asleep = 1; |
| 228 | status = MUSB_VBUS_VALID; | 231 | status = MUSB_VBUS_VALID; |
| 229 | twl->linkstat = status; | 232 | twl->linkstat = status; |
| 230 | musb_mailbox(status); | 233 | ret = musb_mailbox(status); |
| 234 | if (ret) | ||
| 235 | twl->linkstat = MUSB_UNKNOWN; | ||
| 231 | } else { | 236 | } else { |
| 232 | if (twl->linkstat != MUSB_UNKNOWN) { | 237 | if (twl->linkstat != MUSB_UNKNOWN) { |
| 233 | status = MUSB_VBUS_OFF; | 238 | status = MUSB_VBUS_OFF; |
| 234 | twl->linkstat = status; | 239 | twl->linkstat = status; |
| 235 | musb_mailbox(status); | 240 | ret = musb_mailbox(status); |
| 241 | if (ret) | ||
| 242 | twl->linkstat = MUSB_UNKNOWN; | ||
| 236 | if (twl->asleep) { | 243 | if (twl->asleep) { |
| 237 | regulator_disable(twl->usb3v3); | 244 | regulator_disable(twl->usb3v3); |
| 238 | twl->asleep = 0; | 245 | twl->asleep = 0; |
| @@ -264,7 +271,9 @@ static irqreturn_t twl6030_usbotg_irq(int irq, void *_twl) | |||
| 264 | twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_SET); | 271 | twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_SET); |
| 265 | status = MUSB_ID_GROUND; | 272 | status = MUSB_ID_GROUND; |
| 266 | twl->linkstat = status; | 273 | twl->linkstat = status; |
| 267 | musb_mailbox(status); | 274 | ret = musb_mailbox(status); |
| 275 | if (ret) | ||
| 276 | twl->linkstat = MUSB_UNKNOWN; | ||
| 268 | } else { | 277 | } else { |
| 269 | twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_CLR); | 278 | twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_CLR); |
| 270 | twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET); | 279 | twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET); |
| @@ -274,6 +283,15 @@ static irqreturn_t twl6030_usbotg_irq(int irq, void *_twl) | |||
| 274 | return IRQ_HANDLED; | 283 | return IRQ_HANDLED; |
| 275 | } | 284 | } |
| 276 | 285 | ||
| 286 | static void twl6030_status_work(struct work_struct *work) | ||
| 287 | { | ||
| 288 | struct twl6030_usb *twl = container_of(work, struct twl6030_usb, | ||
| 289 | get_status_work.work); | ||
| 290 | |||
| 291 | twl6030_usb_irq(twl->irq2, twl); | ||
| 292 | twl6030_usbotg_irq(twl->irq1, twl); | ||
| 293 | } | ||
| 294 | |||
| 277 | static int twl6030_enable_irq(struct twl6030_usb *twl) | 295 | static int twl6030_enable_irq(struct twl6030_usb *twl) |
| 278 | { | 296 | { |
| 279 | twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET); | 297 | twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET); |
| @@ -284,8 +302,6 @@ static int twl6030_enable_irq(struct twl6030_usb *twl) | |||
| 284 | REG_INT_MSK_LINE_C); | 302 | REG_INT_MSK_LINE_C); |
| 285 | twl6030_interrupt_unmask(TWL6030_CHARGER_CTRL_INT_MASK, | 303 | twl6030_interrupt_unmask(TWL6030_CHARGER_CTRL_INT_MASK, |
| 286 | REG_INT_MSK_STS_C); | 304 | REG_INT_MSK_STS_C); |
| 287 | twl6030_usb_irq(twl->irq2, twl); | ||
| 288 | twl6030_usbotg_irq(twl->irq1, twl); | ||
| 289 | 305 | ||
| 290 | return 0; | 306 | return 0; |
| 291 | } | 307 | } |
| @@ -371,6 +387,7 @@ static int twl6030_usb_probe(struct platform_device *pdev) | |||
| 371 | dev_warn(&pdev->dev, "could not create sysfs file\n"); | 387 | dev_warn(&pdev->dev, "could not create sysfs file\n"); |
| 372 | 388 | ||
| 373 | INIT_WORK(&twl->set_vbus_work, otg_set_vbus_work); | 389 | INIT_WORK(&twl->set_vbus_work, otg_set_vbus_work); |
| 390 | INIT_DELAYED_WORK(&twl->get_status_work, twl6030_status_work); | ||
| 374 | 391 | ||
| 375 | status = request_threaded_irq(twl->irq1, NULL, twl6030_usbotg_irq, | 392 | status = request_threaded_irq(twl->irq1, NULL, twl6030_usbotg_irq, |
| 376 | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | IRQF_ONESHOT, | 393 | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | IRQF_ONESHOT, |
| @@ -395,6 +412,7 @@ static int twl6030_usb_probe(struct platform_device *pdev) | |||
| 395 | 412 | ||
| 396 | twl->asleep = 0; | 413 | twl->asleep = 0; |
| 397 | twl6030_enable_irq(twl); | 414 | twl6030_enable_irq(twl); |
| 415 | schedule_delayed_work(&twl->get_status_work, HZ); | ||
| 398 | dev_info(&pdev->dev, "Initialized TWL6030 USB module\n"); | 416 | dev_info(&pdev->dev, "Initialized TWL6030 USB module\n"); |
| 399 | 417 | ||
| 400 | return 0; | 418 | return 0; |
| @@ -404,6 +422,7 @@ static int twl6030_usb_remove(struct platform_device *pdev) | |||
| 404 | { | 422 | { |
| 405 | struct twl6030_usb *twl = platform_get_drvdata(pdev); | 423 | struct twl6030_usb *twl = platform_get_drvdata(pdev); |
| 406 | 424 | ||
| 425 | cancel_delayed_work(&twl->get_status_work); | ||
| 407 | twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK, | 426 | twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK, |
| 408 | REG_INT_MSK_LINE_C); | 427 | REG_INT_MSK_LINE_C); |
| 409 | twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK, | 428 | twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK, |
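
The twl6030 PHY changes pair with the mailbox now returning an int: every musb_mailbox() call checks for failure (typically -EPROBE_DEFER while the musb glue is still probing) and drops the cached link state back to MUSB_UNKNOWN, and the initial cable-status check is taken out of twl6030_enable_irq() and run from a delayed work roughly one second after probe, which remove() cancels. A condensed sketch using the fields and handler introduced in the hunks:

/* Sketch: report an event, but forget the cached state if the consumer
 * is not ready yet, so a later check reports it again. */
static void report_vbus_valid(struct twl6030_usb *twl)
{
	twl->linkstat = MUSB_VBUS_VALID;
	if (musb_mailbox(MUSB_VBUS_VALID))	/* e.g. -EPROBE_DEFER from the glue */
		twl->linkstat = MUSB_UNKNOWN;
}

/* Probe-time setup, as in the hunk: defer the first status read instead of
 * calling the IRQ handlers synchronously from twl6030_enable_irq(). */
static void setup_status_recheck(struct twl6030_usb *twl)
{
	INIT_DELAYED_WORK(&twl->get_status_work, twl6030_status_work);
	schedule_delayed_work(&twl->get_status_work, HZ);	/* ~1 s after probe */
}
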
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c index 2eddbe538cda..5608af4a369d 100644 --- a/drivers/usb/serial/mos7720.c +++ b/drivers/usb/serial/mos7720.c | |||
| @@ -2007,6 +2007,7 @@ static void mos7720_release(struct usb_serial *serial) | |||
| 2007 | urblist_entry) | 2007 | urblist_entry) |
| 2008 | usb_unlink_urb(urbtrack->urb); | 2008 | usb_unlink_urb(urbtrack->urb); |
| 2009 | spin_unlock_irqrestore(&mos_parport->listlock, flags); | 2009 | spin_unlock_irqrestore(&mos_parport->listlock, flags); |
| 2010 | parport_del_port(mos_parport->pp); | ||
| 2010 | 2011 | ||
| 2011 | kref_put(&mos_parport->ref_count, destroy_mos_parport); | 2012 | kref_put(&mos_parport->ref_count, destroy_mos_parport); |
| 2012 | } | 2013 | } |
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index 4d49fce406e1..5ef014ba6ae8 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c | |||
| @@ -836,6 +836,7 @@ static int uas_slave_configure(struct scsi_device *sdev) | |||
| 836 | if (devinfo->flags & US_FL_BROKEN_FUA) | 836 | if (devinfo->flags & US_FL_BROKEN_FUA) |
| 837 | sdev->broken_fua = 1; | 837 | sdev->broken_fua = 1; |
| 838 | 838 | ||
| 839 | scsi_change_queue_depth(sdev, devinfo->qdepth - 2); | ||
| 839 | return 0; | 840 | return 0; |
| 840 | } | 841 | } |
| 841 | 842 | ||
| @@ -848,7 +849,6 @@ static struct scsi_host_template uas_host_template = { | |||
| 848 | .slave_configure = uas_slave_configure, | 849 | .slave_configure = uas_slave_configure, |
| 849 | .eh_abort_handler = uas_eh_abort_handler, | 850 | .eh_abort_handler = uas_eh_abort_handler, |
| 850 | .eh_bus_reset_handler = uas_eh_bus_reset_handler, | 851 | .eh_bus_reset_handler = uas_eh_bus_reset_handler, |
| 851 | .can_queue = MAX_CMNDS, | ||
| 852 | .this_id = -1, | 852 | .this_id = -1, |
| 853 | .sg_tablesize = SG_NONE, | 853 | .sg_tablesize = SG_NONE, |
| 854 | .skip_settle_delay = 1, | 854 | .skip_settle_delay = 1, |
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c index fca51105974e..2e0450bec1b1 100644 --- a/drivers/usb/usbip/vhci_hcd.c +++ b/drivers/usb/usbip/vhci_hcd.c | |||
| @@ -941,7 +941,7 @@ static void vhci_stop(struct usb_hcd *hcd) | |||
| 941 | 941 | ||
| 942 | static int vhci_get_frame_number(struct usb_hcd *hcd) | 942 | static int vhci_get_frame_number(struct usb_hcd *hcd) |
| 943 | { | 943 | { |
| 944 | pr_err("Not yet implemented\n"); | 944 | dev_err_ratelimited(&hcd->self.root_hub->dev, "Not yet implemented\n"); |
| 945 | return 0; | 945 | return 0; |
| 946 | } | 946 | } |
| 947 | 947 | ||
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index b54f26c55dfd..b4b3e256491b 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig | |||
| @@ -746,7 +746,7 @@ config ALIM7101_WDT | |||
| 746 | 746 | ||
| 747 | config EBC_C384_WDT | 747 | config EBC_C384_WDT |
| 748 | tristate "WinSystems EBC-C384 Watchdog Timer" | 748 | tristate "WinSystems EBC-C384 Watchdog Timer" |
| 749 | depends on X86 && ISA | 749 | depends on X86 && ISA_BUS_API |
| 750 | select WATCHDOG_CORE | 750 | select WATCHDOG_CORE |
| 751 | help | 751 | help |
| 752 | Enables watchdog timer support for the watchdog timer on the | 752 | Enables watchdog timer support for the watchdog timer on the |
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index d46839f51e73..e4db19e88ab1 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c | |||
| @@ -151,8 +151,6 @@ static DECLARE_WAIT_QUEUE_HEAD(balloon_wq); | |||
| 151 | static void balloon_process(struct work_struct *work); | 151 | static void balloon_process(struct work_struct *work); |
| 152 | static DECLARE_DELAYED_WORK(balloon_worker, balloon_process); | 152 | static DECLARE_DELAYED_WORK(balloon_worker, balloon_process); |
| 153 | 153 | ||
| 154 | static void release_memory_resource(struct resource *resource); | ||
| 155 | |||
| 156 | /* When ballooning out (allocating memory to return to Xen) we don't really | 154 | /* When ballooning out (allocating memory to return to Xen) we don't really |
| 157 | want the kernel to try too hard since that can trigger the oom killer. */ | 155 | want the kernel to try too hard since that can trigger the oom killer. */ |
| 158 | #define GFP_BALLOON \ | 156 | #define GFP_BALLOON \ |
| @@ -248,6 +246,19 @@ static enum bp_state update_schedule(enum bp_state state) | |||
| 248 | } | 246 | } |
| 249 | 247 | ||
| 250 | #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG | 248 | #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG |
| 249 | static void release_memory_resource(struct resource *resource) | ||
| 250 | { | ||
| 251 | if (!resource) | ||
| 252 | return; | ||
| 253 | |||
| 254 | /* | ||
| 255 | * No need to reset region to identity mapped since we now | ||
| 256 | * know that no I/O can be in this region | ||
| 257 | */ | ||
| 258 | release_resource(resource); | ||
| 259 | kfree(resource); | ||
| 260 | } | ||
| 261 | |||
| 251 | static struct resource *additional_memory_resource(phys_addr_t size) | 262 | static struct resource *additional_memory_resource(phys_addr_t size) |
| 252 | { | 263 | { |
| 253 | struct resource *res; | 264 | struct resource *res; |
| @@ -286,19 +297,6 @@ static struct resource *additional_memory_resource(phys_addr_t size) | |||
| 286 | return res; | 297 | return res; |
| 287 | } | 298 | } |
| 288 | 299 | ||
| 289 | static void release_memory_resource(struct resource *resource) | ||
| 290 | { | ||
| 291 | if (!resource) | ||
| 292 | return; | ||
| 293 | |||
| 294 | /* | ||
| 295 | * No need to reset region to identity mapped since we now | ||
| 296 | * know that no I/O can be in this region | ||
| 297 | */ | ||
| 298 | release_resource(resource); | ||
| 299 | kfree(resource); | ||
| 300 | } | ||
| 301 | |||
| 302 | static enum bp_state reserve_additional_memory(void) | 300 | static enum bp_state reserve_additional_memory(void) |
| 303 | { | 301 | { |
| 304 | long credit; | 302 | long credit; |
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c index 8e67336f8ddd..6a25533da237 100644 --- a/drivers/xen/xen-pciback/conf_space.c +++ b/drivers/xen/xen-pciback/conf_space.c | |||
| @@ -183,8 +183,7 @@ int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size, | |||
| 183 | field_start = OFFSET(cfg_entry); | 183 | field_start = OFFSET(cfg_entry); |
| 184 | field_end = OFFSET(cfg_entry) + field->size; | 184 | field_end = OFFSET(cfg_entry) + field->size; |
| 185 | 185 | ||
| 186 | if ((req_start >= field_start && req_start < field_end) | 186 | if (req_end > field_start && field_end > req_start) { |
| 187 | || (req_end > field_start && req_end <= field_end)) { | ||
| 188 | err = conf_space_read(dev, cfg_entry, field_start, | 187 | err = conf_space_read(dev, cfg_entry, field_start, |
| 189 | &tmp_val); | 188 | &tmp_val); |
| 190 | if (err) | 189 | if (err) |
| @@ -230,8 +229,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value) | |||
| 230 | field_start = OFFSET(cfg_entry); | 229 | field_start = OFFSET(cfg_entry); |
| 231 | field_end = OFFSET(cfg_entry) + field->size; | 230 | field_end = OFFSET(cfg_entry) + field->size; |
| 232 | 231 | ||
| 233 | if ((req_start >= field_start && req_start < field_end) | 232 | if (req_end > field_start && field_end > req_start) { |
| 234 | || (req_end > field_start && req_end <= field_end)) { | ||
| 235 | tmp_val = 0; | 233 | tmp_val = 0; |
| 236 | 234 | ||
| 237 | err = xen_pcibk_config_read(dev, field_start, | 235 | err = xen_pcibk_config_read(dev, field_start, |
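
Both conf_space.c hunks replace a two-clause range check with the canonical overlap test for half-open intervals: [req_start, req_end) and [field_start, field_end) intersect exactly when req_end > field_start && field_end > req_start. The old condition only tested where the request's two endpoints fell inside the field, so a request that strictly contained a field (starting before it and ending after it) was never matched. A small standalone illustration in plain C with made-up offsets:

#include <stdbool.h>
#include <stdio.h>

/* New check: half-open intervals [rs, re) and [fs, fe) overlap. */
static bool overlaps(unsigned int rs, unsigned int re,
		     unsigned int fs, unsigned int fe)
{
	return re > fs && fe > rs;
}

/* Old check, for comparison: only asks where the request's endpoints land. */
static bool old_check(unsigned int rs, unsigned int re,
		      unsigned int fs, unsigned int fe)
{
	return (rs >= fs && rs < fe) || (re > fs && re <= fe);
}

int main(void)
{
	/* A 4-byte access at offset 0 fully covering a 2-byte field at offset 1. */
	unsigned int rs = 0, re = 4, fs = 1, fe = 3;

	printf("new: %d  old: %d\n", overlaps(rs, re, fs, fe),
	       old_check(rs, re, fs, fe));	/* prints "new: 1  old: 0" */
	return 0;
}
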
diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c index ad3d17d29c81..9ead1c2ff1dd 100644 --- a/drivers/xen/xen-pciback/conf_space_header.c +++ b/drivers/xen/xen-pciback/conf_space_header.c | |||
| @@ -145,7 +145,7 @@ static int rom_write(struct pci_dev *dev, int offset, u32 value, void *data) | |||
| 145 | /* A write to obtain the length must happen as a 32-bit write. | 145 | /* A write to obtain the length must happen as a 32-bit write. |
| 146 | * This does not (yet) support writing individual bytes | 146 | * This does not (yet) support writing individual bytes |
| 147 | */ | 147 | */ |
| 148 | if (value == ~PCI_ROM_ADDRESS_ENABLE) | 148 | if ((value | ~PCI_ROM_ADDRESS_MASK) == ~0U) |
| 149 | bar->which = 1; | 149 | bar->which = 1; |
| 150 | else { | 150 | else { |
| 151 | u32 tmpval; | 151 | u32 tmpval; |
| @@ -225,38 +225,42 @@ static inline void read_dev_bar(struct pci_dev *dev, | |||
| 225 | (PCI_BASE_ADDRESS_SPACE_MEMORY | | 225 | (PCI_BASE_ADDRESS_SPACE_MEMORY | |
| 226 | PCI_BASE_ADDRESS_MEM_TYPE_64))) { | 226 | PCI_BASE_ADDRESS_MEM_TYPE_64))) { |
| 227 | bar_info->val = res[pos - 1].start >> 32; | 227 | bar_info->val = res[pos - 1].start >> 32; |
| 228 | bar_info->len_val = res[pos - 1].end >> 32; | 228 | bar_info->len_val = -resource_size(&res[pos - 1]) >> 32; |
| 229 | return; | 229 | return; |
| 230 | } | 230 | } |
| 231 | } | 231 | } |
| 232 | 232 | ||
| 233 | if (!res[pos].flags || | ||
| 234 | (res[pos].flags & (IORESOURCE_DISABLED | IORESOURCE_UNSET | | ||
| 235 | IORESOURCE_BUSY))) | ||
| 236 | return; | ||
| 237 | |||
| 233 | bar_info->val = res[pos].start | | 238 | bar_info->val = res[pos].start | |
| 234 | (res[pos].flags & PCI_REGION_FLAG_MASK); | 239 | (res[pos].flags & PCI_REGION_FLAG_MASK); |
| 235 | bar_info->len_val = resource_size(&res[pos]); | 240 | bar_info->len_val = -resource_size(&res[pos]) | |
| 241 | (res[pos].flags & PCI_REGION_FLAG_MASK); | ||
| 236 | } | 242 | } |
| 237 | 243 | ||
| 238 | static void *bar_init(struct pci_dev *dev, int offset) | 244 | static void *bar_init(struct pci_dev *dev, int offset) |
| 239 | { | 245 | { |
| 240 | struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL); | 246 | struct pci_bar_info *bar = kzalloc(sizeof(*bar), GFP_KERNEL); |
| 241 | 247 | ||
| 242 | if (!bar) | 248 | if (!bar) |
| 243 | return ERR_PTR(-ENOMEM); | 249 | return ERR_PTR(-ENOMEM); |
| 244 | 250 | ||
| 245 | read_dev_bar(dev, bar, offset, ~0); | 251 | read_dev_bar(dev, bar, offset, ~0); |
| 246 | bar->which = 0; | ||
| 247 | 252 | ||
| 248 | return bar; | 253 | return bar; |
| 249 | } | 254 | } |
| 250 | 255 | ||
| 251 | static void *rom_init(struct pci_dev *dev, int offset) | 256 | static void *rom_init(struct pci_dev *dev, int offset) |
| 252 | { | 257 | { |
| 253 | struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL); | 258 | struct pci_bar_info *bar = kzalloc(sizeof(*bar), GFP_KERNEL); |
| 254 | 259 | ||
| 255 | if (!bar) | 260 | if (!bar) |
| 256 | return ERR_PTR(-ENOMEM); | 261 | return ERR_PTR(-ENOMEM); |
| 257 | 262 | ||
| 258 | read_dev_bar(dev, bar, offset, ~PCI_ROM_ADDRESS_ENABLE); | 263 | read_dev_bar(dev, bar, offset, ~PCI_ROM_ADDRESS_ENABLE); |
| 259 | bar->which = 0; | ||
| 260 | 264 | ||
| 261 | return bar; | 265 | return bar; |
| 262 | } | 266 | } |
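
The conf_space_header.c hunk fixes what the emulated config space hands back when a guest sizes a BAR. Real hardware, after a write of all ones, returns the BAR size encoded as a mask (the low address bits read as zero), which is -resource_size() truncated to the register width and OR'ed with the flag bits; previously len_val held the raw size, and the upper half of a 64-bit BAR even reported the resource end. The ROM sizing trigger is also relaxed to accept any write whose address bits are all ones regardless of the enable bit, and the BAR state is kzalloc()'ed in place of the explicit bar->which = 0. A small standalone illustration of the encoding and the guest-side decode, with made-up values and the memory-BAR mask hard-coded:

#include <stdint.h>
#include <stdio.h>

#define MEM_MASK	(~0xfU)	/* stand-in for PCI_BASE_ADDRESS_MEM_MASK */

/* What the emulated BAR reads back after the guest writes all ones:
 * the size turned into a mask, with the low flag bits preserved. */
static uint32_t bar_sizing_value(uint32_t size, uint32_t flags)
{
	return (uint32_t)-size | flags;	/* -size == ~(size - 1) for power-of-two sizes */
}

/* How a guest turns that read-back value into the BAR size. */
static uint32_t bar_decode_size(uint32_t val)
{
	return ~(val & MEM_MASK) + 1;
}

int main(void)
{
	uint32_t size = 0x1000;				/* hypothetical 4 KiB memory BAR */
	uint32_t val = bar_sizing_value(size, 0x4);	/* 0x4: example flag bits */

	printf("read-back 0x%08x -> size 0x%x\n", val, bar_decode_size(val));
	/* prints: read-back 0xfffff004 -> size 0x1000 */
	return 0;
}
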
