Diffstat (limited to 'drivers')
633 files changed, 6537 insertions, 3916 deletions
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 5a968a78652b..0d2e98920069 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -416,13 +416,18 @@ acpi_tb_get_table(struct acpi_table_desc *table_desc,
 		}
 	}
 
-	table_desc->validation_count++;
-	if (table_desc->validation_count == 0) {
-		ACPI_ERROR((AE_INFO,
-			    "Table %p, Validation count is zero after increment\n",
-			    table_desc));
-		table_desc->validation_count--;
-		return_ACPI_STATUS(AE_LIMIT);
+	if (table_desc->validation_count < ACPI_MAX_TABLE_VALIDATIONS) {
+		table_desc->validation_count++;
+
+		/*
+		 * Detect validation_count overflows to ensure that the warning
+		 * message will only be printed once.
+		 */
+		if (table_desc->validation_count >= ACPI_MAX_TABLE_VALIDATIONS) {
+			ACPI_WARNING((AE_INFO,
+				      "Table %p, Validation count overflows\n",
+				      table_desc));
+		}
 	}
 
 	*out_table = table_desc->pointer;
@@ -449,13 +454,20 @@ void acpi_tb_put_table(struct acpi_table_desc *table_desc)
 
 	ACPI_FUNCTION_TRACE(acpi_tb_put_table);
 
-	if (table_desc->validation_count == 0) {
-		ACPI_WARNING((AE_INFO,
-			      "Table %p, Validation count is zero before decrement\n",
-			      table_desc));
-		return_VOID;
+	if (table_desc->validation_count < ACPI_MAX_TABLE_VALIDATIONS) {
+		table_desc->validation_count--;
+
+		/*
+		 * Detect validation_count underflows to ensure that the warning
+		 * message will only be printed once.
+		 */
+		if (table_desc->validation_count >= ACPI_MAX_TABLE_VALIDATIONS) {
+			ACPI_WARNING((AE_INFO,
+				      "Table %p, Validation count underflows\n",
+				      table_desc));
+			return_VOID;
+		}
 	}
-	table_desc->validation_count--;
 
 	if (table_desc->validation_count == 0) {
 
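Note: a minimal standalone sketch of the saturating-counter pattern the hunk above applies, using hypothetical names rather than the ACPICA API. Once the count hits the cap it stops moving, so the overflow/underflow warning can fire at most once.

#include <linux/printk.h>

#define MAX_VALIDATIONS 255	/* hypothetical saturation limit */

struct table_ref {
	unsigned char validation_count;
};

/* Take a reference; saturate instead of wrapping around. */
static void table_get(struct table_ref *t)
{
	if (t->validation_count < MAX_VALIDATIONS) {
		t->validation_count++;
		if (t->validation_count >= MAX_VALIDATIONS)
			pr_warn("table %p: validation count overflows\n", t);
	}
}

/* Drop a reference; a saturated counter is never touched again. */
static void table_put(struct table_ref *t)
{
	if (t->validation_count < MAX_VALIDATIONS) {
		t->validation_count--;
		if (t->validation_count >= MAX_VALIDATIONS)
			pr_warn("table %p: validation count underflows\n", t);
	}
}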
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index e0587c85bafd..ff096d9755b9 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -474,15 +474,6 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
 			return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
 		}
 
-		/*
-		 * The end_tag opcode must be followed by a zero byte.
-		 * Although this byte is technically defined to be a checksum,
-		 * in practice, all ASL compilers set this byte to zero.
-		 */
-		if (*(aml + 1) != 0) {
-			return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
-		}
-
 		/* Return the pointer to the end_tag if requested */
 
 		if (!user_function) {
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index c5fecf97ee2f..797b28dc7b34 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -666,14 +666,6 @@ static const struct iommu_ops *iort_iommu_xlate(struct device *dev,
 	int ret = -ENODEV;
 	struct fwnode_handle *iort_fwnode;
 
-	/*
-	 * If we already translated the fwspec there
-	 * is nothing left to do, return the iommu_ops.
-	 */
-	ops = iort_fwspec_iommu_ops(dev->iommu_fwspec);
-	if (ops)
-		return ops;
-
 	if (node) {
 		iort_fwnode = iort_get_fwnode(node);
 		if (!iort_fwnode)
@@ -735,6 +727,14 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
 	u32 streamid = 0;
 	int err;
 
+	/*
+	 * If we already translated the fwspec there
+	 * is nothing left to do, return the iommu_ops.
+	 */
+	ops = iort_fwspec_iommu_ops(dev->iommu_fwspec);
+	if (ops)
+		return ops;
+
 	if (dev_is_pci(dev)) {
 		struct pci_bus *bus = to_pci_dev(dev)->bus;
 		u32 rid;
@@ -782,6 +782,12 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
 	if (err)
 		ops = ERR_PTR(err);
 
+	/* Ignore all other errors apart from EPROBE_DEFER */
+	if (IS_ERR(ops) && (PTR_ERR(ops) != -EPROBE_DEFER)) {
+		dev_dbg(dev, "Adding to IOMMU failed: %ld\n", PTR_ERR(ops));
+		ops = NULL;
+	}
+
 	return ops;
 }
 
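Note: a hedged sketch of the error-filtering convention the last hunk relies on, written as a hypothetical helper (not part of the iort API): an ERR_PTR-encoded result is kept only for -EPROBE_DEFER, while every other error degrades to "no IOMMU".

#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical wrapper: keep -EPROBE_DEFER, drop every other error. */
static const struct iommu_ops *filter_iommu_err(struct device *dev,
						const struct iommu_ops *ops)
{
	if (IS_ERR(ops) && PTR_ERR(ops) != -EPROBE_DEFER) {
		dev_dbg(dev, "Adding to IOMMU failed: %ld\n", PTR_ERR(ops));
		ops = NULL;	/* the device still works, just without an IOMMU */
	}
	return ops;
}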
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index a9a9ab3399d4..d42eeef9d928 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -782,7 +782,7 @@ static int acpi_battery_update(struct acpi_battery *battery, bool resume)
 	if ((battery->state & ACPI_BATTERY_STATE_CRITICAL) ||
 	    (test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags) &&
 	    (battery->capacity_now <= battery->alarm)))
-		pm_wakeup_hard_event(&battery->device->dev);
+		pm_wakeup_event(&battery->device->dev, 0);
 
 	return result;
 }
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 25aba9b107dd..e19f530f1083 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -113,7 +113,7 @@ struct acpi_button {
 
 static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier);
 static struct acpi_device *lid_device;
-static u8 lid_init_state = ACPI_BUTTON_LID_INIT_OPEN;
+static u8 lid_init_state = ACPI_BUTTON_LID_INIT_METHOD;
 
 static unsigned long lid_report_interval __read_mostly = 500;
 module_param(lid_report_interval, ulong, 0644);
@@ -217,7 +217,7 @@ static int acpi_lid_notify_state(struct acpi_device *device, int state)
 	}
 
 	if (state)
-		pm_wakeup_hard_event(&device->dev);
+		pm_wakeup_event(&device->dev, 0);
 
 	ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device);
 	if (ret == NOTIFY_DONE)
@@ -402,7 +402,7 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
 	} else {
 		int keycode;
 
-		pm_wakeup_hard_event(&device->dev);
+		pm_wakeup_event(&device->dev, 0);
 		if (button->suspended)
 			break;
 
@@ -534,7 +534,6 @@ static int acpi_button_add(struct acpi_device *device)
 		lid_device = device;
 	}
 
-	device_init_wakeup(&device->dev, true);
 	printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device));
 	return 0;
 
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 798d5003a039..993fd31394c8 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -24,7 +24,6 @@
 #include <linux/pm_qos.h>
 #include <linux/pm_domain.h>
 #include <linux/pm_runtime.h>
-#include <linux/suspend.h>
 
 #include "internal.h"
 
@@ -400,7 +399,7 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used)
 	mutex_lock(&acpi_pm_notifier_lock);
 
 	if (adev->wakeup.flags.notifier_present) {
-		pm_wakeup_ws_event(adev->wakeup.ws, 0, true);
+		__pm_wakeup_event(adev->wakeup.ws, 0);
 		if (adev->wakeup.context.work.func)
 			queue_pm_work(&adev->wakeup.context.work);
 	}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index e39ec7b7cb67..d53162997f32 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1371,8 +1371,8 @@ int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
 	iort_set_dma_mask(dev);
 
 	iommu = iort_iommu_configure(dev);
-	if (IS_ERR(iommu))
-		return PTR_ERR(iommu);
+	if (IS_ERR(iommu) && PTR_ERR(iommu) == -EPROBE_DEFER)
+		return -EPROBE_DEFER;
 
 	size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
 	/*
@@ -1428,6 +1428,37 @@ static void acpi_init_coherency(struct acpi_device *adev)
 	adev->flags.coherent_dma = cca;
 }
 
+static int acpi_check_spi_i2c_slave(struct acpi_resource *ares, void *data)
+{
+	bool *is_spi_i2c_slave_p = data;
+
+	if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
+		return 1;
+
+	/*
+	 * devices that are connected to UART still need to be enumerated to
+	 * platform bus
+	 */
+	if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART)
+		*is_spi_i2c_slave_p = true;
+
+	/* no need to do more checking */
+	return -1;
+}
+
+static bool acpi_is_spi_i2c_slave(struct acpi_device *device)
+{
+	struct list_head resource_list;
+	bool is_spi_i2c_slave = false;
+
+	INIT_LIST_HEAD(&resource_list);
+	acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave,
+			       &is_spi_i2c_slave);
+	acpi_dev_free_resource_list(&resource_list);
+
+	return is_spi_i2c_slave;
+}
+
 void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
 			     int type, unsigned long long sta)
 {
@@ -1443,6 +1474,7 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
 	acpi_bus_get_flags(device);
 	device->flags.match_driver = false;
 	device->flags.initialized = true;
+	device->flags.spi_i2c_slave = acpi_is_spi_i2c_slave(device);
 	acpi_device_clear_enumerated(device);
 	device_initialize(&device->dev);
 	dev_set_uevent_suppress(&device->dev, true);
@@ -1727,38 +1759,13 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
 	return AE_OK;
 }
 
-static int acpi_check_spi_i2c_slave(struct acpi_resource *ares, void *data)
-{
-	bool *is_spi_i2c_slave_p = data;
-
-	if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
-		return 1;
-
-	/*
-	 * devices that are connected to UART still need to be enumerated to
-	 * platform bus
-	 */
-	if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART)
-		*is_spi_i2c_slave_p = true;
-
-	/* no need to do more checking */
-	return -1;
-}
-
 static void acpi_default_enumeration(struct acpi_device *device)
 {
-	struct list_head resource_list;
-	bool is_spi_i2c_slave = false;
-
 	/*
 	 * Do not enumerate SPI/I2C slaves as they will be enumerated by their
 	 * respective parents.
 	 */
-	INIT_LIST_HEAD(&resource_list);
-	acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave,
-			       &is_spi_i2c_slave);
-	acpi_dev_free_resource_list(&resource_list);
-	if (!is_spi_i2c_slave) {
+	if (!device->flags.spi_i2c_slave) {
 		acpi_create_platform_device(device, NULL);
 		acpi_device_set_enumerated(device);
 	} else {
@@ -1854,7 +1861,7 @@ static void acpi_bus_attach(struct acpi_device *device)
 		return;
 
 	device->flags.match_driver = true;
-	if (ret > 0) {
+	if (ret > 0 && !device->flags.spi_i2c_slave) {
 		acpi_device_set_enumerated(device);
 		goto ok;
 	}
@@ -1863,10 +1870,10 @@ static void acpi_bus_attach(struct acpi_device *device)
 	if (ret < 0)
 		return;
 
-	if (device->pnp.type.platform_id)
-		acpi_default_enumeration(device);
-	else
+	if (!device->pnp.type.platform_id && !device->flags.spi_i2c_slave)
 		acpi_device_set_enumerated(device);
+	else
+		acpi_default_enumeration(device);
 
  ok:
 	list_for_each_entry(child, &device->children, node)
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index a6574d626340..097d630ab886 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -663,40 +663,14 @@ static int acpi_freeze_prepare(void)
 	acpi_os_wait_events_complete();
 	if (acpi_sci_irq_valid())
 		enable_irq_wake(acpi_sci_irq);
-
 	return 0;
 }
 
-static void acpi_freeze_wake(void)
-{
-	/*
-	 * If IRQD_WAKEUP_ARMED is not set for the SCI at this point, it means
-	 * that the SCI has triggered while suspended, so cancel the wakeup in
-	 * case it has not been a wakeup event (the GPEs will be checked later).
-	 */
-	if (acpi_sci_irq_valid() &&
-	    !irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq)))
-		pm_system_cancel_wakeup();
-}
-
-static void acpi_freeze_sync(void)
-{
-	/*
-	 * Process all pending events in case there are any wakeup ones.
-	 *
-	 * The EC driver uses the system workqueue, so that one needs to be
-	 * flushed too.
-	 */
-	acpi_os_wait_events_complete();
-	flush_scheduled_work();
-}
-
 static void acpi_freeze_restore(void)
 {
 	acpi_disable_wakeup_devices(ACPI_STATE_S0);
 	if (acpi_sci_irq_valid())
 		disable_irq_wake(acpi_sci_irq);
-
 	acpi_enable_all_runtime_gpes();
 }
 
@@ -708,8 +682,6 @@ static void acpi_freeze_end(void)
 static const struct platform_freeze_ops acpi_freeze_ops = {
 	.begin = acpi_freeze_begin,
 	.prepare = acpi_freeze_prepare,
-	.wake = acpi_freeze_wake,
-	.sync = acpi_freeze_sync,
 	.restore = acpi_freeze_restore,
 	.end = acpi_freeze_end,
 };
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 1b5ee1e0e5a3..e414fabf7315 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -333,14 +333,17 @@ static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
 	    container_of(bin_attr, struct acpi_table_attr, attr);
 	struct acpi_table_header *table_header = NULL;
 	acpi_status status;
+	ssize_t rc;
 
 	status = acpi_get_table(table_attr->name, table_attr->instance,
 				&table_header);
 	if (ACPI_FAILURE(status))
 		return -ENODEV;
 
-	return memory_read_from_buffer(buf, count, &offset,
-				       table_header, table_header->length);
+	rc = memory_read_from_buffer(buf, count, &offset, table_header,
+				     table_header->length);
+	acpi_put_table(table_header);
+	return rc;
 }
 
 static int acpi_table_attr_init(struct kobject *tables_obj,
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 2fc52407306c..c69954023c2e 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1364,6 +1364,40 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host)
 {}
 #endif
 
+/*
+ * On the Acer Aspire Switch Alpha 12, sometimes all SATA ports are detected
+ * as DUMMY, or detected but eventually get a "link down" and never get up
+ * again. When this happens, CAP.NP may hold a value of 0x00 or 0x01, and the
+ * port_map may hold a value of 0x00.
+ *
+ * Overriding CAP.NP to 0x02 and the port_map to 0x7 will reveal all 3 ports
+ * and can significantly reduce the occurrence of the problem.
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=189471
+ */
+static void acer_sa5_271_workaround(struct ahci_host_priv *hpriv,
+				    struct pci_dev *pdev)
+{
+	static const struct dmi_system_id sysids[] = {
+		{
+			.ident = "Acer Switch Alpha 12",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271")
+			},
+		},
+		{ }
+	};
+
+	if (dmi_check_system(sysids)) {
+		dev_info(&pdev->dev, "enabling Acer Switch Alpha 12 workaround\n");
+		if ((hpriv->saved_cap & 0xC734FF00) == 0xC734FF00) {
+			hpriv->port_map = 0x7;
+			hpriv->cap = 0xC734FF02;
+		}
+	}
+}
+
 #ifdef CONFIG_ARM64
 /*
  * Due to ERRATA#22536, ThunderX needs to handle HOST_IRQ_STAT differently.
@@ -1636,6 +1670,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 			 "online status unreliable, applying workaround\n");
 	}
 
+
+	/* Acer SA5-271 workaround modifies private_data */
+	acer_sa5_271_workaround(hpriv, pdev);
+
 	/* CAP.NP sometimes indicate the index of the last enabled
 	 * port, at other times, that of the last possible port, so
 	 * determining the maximum port number requires looking at
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index aaa761b9081c..cd2eab6aa92e 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -514,8 +514,9 @@ int ahci_platform_init_host(struct platform_device *pdev,
 
 	irq = platform_get_irq(pdev, 0);
 	if (irq <= 0) {
-		dev_err(dev, "no irq\n");
-		return -EINVAL;
+		if (irq != -EPROBE_DEFER)
+			dev_err(dev, "no irq\n");
+		return irq;
 	}
 
 	hpriv->irq = irq;
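Note: a small sketch of the probe-deferral idiom applied above, under the assumption that platform_get_irq() may return -EPROBE_DEFER before the interrupt controller is ready; the driver and function names are hypothetical.

#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq <= 0) {
		/* Stay quiet on deferral; the driver core will retry the probe. */
		if (irq != -EPROBE_DEFER)
			dev_err(&pdev->dev, "no irq\n");
		return irq ? irq : -ENXIO;	/* 0 is not a valid IRQ here */
	}

	/* ... request_irq(irq, ...) and the rest of probe ... */
	return 0;
}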
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 2d83b8c75965..e157a0e44419 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -6800,7 +6800,7 @@ static int __init ata_parse_force_one(char **cur,
 	}
 
 	force_ent->port = simple_strtoul(id, &endp, 10);
-	if (p == endp || *endp != '\0') {
+	if (id == endp || *endp != '\0') {
 		*reason = "invalid port/link";
 		return -EINVAL;
 	}
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index b66bcda88320..3b2246dded74 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -4067,7 +4067,6 @@ static int mv_platform_probe(struct platform_device *pdev)
 	struct ata_host *host;
 	struct mv_host_priv *hpriv;
 	struct resource *res;
-	void __iomem *mmio;
 	int n_ports = 0, irq = 0;
 	int rc;
 	int port;
@@ -4086,9 +4085,8 @@ static int mv_platform_probe(struct platform_device *pdev)
 	 * Get the register base first
 	 */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	mmio = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(mmio))
-		return PTR_ERR(mmio);
+	if (res == NULL)
+		return -EINVAL;
 
 	/* allocate host */
 	if (pdev->dev.of_node) {
@@ -4132,7 +4130,12 @@ static int mv_platform_probe(struct platform_device *pdev)
 	hpriv->board_idx = chip_soc;
 
 	host->iomap = NULL;
-	hpriv->base = mmio - SATAHC0_REG_BASE;
+	hpriv->base = devm_ioremap(&pdev->dev, res->start,
+				   resource_size(res));
+	if (!hpriv->base)
+		return -ENOMEM;
+
+	hpriv->base -= SATAHC0_REG_BASE;
 
 	hpriv->clk = clk_get(&pdev->dev, NULL);
 	if (IS_ERR(hpriv->clk))
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 5d38245a7a73..b7939a2c1fab 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -890,7 +890,10 @@ static int sata_rcar_probe(struct platform_device *pdev)
 		dev_err(&pdev->dev, "failed to get access to sata clock\n");
 		return PTR_ERR(priv->clk);
 	}
-	clk_prepare_enable(priv->clk);
+
+	ret = clk_prepare_enable(priv->clk);
+	if (ret)
+		return ret;
 
 	host = ata_host_alloc(&pdev->dev, 1);
 	if (!host) {
@@ -970,8 +973,11 @@ static int sata_rcar_resume(struct device *dev)
 	struct ata_host *host = dev_get_drvdata(dev);
 	struct sata_rcar_priv *priv = host->private_data;
 	void __iomem *base = priv->base;
+	int ret;
 
-	clk_prepare_enable(priv->clk);
+	ret = clk_prepare_enable(priv->clk);
+	if (ret)
+		return ret;
 
 	/* ack and mask */
 	iowrite32(0, base + SATAINTSTAT_REG);
@@ -988,8 +994,11 @@ static int sata_rcar_restore(struct device *dev)
 {
 	struct ata_host *host = dev_get_drvdata(dev);
 	struct sata_rcar_priv *priv = host->private_data;
+	int ret;
 
-	clk_prepare_enable(priv->clk);
+	ret = clk_prepare_enable(priv->clk);
+	if (ret)
+		return ret;
 
 	sata_rcar_setup_port(host);
 
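Note: a hedged sketch of the clock-enable error handling added above, with a hypothetical function name; clk_prepare_enable() can fail, so its return value is checked before the hardware is touched.

#include <linux/clk.h>

/* Enable the clock before programming the controller; bail out on failure. */
static int example_resume(struct clk *clk)
{
	int ret;

	ret = clk_prepare_enable(clk);	/* can fail, so check it */
	if (ret)
		return ret;

	/* ... reprogram the controller here; on a later error, undo with */
	/* clk_disable_unprepare(clk); before returning. */

	return 0;
}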
diff --git a/drivers/base/base.h b/drivers/base/base.h
index e19b1008e5fb..539432a14b5c 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -126,11 +126,6 @@ extern int driver_add_groups(struct device_driver *drv,
 extern void driver_remove_groups(struct device_driver *drv,
 				 const struct attribute_group **groups);
 
-extern int device_add_groups(struct device *dev,
-			     const struct attribute_group **groups);
-extern void device_remove_groups(struct device *dev,
-				 const struct attribute_group **groups);
-
 extern char *make_class_name(const char *name, struct kobject *kobj);
 
 extern int devres_release_all(struct device *dev);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index bbecaf9293be..09723532725d 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1026,12 +1026,144 @@ int device_add_groups(struct device *dev, const struct attribute_group **groups)
 {
 	return sysfs_create_groups(&dev->kobj, groups);
 }
+EXPORT_SYMBOL_GPL(device_add_groups);
 
 void device_remove_groups(struct device *dev,
 			  const struct attribute_group **groups)
 {
 	sysfs_remove_groups(&dev->kobj, groups);
 }
+EXPORT_SYMBOL_GPL(device_remove_groups);
+
+union device_attr_group_devres {
+	const struct attribute_group *group;
+	const struct attribute_group **groups;
+};
+
+static int devm_attr_group_match(struct device *dev, void *res, void *data)
+{
+	return ((union device_attr_group_devres *)res)->group == data;
+}
+
+static void devm_attr_group_remove(struct device *dev, void *res)
+{
+	union device_attr_group_devres *devres = res;
+	const struct attribute_group *group = devres->group;
+
+	dev_dbg(dev, "%s: removing group %p\n", __func__, group);
+	sysfs_remove_group(&dev->kobj, group);
+}
+
+static void devm_attr_groups_remove(struct device *dev, void *res)
+{
+	union device_attr_group_devres *devres = res;
+	const struct attribute_group **groups = devres->groups;
+
+	dev_dbg(dev, "%s: removing groups %p\n", __func__, groups);
+	sysfs_remove_groups(&dev->kobj, groups);
+}
+
+/**
+ * devm_device_add_group - given a device, create a managed attribute group
+ * @dev:	The device to create the group for
+ * @grp:	The attribute group to create
+ *
+ * This function creates a group for the first time. It will explicitly
+ * warn and error if any of the attribute files being created already exist.
+ *
+ * Returns 0 on success or error code on failure.
+ */
+int devm_device_add_group(struct device *dev, const struct attribute_group *grp)
+{
+	union device_attr_group_devres *devres;
+	int error;
+
+	devres = devres_alloc(devm_attr_group_remove,
+			      sizeof(*devres), GFP_KERNEL);
+	if (!devres)
+		return -ENOMEM;
+
+	error = sysfs_create_group(&dev->kobj, grp);
+	if (error) {
+		devres_free(devres);
+		return error;
+	}
+
+	devres->group = grp;
+	devres_add(dev, devres);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(devm_device_add_group);
+
+/**
+ * devm_device_remove_group: remove a managed group from a device
+ * @dev:	device to remove the group from
+ * @grp:	group to remove
+ *
+ * This function removes a group of attributes from a device. The attributes
+ * previously have to have been created for this group, otherwise it will fail.
+ */
+void devm_device_remove_group(struct device *dev,
+			      const struct attribute_group *grp)
+{
+	WARN_ON(devres_release(dev, devm_attr_group_remove,
+			       devm_attr_group_match,
+			       /* cast away const */ (void *)grp));
+}
+EXPORT_SYMBOL_GPL(devm_device_remove_group);
+
+/**
+ * devm_device_add_groups - create a bunch of managed attribute groups
+ * @dev:	The device to create the group for
+ * @groups:	The attribute groups to create, NULL terminated
+ *
+ * This function creates a bunch of managed attribute groups. If an error
+ * occurs when creating a group, all previously created groups will be
+ * removed, unwinding everything back to the original state when this
+ * function was called. It will explicitly warn and error if any of the
+ * attribute files being created already exist.
+ *
+ * Returns 0 on success or error code from sysfs_create_group on failure.
+ */
+int devm_device_add_groups(struct device *dev,
+			   const struct attribute_group **groups)
+{
+	union device_attr_group_devres *devres;
+	int error;
+
+	devres = devres_alloc(devm_attr_groups_remove,
+			      sizeof(*devres), GFP_KERNEL);
+	if (!devres)
+		return -ENOMEM;
+
+	error = sysfs_create_groups(&dev->kobj, groups);
+	if (error) {
+		devres_free(devres);
+		return error;
+	}
+
+	devres->groups = groups;
+	devres_add(dev, devres);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(devm_device_add_groups);
+
+/**
+ * devm_device_remove_groups - remove a list of managed groups
+ *
+ * @dev:	The device for the groups to be removed from
+ * @groups:	NULL terminated list of groups to be removed
+ *
+ * If groups is not NULL, remove the specified groups from the device.
+ */
+void devm_device_remove_groups(struct device *dev,
+			       const struct attribute_group **groups)
+{
+	WARN_ON(devres_release(dev, devm_attr_groups_remove,
+			       devm_attr_group_match,
+			       /* cast away const */ (void *)groups));
+}
+EXPORT_SYMBOL_GPL(devm_device_remove_groups);
 
 static int device_add_attrs(struct device *dev)
 {
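Note: a possible usage sketch for the new devres-managed helpers, using a hypothetical driver attribute that is not part of the patch. Because the group is tied to devres, it is removed automatically when the device is unbound, so the remove path needs no sysfs cleanup.

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%d\n", 42);	/* placeholder value */
}
static DEVICE_ATTR_RO(example);

static struct attribute *example_attrs[] = {
	&dev_attr_example.attr,
	NULL
};

static const struct attribute_group example_group = {
	.attrs = example_attrs,
};

static int example_probe(struct device *dev)
{
	/* Cleanup happens via devres when the device goes away. */
	return devm_device_add_group(dev, &example_group);
}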
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 4882f06d12df..c17fefc77345 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -259,6 +259,8 @@ static void driver_bound(struct device *dev)
 	if (dev->bus)
 		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
 					     BUS_NOTIFY_BOUND_DRIVER, dev);
+
+	kobject_uevent(&dev->kobj, KOBJ_BIND);
 }
 
 static int driver_sysfs_add(struct device *dev)
@@ -848,6 +850,8 @@ static void __device_release_driver(struct device *dev, struct device *parent)
 			blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
 						     BUS_NOTIFY_UNBOUND_DRIVER,
 						     dev);
+
+		kobject_uevent(&dev->kobj, KOBJ_UNBIND);
 	}
 }
 
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index e987a6f55d36..9faee1c893e5 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1091,6 +1091,11 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
 	if (async_error)
 		goto Complete;
 
+	if (pm_wakeup_pending()) {
+		async_error = -EBUSY;
+		goto Complete;
+	}
+
 	if (dev->power.syscore || dev->power.direct_complete)
 		goto Complete;
 
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 9c36b27996fc..c313b600d356 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -28,8 +28,8 @@ bool events_check_enabled __read_mostly;
 /* First wakeup IRQ seen by the kernel in the last cycle. */
 unsigned int pm_wakeup_irq __read_mostly;
 
-/* If greater than 0 and the system is suspending, terminate the suspend. */
-static atomic_t pm_abort_suspend __read_mostly;
+/* If set and the system is suspending, terminate the suspend. */
+static bool pm_abort_suspend __read_mostly;
 
 /*
  * Combined counters of registered wakeup events and wakeup events in progress.
@@ -855,26 +855,20 @@ bool pm_wakeup_pending(void)
 		pm_print_active_wakeup_sources();
 	}
 
-	return ret || atomic_read(&pm_abort_suspend) > 0;
+	return ret || pm_abort_suspend;
 }
 
 void pm_system_wakeup(void)
 {
-	atomic_inc(&pm_abort_suspend);
+	pm_abort_suspend = true;
 	freeze_wake();
 }
 EXPORT_SYMBOL_GPL(pm_system_wakeup);
 
-void pm_system_cancel_wakeup(void)
-{
-	atomic_dec(&pm_abort_suspend);
-}
-
-void pm_wakeup_clear(bool reset)
+void pm_wakeup_clear(void)
 {
+	pm_abort_suspend = false;
 	pm_wakeup_irq = 0;
-	if (reset)
-		atomic_set(&pm_abort_suspend, 0);
 }
 
 void pm_system_irq_wakeup(unsigned int irq_number)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 28d932906f24..ebbd0c3fe0ed 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -608,6 +608,9 @@ static int loop_switch(struct loop_device *lo, struct file *file)
  */
 static int loop_flush(struct loop_device *lo)
 {
+	/* loop not yet configured, no running thread, nothing to flush */
+	if (lo->lo_state != Lo_bound)
+		return 0;
 	return loop_switch(lo, NULL);
 }
 
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 9a7bb2c29447..f3f191ba8ca4 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -937,14 +937,6 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
 	return -ENOSPC;
 }
 
-/* Reset all properties of an NBD device */
-static void nbd_reset(struct nbd_device *nbd)
-{
-	nbd->config = NULL;
-	nbd->tag_set.timeout = 0;
-	queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
-}
-
 static void nbd_bdev_reset(struct block_device *bdev)
 {
 	if (bdev->bd_openers > 1)
@@ -1029,7 +1021,11 @@ static void nbd_config_put(struct nbd_device *nbd)
 		}
 		kfree(config->socks);
 	}
-	nbd_reset(nbd);
+	kfree(nbd->config);
+	nbd->config = NULL;
+
+	nbd->tag_set.timeout = 0;
+	queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
 
 	mutex_unlock(&nbd->config_lock);
 	nbd_put(nbd);
@@ -1483,7 +1479,6 @@ static int nbd_dev_add(int index)
 	disk->fops = &nbd_fops;
 	disk->private_data = nbd;
 	sprintf(disk->disk_name, "nbd%d", index);
-	nbd_reset(nbd);
 	add_disk(disk);
 	nbd_total_devices++;
 	return index;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 454bf9c34882..c16f74547804 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -4023,6 +4023,7 @@ static void rbd_queue_workfn(struct work_struct *work)
 
 	switch (req_op(rq)) {
 	case REQ_OP_DISCARD:
+	case REQ_OP_WRITE_ZEROES:
 		op_type = OBJ_OP_DISCARD;
 		break;
 	case REQ_OP_WRITE:
@@ -4420,6 +4421,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
 	q->limits.discard_granularity = segment_size;
 	q->limits.discard_alignment = segment_size;
 	blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
+	blk_queue_max_write_zeroes_sectors(q, segment_size / SECTOR_SIZE);
 
 	if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
 		q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 726c32e35db9..0e824091a12f 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -609,8 +609,6 @@ int xen_blkif_schedule(void *arg)
 	unsigned long timeout;
 	int ret;
 
-	xen_blkif_get(blkif);
-
 	set_freezable();
 	while (!kthread_should_stop()) {
 		if (try_to_freeze())
@@ -665,7 +663,6 @@ purge_gnt_list:
 		print_stats(ring);
 
 	ring->xenblkd = NULL;
-	xen_blkif_put(blkif);
 
 	return 0;
 }
@@ -1436,34 +1433,35 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 static void make_response(struct xen_blkif_ring *ring, u64 id,
 			  unsigned short op, int st)
 {
-	struct blkif_response  resp;
+	struct blkif_response *resp;
 	unsigned long     flags;
 	union blkif_back_rings *blk_rings;
 	int notify;
 
-	resp.id        = id;
-	resp.operation = op;
-	resp.status    = st;
-
 	spin_lock_irqsave(&ring->blk_ring_lock, flags);
 	blk_rings = &ring->blk_rings;
 	/* Place on the response ring for the relevant domain. */
 	switch (ring->blkif->blk_protocol) {
 	case BLKIF_PROTOCOL_NATIVE:
-		memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
-		       &resp, sizeof(resp));
+		resp = RING_GET_RESPONSE(&blk_rings->native,
+					 blk_rings->native.rsp_prod_pvt);
 		break;
 	case BLKIF_PROTOCOL_X86_32:
-		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
-		       &resp, sizeof(resp));
+		resp = RING_GET_RESPONSE(&blk_rings->x86_32,
+					 blk_rings->x86_32.rsp_prod_pvt);
 		break;
 	case BLKIF_PROTOCOL_X86_64:
-		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
-		       &resp, sizeof(resp));
+		resp = RING_GET_RESPONSE(&blk_rings->x86_64,
+					 blk_rings->x86_64.rsp_prod_pvt);
 		break;
 	default:
 		BUG();
 	}
+
+	resp->id        = id;
+	resp->operation = op;
+	resp->status    = st;
+
 	blk_rings->common.rsp_prod_pvt++;
 	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
 	spin_unlock_irqrestore(&ring->blk_ring_lock, flags);
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index dea61f6ab8cb..ecb35fe8ca8d 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -75,9 +75,8 @@ extern unsigned int xenblk_max_queues;
 struct blkif_common_request {
 	char dummy;
 };
-struct blkif_common_response {
-	char dummy;
-};
+
+/* i386 protocol version */
 
 struct blkif_x86_32_request_rw {
 	uint8_t        nr_segments;  /* number of segments */
@@ -129,14 +128,6 @@ struct blkif_x86_32_request {
 	} u;
 } __attribute__((__packed__));
 
-/* i386 protocol version */
-#pragma pack(push, 4)
-struct blkif_x86_32_response {
-	uint64_t        id;              /* copied from request */
-	uint8_t         operation;       /* copied from request */
-	int16_t         status;          /* BLKIF_RSP_??? */
-};
-#pragma pack(pop)
 /* x86_64 protocol version */
 
 struct blkif_x86_64_request_rw {
@@ -193,18 +184,12 @@ struct blkif_x86_64_request {
 	} u;
 } __attribute__((__packed__));
 
-struct blkif_x86_64_response {
-	uint64_t       __attribute__((__aligned__(8))) id;
-	uint8_t        operation;       /* copied from request */
-	int16_t        status;          /* BLKIF_RSP_??? */
-};
-
 DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
-		  struct blkif_common_response);
+		  struct blkif_response);
 DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
-		  struct blkif_x86_32_response);
+		  struct blkif_response __packed);
 DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
-		  struct blkif_x86_64_response);
+		  struct blkif_response);
 
 union blkif_back_rings {
 	struct blkif_back_ring native;
@@ -281,6 +266,7 @@ struct xen_blkif_ring {
 
 	wait_queue_head_t	wq;
 	atomic_t		inflight;
+	bool			active;
 	/* One thread per blkif ring. */
 	struct task_struct	*xenblkd;
 	unsigned int		waiting_reqs;
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 1f3dfaa54d87..792da683e70d 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -159,7 +159,7 @@ static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
 		init_waitqueue_head(&ring->shutdown_wq);
 		ring->blkif = blkif;
 		ring->st_print = jiffies;
-		xen_blkif_get(blkif);
+		ring->active = true;
 	}
 
 	return 0;
@@ -249,10 +249,12 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
 		struct xen_blkif_ring *ring = &blkif->rings[r];
 		unsigned int i = 0;
 
+		if (!ring->active)
+			continue;
+
 		if (ring->xenblkd) {
 			kthread_stop(ring->xenblkd);
 			wake_up(&ring->shutdown_wq);
-			ring->xenblkd = NULL;
 		}
 
 		/* The above kthread_stop() guarantees that at this point we
@@ -296,7 +298,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
 		BUG_ON(ring->free_pages_num != 0);
 		BUG_ON(ring->persistent_gnt_c != 0);
 		WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
-		xen_blkif_put(blkif);
+		ring->active = false;
 	}
 	blkif->nr_ring_pages = 0;
 	/*
@@ -312,9 +314,10 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
 
 static void xen_blkif_free(struct xen_blkif *blkif)
 {
-
-	xen_blkif_disconnect(blkif);
+	WARN_ON(xen_blkif_disconnect(blkif));
 	xen_vbd_free(&blkif->vbd);
+	kfree(blkif->be->mode);
+	kfree(blkif->be);
 
 	/* Make sure everything is drained before shutting down */
 	kmem_cache_free(xen_blkif_cachep, blkif);
@@ -511,8 +514,6 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
 		xen_blkif_put(be->blkif);
 	}
 
-	kfree(be->mode);
-	kfree(be);
 	return 0;
 }
 
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 6e0cbe092220..593a8818aca9 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -343,7 +343,7 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma)
 	phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
 
 	/* It's illegal to wrap around the end of the physical address space. */
-	if (offset + (phys_addr_t)size < offset)
+	if (offset + (phys_addr_t)size - 1 < offset)
 		return -EINVAL;
 
 	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
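Note: a worked sketch of the boundary the new check permits, using 32-bit example values chosen purely for illustration; the point is that a mapping ending exactly at the top of the physical address space is valid and only a true wrap should be rejected.

#include <linux/types.h>

/*
 * Returns true when [offset, offset + size) wraps past the top of the
 * physical address space (size is assumed non-zero, as in mmap).
 *
 * Example with a 32-bit phys_addr_t:
 *   offset = 0xFFFFF000, size = 0x1000 (the last page):
 *     offset + size wraps to 0, so the old "offset + size < offset" test
 *     rejected a perfectly valid mapping;
 *     offset + size - 1 = 0xFFFFFFFF does not wrap, so the new test accepts
 *     it, while size = 0x2000 would still be caught.
 */
static bool phys_range_wraps(phys_addr_t offset, size_t size)
{
	return offset + (phys_addr_t)size - 1 < offset;
}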
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index d4dbd8d8e524..382c864814d9 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -374,7 +374,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
 
 	rc = write_sync_reg(SCR_HOST_TO_READER_START, dev);
 	if (rc <= 0) {
-		DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc);
+		DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc);
 		DEBUGP(2, dev, "<- cm4040_write (failed)\n");
 		if (rc == -ERESTARTSYS)
 			return rc;
@@ -387,7 +387,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
 	for (i = 0; i < bytes_to_write; i++) {
 		rc = wait_for_bulk_out_ready(dev);
 		if (rc <= 0) {
-			DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2Zx\n",
+			DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2zx\n",
 			       rc);
 			DEBUGP(2, dev, "<- cm4040_write (failed)\n");
 			if (rc == -ERESTARTSYS)
@@ -403,7 +403,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
 	rc = write_sync_reg(SCR_HOST_TO_READER_DONE, dev);
 
 	if (rc <= 0) {
-		DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc);
+		DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc);
 		DEBUGP(2, dev, "<- cm4040_write (failed)\n");
 		if (rc == -ERESTARTSYS)
 			return rc;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 0ab024918907..01a260f67437 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1,6 +1,9 @@
 /*
  * random.c -- A strong random number generator
  *
+ * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All
+ * Rights Reserved.
+ *
  * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
  *
  * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All
@@ -762,6 +765,8 @@ static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
 static struct crng_state **crng_node_pool __read_mostly;
 #endif
 
+static void invalidate_batched_entropy(void);
+
 static void crng_initialize(struct crng_state *crng)
 {
 	int i;
@@ -798,12 +803,13 @@ static int crng_fast_load(const char *cp, size_t len)
 		p[crng_init_cnt % CHACHA20_KEY_SIZE] ^= *cp;
 		cp++; crng_init_cnt++; len--;
 	}
+	spin_unlock_irqrestore(&primary_crng.lock, flags);
 	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
+		invalidate_batched_entropy();
 		crng_init = 1;
 		wake_up_interruptible(&crng_init_wait);
 		pr_notice("random: fast init done\n");
 	}
-	spin_unlock_irqrestore(&primary_crng.lock, flags);
 	return 1;
 }
 
@@ -835,13 +841,14 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
 	}
 	memzero_explicit(&buf, sizeof(buf));
 	crng->init_time = jiffies;
+	spin_unlock_irqrestore(&primary_crng.lock, flags);
 	if (crng == &primary_crng && crng_init < 2) {
+		invalidate_batched_entropy();
 		crng_init = 2;
 		process_random_ready_list();
 		wake_up_interruptible(&crng_init_wait);
 		pr_notice("random: crng init done\n");
 	}
-	spin_unlock_irqrestore(&primary_crng.lock, flags);
 }
 
 static inline void crng_wait_ready(void)
@@ -1097,12 +1104,16 @@ static void add_interrupt_bench(cycles_t start)
 static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
 {
 	__u32 *ptr = (__u32 *) regs;
+	unsigned int idx;
 
 	if (regs == NULL)
 		return 0;
-	if (f->reg_idx >= sizeof(struct pt_regs) / sizeof(__u32))
+	idx = READ_ONCE(f->reg_idx);
| 1104 | f->reg_idx = 0; | 1112 | if (idx >= sizeof(struct pt_regs) / sizeof(__u32)) |
| 1105 | return *(ptr + f->reg_idx++); | 1113 | idx = 0; |
| 1114 | ptr += idx++; | ||
| 1115 | WRITE_ONCE(f->reg_idx, idx); | ||
| 1116 | return *ptr; | ||
| 1106 | } | 1117 | } |
| 1107 | 1118 | ||
| 1108 | void add_interrupt_randomness(int irq, int irq_flags) | 1119 | void add_interrupt_randomness(int irq, int irq_flags) |
| @@ -2019,6 +2030,7 @@ struct batched_entropy { | |||
| 2019 | }; | 2030 | }; |
| 2020 | unsigned int position; | 2031 | unsigned int position; |
| 2021 | }; | 2032 | }; |
| 2033 | static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock); | ||
| 2022 | 2034 | ||
| 2023 | /* | 2035 | /* |
| 2024 | * Get a random word for internal kernel use only. The quality of the random | 2036 | * Get a random word for internal kernel use only. The quality of the random |
| @@ -2029,6 +2041,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64); | |||
| 2029 | u64 get_random_u64(void) | 2041 | u64 get_random_u64(void) |
| 2030 | { | 2042 | { |
| 2031 | u64 ret; | 2043 | u64 ret; |
| 2044 | bool use_lock = READ_ONCE(crng_init) < 2; | ||
| 2045 | unsigned long flags = 0; | ||
| 2032 | struct batched_entropy *batch; | 2046 | struct batched_entropy *batch; |
| 2033 | 2047 | ||
| 2034 | #if BITS_PER_LONG == 64 | 2048 | #if BITS_PER_LONG == 64 |
| @@ -2041,11 +2055,15 @@ u64 get_random_u64(void) | |||
| 2041 | #endif | 2055 | #endif |
| 2042 | 2056 | ||
| 2043 | batch = &get_cpu_var(batched_entropy_u64); | 2057 | batch = &get_cpu_var(batched_entropy_u64); |
| 2058 | if (use_lock) | ||
| 2059 | read_lock_irqsave(&batched_entropy_reset_lock, flags); | ||
| 2044 | if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) { | 2060 | if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) { |
| 2045 | extract_crng((u8 *)batch->entropy_u64); | 2061 | extract_crng((u8 *)batch->entropy_u64); |
| 2046 | batch->position = 0; | 2062 | batch->position = 0; |
| 2047 | } | 2063 | } |
| 2048 | ret = batch->entropy_u64[batch->position++]; | 2064 | ret = batch->entropy_u64[batch->position++]; |
| 2065 | if (use_lock) | ||
| 2066 | read_unlock_irqrestore(&batched_entropy_reset_lock, flags); | ||
| 2049 | put_cpu_var(batched_entropy_u64); | 2067 | put_cpu_var(batched_entropy_u64); |
| 2050 | return ret; | 2068 | return ret; |
| 2051 | } | 2069 | } |
| @@ -2055,22 +2073,45 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32); | |||
| 2055 | u32 get_random_u32(void) | 2073 | u32 get_random_u32(void) |
| 2056 | { | 2074 | { |
| 2057 | u32 ret; | 2075 | u32 ret; |
| 2076 | bool use_lock = READ_ONCE(crng_init) < 2; | ||
| 2077 | unsigned long flags = 0; | ||
| 2058 | struct batched_entropy *batch; | 2078 | struct batched_entropy *batch; |
| 2059 | 2079 | ||
| 2060 | if (arch_get_random_int(&ret)) | 2080 | if (arch_get_random_int(&ret)) |
| 2061 | return ret; | 2081 | return ret; |
| 2062 | 2082 | ||
| 2063 | batch = &get_cpu_var(batched_entropy_u32); | 2083 | batch = &get_cpu_var(batched_entropy_u32); |
| 2084 | if (use_lock) | ||
| 2085 | read_lock_irqsave(&batched_entropy_reset_lock, flags); | ||
| 2064 | if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) { | 2086 | if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) { |
| 2065 | extract_crng((u8 *)batch->entropy_u32); | 2087 | extract_crng((u8 *)batch->entropy_u32); |
| 2066 | batch->position = 0; | 2088 | batch->position = 0; |
| 2067 | } | 2089 | } |
| 2068 | ret = batch->entropy_u32[batch->position++]; | 2090 | ret = batch->entropy_u32[batch->position++]; |
| 2091 | if (use_lock) | ||
| 2092 | read_unlock_irqrestore(&batched_entropy_reset_lock, flags); | ||
| 2069 | put_cpu_var(batched_entropy_u32); | 2093 | put_cpu_var(batched_entropy_u32); |
| 2070 | return ret; | 2094 | return ret; |
| 2071 | } | 2095 | } |
| 2072 | EXPORT_SYMBOL(get_random_u32); | 2096 | EXPORT_SYMBOL(get_random_u32); |
| 2073 | 2097 | ||
| 2098 | /* It's important to invalidate all potential batched entropy that might | ||
| 2099 | * be stored before the crng is initialized, which we can do lazily by | ||
| 2100 | * simply resetting the counter to zero so that it's re-extracted on the | ||
| 2101 | * next usage. */ | ||
| 2102 | static void invalidate_batched_entropy(void) | ||
| 2103 | { | ||
| 2104 | int cpu; | ||
| 2105 | unsigned long flags; | ||
| 2106 | |||
| 2107 | write_lock_irqsave(&batched_entropy_reset_lock, flags); | ||
| 2108 | for_each_possible_cpu (cpu) { | ||
| 2109 | per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0; | ||
| 2110 | per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0; | ||
| 2111 | } | ||
| 2112 | write_unlock_irqrestore(&batched_entropy_reset_lock, flags); | ||
| 2113 | } | ||
| 2114 | |||
| 2074 | /** | 2115 | /** |
| 2075 | * randomize_page - Generate a random, page aligned address | 2116 | * randomize_page - Generate a random, page aligned address |
| 2076 | * @start: The smallest acceptable address the caller will take. | 2117 | * @start: The smallest acceptable address the caller will take. |
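The random.c changes invalidate any per-CPU batched entropy that was extracted before the CRNG finished initializing: resetting position to zero lazily forces a refill on the next request. The sketch below shows only that lazy-invalidation idea, single-threaded and in userspace; the real code additionally uses per-CPU batches and a rwlock (readers in get_random_u32/u64 until crng_init reaches 2, the writer in invalidate_batched_entropy), and the refill helper here is a stand-in, not the kernel's extract_crng().

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Simplified stand-in for one per-CPU batch (the kernel keeps one per CPU). */
struct batched_entropy {
	uint64_t entropy_u64[8];
	unsigned int position;
};

/* Stand-in for extract_crng(): refills the batch from the (re)seeded CRNG. */
static void refill(struct batched_entropy *b)
{
	static uint64_t seed;
	unsigned int i;

	for (i = 0; i < 8; i++)
		b->entropy_u64[i] = ++seed * 0x9e3779b97f4a7c15ULL;
}

static uint64_t get_random_u64_sketch(struct batched_entropy *b)
{
	/* Refill whenever the batch is exhausted (or was just invalidated). */
	if (b->position % 8 == 0) {
		refill(b);
		b->position = 0;
	}
	return b->entropy_u64[b->position++];
}

/* Lazy invalidation: resetting position forces a refill on the next call. */
static void invalidate(struct batched_entropy *b)
{
	b->position = 0;
}

int main(void)
{
	struct batched_entropy batch;

	memset(&batch, 0, sizeof(batch));
	printf("%llx\n", (unsigned long long)get_random_u64_sketch(&batch));
	invalidate(&batch);	/* e.g. after the CRNG finishes initializing */
	printf("%llx\n", (unsigned long long)get_random_u64_sketch(&batch));
	return 0;
}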
diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig index 19480bcc7046..2f29ee1a4d00 100644 --- a/drivers/clk/meson/Kconfig +++ b/drivers/clk/meson/Kconfig | |||
| @@ -14,6 +14,7 @@ config COMMON_CLK_MESON8B | |||
| 14 | config COMMON_CLK_GXBB | 14 | config COMMON_CLK_GXBB |
| 15 | bool | 15 | bool |
| 16 | depends on COMMON_CLK_AMLOGIC | 16 | depends on COMMON_CLK_AMLOGIC |
| 17 | select RESET_CONTROLLER | ||
| 17 | help | 18 | help |
| 18 | Support for the clock controller on AmLogic S905 devices, aka gxbb. | 19 | Support for the clock controller on AmLogic S905 devices, aka gxbb. |
| 19 | Say Y if you want peripherals and CPU frequency scaling to work. | 20 | Say Y if you want peripherals and CPU frequency scaling to work. |
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig index b0d551a8efe4..eb89c7801f00 100644 --- a/drivers/clk/sunxi-ng/Kconfig +++ b/drivers/clk/sunxi-ng/Kconfig | |||
| @@ -156,6 +156,7 @@ config SUN8I_R_CCU | |||
| 156 | bool "Support for Allwinner SoCs' PRCM CCUs" | 156 | bool "Support for Allwinner SoCs' PRCM CCUs" |
| 157 | select SUNXI_CCU_DIV | 157 | select SUNXI_CCU_DIV |
| 158 | select SUNXI_CCU_GATE | 158 | select SUNXI_CCU_GATE |
| 159 | select SUNXI_CCU_MP | ||
| 159 | default MACH_SUN8I || (ARCH_SUNXI && ARM64) | 160 | default MACH_SUN8I || (ARCH_SUNXI && ARM64) |
| 160 | 161 | ||
| 161 | endif | 162 | endif |
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h index 9b3cd24b78d2..061b6fbb4f95 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h +++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h | |||
| @@ -31,7 +31,9 @@ | |||
| 31 | #define CLK_PLL_VIDEO0_2X 8 | 31 | #define CLK_PLL_VIDEO0_2X 8 |
| 32 | #define CLK_PLL_VE 9 | 32 | #define CLK_PLL_VE 9 |
| 33 | #define CLK_PLL_DDR0 10 | 33 | #define CLK_PLL_DDR0 10 |
| 34 | #define CLK_PLL_PERIPH0 11 | 34 | |
| 35 | /* PLL_PERIPH0 exported for PRCM */ | ||
| 36 | |||
| 35 | #define CLK_PLL_PERIPH0_2X 12 | 37 | #define CLK_PLL_PERIPH0_2X 12 |
| 36 | #define CLK_PLL_PERIPH1 13 | 38 | #define CLK_PLL_PERIPH1 13 |
| 37 | #define CLK_PLL_PERIPH1_2X 14 | 39 | #define CLK_PLL_PERIPH1_2X 14 |
diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.c b/drivers/clk/sunxi-ng/ccu-sun5i.c index 5c476f966a72..5372bf8be5e6 100644 --- a/drivers/clk/sunxi-ng/ccu-sun5i.c +++ b/drivers/clk/sunxi-ng/ccu-sun5i.c | |||
| @@ -243,7 +243,7 @@ static SUNXI_CCU_GATE(ahb_ss_clk, "ahb-ss", "ahb", | |||
| 243 | static SUNXI_CCU_GATE(ahb_dma_clk, "ahb-dma", "ahb", | 243 | static SUNXI_CCU_GATE(ahb_dma_clk, "ahb-dma", "ahb", |
| 244 | 0x060, BIT(6), 0); | 244 | 0x060, BIT(6), 0); |
| 245 | static SUNXI_CCU_GATE(ahb_bist_clk, "ahb-bist", "ahb", | 245 | static SUNXI_CCU_GATE(ahb_bist_clk, "ahb-bist", "ahb", |
| 246 | 0x060, BIT(6), 0); | 246 | 0x060, BIT(7), 0); |
| 247 | static SUNXI_CCU_GATE(ahb_mmc0_clk, "ahb-mmc0", "ahb", | 247 | static SUNXI_CCU_GATE(ahb_mmc0_clk, "ahb-mmc0", "ahb", |
| 248 | 0x060, BIT(8), 0); | 248 | 0x060, BIT(8), 0); |
| 249 | static SUNXI_CCU_GATE(ahb_mmc1_clk, "ahb-mmc1", "ahb", | 249 | static SUNXI_CCU_GATE(ahb_mmc1_clk, "ahb-mmc1", "ahb", |
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c index 89e68d29bf45..df97e25aec76 100644 --- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c +++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c | |||
| @@ -556,7 +556,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(lcd0_ch1_clk, "lcd0-ch1", lcd_ch1_parents, | |||
| 556 | 0x12c, 0, 4, 24, 3, BIT(31), | 556 | 0x12c, 0, 4, 24, 3, BIT(31), |
| 557 | CLK_SET_RATE_PARENT); | 557 | CLK_SET_RATE_PARENT); |
| 558 | static SUNXI_CCU_M_WITH_MUX_GATE(lcd1_ch1_clk, "lcd1-ch1", lcd_ch1_parents, | 558 | static SUNXI_CCU_M_WITH_MUX_GATE(lcd1_ch1_clk, "lcd1-ch1", lcd_ch1_parents, |
| 559 | 0x12c, 0, 4, 24, 3, BIT(31), | 559 | 0x130, 0, 4, 24, 3, BIT(31), |
| 560 | CLK_SET_RATE_PARENT); | 560 | CLK_SET_RATE_PARENT); |
| 561 | 561 | ||
| 562 | static const char * const csi_sclk_parents[] = { "pll-video0", "pll-video1", | 562 | static const char * const csi_sclk_parents[] = { "pll-video0", "pll-video1", |
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.h b/drivers/clk/sunxi-ng/ccu-sun8i-h3.h index 85973d1e8165..1b4baea37d81 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.h +++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.h | |||
| @@ -29,7 +29,9 @@ | |||
| 29 | #define CLK_PLL_VIDEO 6 | 29 | #define CLK_PLL_VIDEO 6 |
| 30 | #define CLK_PLL_VE 7 | 30 | #define CLK_PLL_VE 7 |
| 31 | #define CLK_PLL_DDR 8 | 31 | #define CLK_PLL_DDR 8 |
| 32 | #define CLK_PLL_PERIPH0 9 | 32 | |
| 33 | /* PLL_PERIPH0 exported for PRCM */ | ||
| 34 | |||
| 33 | #define CLK_PLL_PERIPH0_2X 10 | 35 | #define CLK_PLL_PERIPH0_2X 10 |
| 34 | #define CLK_PLL_GPU 11 | 36 | #define CLK_PLL_GPU 11 |
| 35 | #define CLK_PLL_PERIPH1 12 | 37 | #define CLK_PLL_PERIPH1 12 |
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c index e58706b40ae9..6297add857b5 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c | |||
| @@ -537,7 +537,7 @@ static struct ccu_reset_map sun8i_v3s_ccu_resets[] = { | |||
| 537 | [RST_BUS_EMAC] = { 0x2c0, BIT(17) }, | 537 | [RST_BUS_EMAC] = { 0x2c0, BIT(17) }, |
| 538 | [RST_BUS_HSTIMER] = { 0x2c0, BIT(19) }, | 538 | [RST_BUS_HSTIMER] = { 0x2c0, BIT(19) }, |
| 539 | [RST_BUS_SPI0] = { 0x2c0, BIT(20) }, | 539 | [RST_BUS_SPI0] = { 0x2c0, BIT(20) }, |
| 540 | [RST_BUS_OTG] = { 0x2c0, BIT(23) }, | 540 | [RST_BUS_OTG] = { 0x2c0, BIT(24) }, |
| 541 | [RST_BUS_EHCI0] = { 0x2c0, BIT(26) }, | 541 | [RST_BUS_EHCI0] = { 0x2c0, BIT(26) }, |
| 542 | [RST_BUS_OHCI0] = { 0x2c0, BIT(29) }, | 542 | [RST_BUS_OHCI0] = { 0x2c0, BIT(29) }, |
| 543 | 543 | ||
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index 4bed671e490e..8b5c30062d99 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c | |||
| @@ -1209,9 +1209,9 @@ arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame) | |||
| 1209 | return 0; | 1209 | return 0; |
| 1210 | } | 1210 | } |
| 1211 | 1211 | ||
| 1212 | rate = readl_relaxed(frame + CNTFRQ); | 1212 | rate = readl_relaxed(base + CNTFRQ); |
| 1213 | 1213 | ||
| 1214 | iounmap(frame); | 1214 | iounmap(base); |
| 1215 | 1215 | ||
| 1216 | return rate; | 1216 | return rate; |
| 1217 | } | 1217 | } |
diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c index 44e5e951583b..8e64b8460f11 100644 --- a/drivers/clocksource/cadence_ttc_timer.c +++ b/drivers/clocksource/cadence_ttc_timer.c | |||
| @@ -18,6 +18,7 @@ | |||
| 18 | #include <linux/clk.h> | 18 | #include <linux/clk.h> |
| 19 | #include <linux/interrupt.h> | 19 | #include <linux/interrupt.h> |
| 20 | #include <linux/clockchips.h> | 20 | #include <linux/clockchips.h> |
| 21 | #include <linux/clocksource.h> | ||
| 21 | #include <linux/of_address.h> | 22 | #include <linux/of_address.h> |
| 22 | #include <linux/of_irq.h> | 23 | #include <linux/of_irq.h> |
| 23 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c index 2e9c830ae1cd..c4656c4d44a6 100644 --- a/drivers/clocksource/timer-sun5i.c +++ b/drivers/clocksource/timer-sun5i.c | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | 12 | ||
| 13 | #include <linux/clk.h> | 13 | #include <linux/clk.h> |
| 14 | #include <linux/clockchips.h> | 14 | #include <linux/clockchips.h> |
| 15 | #include <linux/clocksource.h> | ||
| 15 | #include <linux/delay.h> | 16 | #include <linux/delay.h> |
| 16 | #include <linux/interrupt.h> | 17 | #include <linux/interrupt.h> |
| 17 | #include <linux/irq.h> | 18 | #include <linux/irq.h> |
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 0e3f6496524d..26b643d57847 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
| @@ -2468,6 +2468,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) | |||
| 2468 | if (!(cpufreq_driver->flags & CPUFREQ_STICKY) && | 2468 | if (!(cpufreq_driver->flags & CPUFREQ_STICKY) && |
| 2469 | list_empty(&cpufreq_policy_list)) { | 2469 | list_empty(&cpufreq_policy_list)) { |
| 2470 | /* if all ->init() calls failed, unregister */ | 2470 | /* if all ->init() calls failed, unregister */ |
| 2471 | ret = -ENODEV; | ||
| 2471 | pr_debug("%s: No CPU initialized for driver %s\n", __func__, | 2472 | pr_debug("%s: No CPU initialized for driver %s\n", __func__, |
| 2472 | driver_data->name); | 2473 | driver_data->name); |
| 2473 | goto err_if_unreg; | 2474 | goto err_if_unreg; |
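The cpufreq.c hunk plugs a classic error-path hole: the branch jumped to the unwind label while ret still held 0, so a driver whose ->init() failed on every CPU was reported as successfully registered. A compact userspace sketch of the pattern (names and the unwind body are illustrative only):

#include <stdio.h>
#include <errno.h>

static int register_sketch(int nr_initialized)
{
	int ret = 0;

	if (nr_initialized == 0) {
		ret = -ENODEV;	/* the fix: set an error before taking the goto */
		goto err_unreg;
	}

	return 0;

err_unreg:
	/* ... undo the registration here ... */
	return ret;
}

int main(void)
{
	printf("%d\n", register_sketch(0));	/* negative errno, no longer 0 */
	return 0;
}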
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 992f7c20760f..88220ff3e1c2 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c | |||
| @@ -185,8 +185,8 @@ static ssize_t store_down_threshold(struct gov_attr_set *attr_set, | |||
| 185 | int ret; | 185 | int ret; |
| 186 | ret = sscanf(buf, "%u", &input); | 186 | ret = sscanf(buf, "%u", &input); |
| 187 | 187 | ||
| 188 | /* cannot be lower than 11 otherwise freq will not fall */ | 188 | /* cannot be lower than 1 otherwise freq will not fall */ |
| 189 | if (ret != 1 || input < 11 || input > 100 || | 189 | if (ret != 1 || input < 1 || input > 100 || |
| 190 | input >= dbs_data->up_threshold) | 190 | input >= dbs_data->up_threshold) |
| 191 | return -EINVAL; | 191 | return -EINVAL; |
| 192 | 192 | ||
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index b7de5bd76a31..eb1158532de3 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
| @@ -571,9 +571,10 @@ static inline void update_turbo_state(void) | |||
| 571 | static int min_perf_pct_min(void) | 571 | static int min_perf_pct_min(void) |
| 572 | { | 572 | { |
| 573 | struct cpudata *cpu = all_cpu_data[0]; | 573 | struct cpudata *cpu = all_cpu_data[0]; |
| 574 | int turbo_pstate = cpu->pstate.turbo_pstate; | ||
| 574 | 575 | ||
| 575 | return DIV_ROUND_UP(cpu->pstate.min_pstate * 100, | 576 | return turbo_pstate ? |
| 576 | cpu->pstate.turbo_pstate); | 577 | DIV_ROUND_UP(cpu->pstate.min_pstate * 100, turbo_pstate) : 0; |
| 577 | } | 578 | } |
| 578 | 579 | ||
| 579 | static s16 intel_pstate_get_epb(struct cpudata *cpu_data) | 580 | static s16 intel_pstate_get_epb(struct cpudata *cpu_data) |
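The intel_pstate change guards a division that can run before cpu->pstate.turbo_pstate has been populated, returning 0 instead of dividing by zero. The guarded helper in isolation; the pstate values in main() are made up for illustration:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Guarded percentage: returns 0 when the divisor is not yet known. */
static int min_perf_pct_min_sketch(int min_pstate, int turbo_pstate)
{
	return turbo_pstate ?
		DIV_ROUND_UP(min_pstate * 100, turbo_pstate) : 0;
}

int main(void)
{
	printf("%d\n", min_perf_pct_min_sketch(8, 0));	/* early call: no divide-by-zero */
	printf("%d\n", min_perf_pct_min_sketch(8, 36));	/* rounds up to 23 */
	return 0;
}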
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c index 1b9bcd76c60e..c2dd43f3f5d8 100644 --- a/drivers/cpufreq/kirkwood-cpufreq.c +++ b/drivers/cpufreq/kirkwood-cpufreq.c | |||
| @@ -127,7 +127,12 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev) | |||
| 127 | return PTR_ERR(priv.cpu_clk); | 127 | return PTR_ERR(priv.cpu_clk); |
| 128 | } | 128 | } |
| 129 | 129 | ||
| 130 | clk_prepare_enable(priv.cpu_clk); | 130 | err = clk_prepare_enable(priv.cpu_clk); |
| 131 | if (err) { | ||
| 132 | dev_err(priv.dev, "Unable to prepare cpuclk\n"); | ||
| 133 | return err; | ||
| 134 | } | ||
| 135 | |||
| 131 | kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000; | 136 | kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000; |
| 132 | 137 | ||
| 133 | priv.ddr_clk = of_clk_get_by_name(np, "ddrclk"); | 138 | priv.ddr_clk = of_clk_get_by_name(np, "ddrclk"); |
| @@ -137,7 +142,11 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev) | |||
| 137 | goto out_cpu; | 142 | goto out_cpu; |
| 138 | } | 143 | } |
| 139 | 144 | ||
| 140 | clk_prepare_enable(priv.ddr_clk); | 145 | err = clk_prepare_enable(priv.ddr_clk); |
| 146 | if (err) { | ||
| 147 | dev_err(priv.dev, "Unable to prepare ddrclk\n"); | ||
| 148 | goto out_cpu; | ||
| 149 | } | ||
| 141 | kirkwood_freq_table[1].frequency = clk_get_rate(priv.ddr_clk) / 1000; | 150 | kirkwood_freq_table[1].frequency = clk_get_rate(priv.ddr_clk) / 1000; |
| 142 | 151 | ||
| 143 | priv.powersave_clk = of_clk_get_by_name(np, "powersave"); | 152 | priv.powersave_clk = of_clk_get_by_name(np, "powersave"); |
| @@ -146,7 +155,11 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev) | |||
| 146 | err = PTR_ERR(priv.powersave_clk); | 155 | err = PTR_ERR(priv.powersave_clk); |
| 147 | goto out_ddr; | 156 | goto out_ddr; |
| 148 | } | 157 | } |
| 149 | clk_prepare_enable(priv.powersave_clk); | 158 | err = clk_prepare_enable(priv.powersave_clk); |
| 159 | if (err) { | ||
| 160 | dev_err(priv.dev, "Unable to prepare powersave clk\n"); | ||
| 161 | goto out_ddr; | ||
| 162 | } | ||
| 150 | 163 | ||
| 151 | of_node_put(np); | 164 | of_node_put(np); |
| 152 | np = NULL; | 165 | np = NULL; |
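The kirkwood-cpufreq hunks check each clk_prepare_enable() and unwind the clocks that were already enabled when a later one fails. A userspace sketch of that goto-based unwind, with stand-in enable/disable helpers in place of the clk API:

#include <stdio.h>

/* Userspace stand-ins for clk_prepare_enable()/clk_disable_unprepare(). */
static int enable_clk(const char *name)
{
	printf("enable %s\n", name);
	return 0;	/* return a negative errno here to exercise the unwind */
}

static void disable_clk(const char *name)
{
	printf("disable %s\n", name);
}

/* Each enable is checked; failures unwind the clocks already enabled. */
static int probe_sketch(void)
{
	int err;

	err = enable_clk("cpuclk");
	if (err)
		return err;

	err = enable_clk("ddrclk");
	if (err)
		goto out_cpu;

	err = enable_clk("powersave");
	if (err)
		goto out_ddr;

	return 0;

out_ddr:
	disable_clk("ddrclk");
out_cpu:
	disable_clk("cpuclk");
	return err;
}

int main(void)
{
	return probe_sketch();
}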
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c index ffca4fc0061d..ae8eb0359889 100644 --- a/drivers/cpuidle/dt_idle_states.c +++ b/drivers/cpuidle/dt_idle_states.c | |||
| @@ -180,8 +180,10 @@ int dt_init_idle_driver(struct cpuidle_driver *drv, | |||
| 180 | if (!state_node) | 180 | if (!state_node) |
| 181 | break; | 181 | break; |
| 182 | 182 | ||
| 183 | if (!of_device_is_available(state_node)) | 183 | if (!of_device_is_available(state_node)) { |
| 184 | of_node_put(state_node); | ||
| 184 | continue; | 185 | continue; |
| 186 | } | ||
| 185 | 187 | ||
| 186 | if (!idle_state_valid(state_node, i, cpumask)) { | 188 | if (!idle_state_valid(state_node, i, cpumask)) { |
| 187 | pr_warn("%s idle state not valid, bailing out\n", | 189 | pr_warn("%s idle state not valid, bailing out\n", |
diff --git a/drivers/dax/super.c b/drivers/dax/super.c index 6ed32aac8bbe..922d0823f8ec 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c | |||
| @@ -210,9 +210,12 @@ EXPORT_SYMBOL_GPL(kill_dax); | |||
| 210 | static struct inode *dax_alloc_inode(struct super_block *sb) | 210 | static struct inode *dax_alloc_inode(struct super_block *sb) |
| 211 | { | 211 | { |
| 212 | struct dax_device *dax_dev; | 212 | struct dax_device *dax_dev; |
| 213 | struct inode *inode; | ||
| 213 | 214 | ||
| 214 | dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL); | 215 | dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL); |
| 215 | return &dax_dev->inode; | 216 | inode = &dax_dev->inode; |
| 217 | inode->i_rdev = 0; | ||
| 218 | return inode; | ||
| 216 | } | 219 | } |
| 217 | 220 | ||
| 218 | static struct dax_device *to_dax_dev(struct inode *inode) | 221 | static struct dax_device *to_dax_dev(struct inode *inode) |
| @@ -227,7 +230,8 @@ static void dax_i_callback(struct rcu_head *head) | |||
| 227 | 230 | ||
| 228 | kfree(dax_dev->host); | 231 | kfree(dax_dev->host); |
| 229 | dax_dev->host = NULL; | 232 | dax_dev->host = NULL; |
| 230 | ida_simple_remove(&dax_minor_ida, MINOR(inode->i_rdev)); | 233 | if (inode->i_rdev) |
| 234 | ida_simple_remove(&dax_minor_ida, MINOR(inode->i_rdev)); | ||
| 231 | kmem_cache_free(dax_cache, dax_dev); | 235 | kmem_cache_free(dax_cache, dax_dev); |
| 232 | } | 236 | } |
| 233 | 237 | ||
| @@ -423,6 +427,7 @@ static void init_once(void *_dax_dev) | |||
| 423 | struct dax_device *dax_dev = _dax_dev; | 427 | struct dax_device *dax_dev = _dax_dev; |
| 424 | struct inode *inode = &dax_dev->inode; | 428 | struct inode *inode = &dax_dev->inode; |
| 425 | 429 | ||
| 430 | memset(dax_dev, 0, sizeof(*dax_dev)); | ||
| 426 | inode_init_once(inode); | 431 | inode_init_once(inode); |
| 427 | } | 432 | } |
| 428 | 433 | ||
diff --git a/drivers/devfreq/event/exynos-nocp.c b/drivers/devfreq/event/exynos-nocp.c index 5c3e7b11e8a6..f6e7956fc91a 100644 --- a/drivers/devfreq/event/exynos-nocp.c +++ b/drivers/devfreq/event/exynos-nocp.c | |||
| @@ -267,7 +267,11 @@ static int exynos_nocp_probe(struct platform_device *pdev) | |||
| 267 | } | 267 | } |
| 268 | platform_set_drvdata(pdev, nocp); | 268 | platform_set_drvdata(pdev, nocp); |
| 269 | 269 | ||
| 270 | clk_prepare_enable(nocp->clk); | 270 | ret = clk_prepare_enable(nocp->clk); |
| 271 | if (ret) { | ||
| 272 | dev_err(&pdev->dev, "failed to prepare ppmu clock\n"); | ||
| 273 | return ret; | ||
| 274 | } | ||
| 271 | 275 | ||
| 272 | pr_info("exynos-nocp: new NoC Probe device registered: %s\n", | 276 | pr_info("exynos-nocp: new NoC Probe device registered: %s\n", |
| 273 | dev_name(dev)); | 277 | dev_name(dev)); |
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c index 9b7350935b73..d96e3dc71cf8 100644 --- a/drivers/devfreq/event/exynos-ppmu.c +++ b/drivers/devfreq/event/exynos-ppmu.c | |||
| @@ -44,7 +44,7 @@ struct exynos_ppmu { | |||
| 44 | { "ppmu-event2-"#name, PPMU_PMNCNT2 }, \ | 44 | { "ppmu-event2-"#name, PPMU_PMNCNT2 }, \ |
| 45 | { "ppmu-event3-"#name, PPMU_PMNCNT3 } | 45 | { "ppmu-event3-"#name, PPMU_PMNCNT3 } |
| 46 | 46 | ||
| 47 | struct __exynos_ppmu_events { | 47 | static struct __exynos_ppmu_events { |
| 48 | char *name; | 48 | char *name; |
| 49 | int id; | 49 | int id; |
| 50 | } ppmu_events[] = { | 50 | } ppmu_events[] = { |
| @@ -648,7 +648,11 @@ static int exynos_ppmu_probe(struct platform_device *pdev) | |||
| 648 | dev_name(&pdev->dev), desc[i].name); | 648 | dev_name(&pdev->dev), desc[i].name); |
| 649 | } | 649 | } |
| 650 | 650 | ||
| 651 | clk_prepare_enable(info->ppmu.clk); | 651 | ret = clk_prepare_enable(info->ppmu.clk); |
| 652 | if (ret) { | ||
| 653 | dev_err(&pdev->dev, "failed to prepare ppmu clock\n"); | ||
| 654 | return ret; | ||
| 655 | } | ||
| 652 | 656 | ||
| 653 | return 0; | 657 | return 0; |
| 654 | } | 658 | } |
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index d37e8dda8079..ec240592f5c8 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c | |||
| @@ -201,6 +201,7 @@ struct ep93xx_dma_engine { | |||
| 201 | struct dma_device dma_dev; | 201 | struct dma_device dma_dev; |
| 202 | bool m2m; | 202 | bool m2m; |
| 203 | int (*hw_setup)(struct ep93xx_dma_chan *); | 203 | int (*hw_setup)(struct ep93xx_dma_chan *); |
| 204 | void (*hw_synchronize)(struct ep93xx_dma_chan *); | ||
| 204 | void (*hw_shutdown)(struct ep93xx_dma_chan *); | 205 | void (*hw_shutdown)(struct ep93xx_dma_chan *); |
| 205 | void (*hw_submit)(struct ep93xx_dma_chan *); | 206 | void (*hw_submit)(struct ep93xx_dma_chan *); |
| 206 | int (*hw_interrupt)(struct ep93xx_dma_chan *); | 207 | int (*hw_interrupt)(struct ep93xx_dma_chan *); |
| @@ -323,6 +324,8 @@ static int m2p_hw_setup(struct ep93xx_dma_chan *edmac) | |||
| 323 | | M2P_CONTROL_ENABLE; | 324 | | M2P_CONTROL_ENABLE; |
| 324 | m2p_set_control(edmac, control); | 325 | m2p_set_control(edmac, control); |
| 325 | 326 | ||
| 327 | edmac->buffer = 0; | ||
| 328 | |||
| 326 | return 0; | 329 | return 0; |
| 327 | } | 330 | } |
| 328 | 331 | ||
| @@ -331,21 +334,27 @@ static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac) | |||
| 331 | return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3; | 334 | return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3; |
| 332 | } | 335 | } |
| 333 | 336 | ||
| 334 | static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac) | 337 | static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac) |
| 335 | { | 338 | { |
| 339 | unsigned long flags; | ||
| 336 | u32 control; | 340 | u32 control; |
| 337 | 341 | ||
| 342 | spin_lock_irqsave(&edmac->lock, flags); | ||
| 338 | control = readl(edmac->regs + M2P_CONTROL); | 343 | control = readl(edmac->regs + M2P_CONTROL); |
| 339 | control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT); | 344 | control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT); |
| 340 | m2p_set_control(edmac, control); | 345 | m2p_set_control(edmac, control); |
| 346 | spin_unlock_irqrestore(&edmac->lock, flags); | ||
| 341 | 347 | ||
| 342 | while (m2p_channel_state(edmac) >= M2P_STATE_ON) | 348 | while (m2p_channel_state(edmac) >= M2P_STATE_ON) |
| 343 | cpu_relax(); | 349 | schedule(); |
| 350 | } | ||
| 344 | 351 | ||
| 352 | static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac) | ||
| 353 | { | ||
| 345 | m2p_set_control(edmac, 0); | 354 | m2p_set_control(edmac, 0); |
| 346 | 355 | ||
| 347 | while (m2p_channel_state(edmac) == M2P_STATE_STALL) | 356 | while (m2p_channel_state(edmac) != M2P_STATE_IDLE) |
| 348 | cpu_relax(); | 357 | dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n"); |
| 349 | } | 358 | } |
| 350 | 359 | ||
| 351 | static void m2p_fill_desc(struct ep93xx_dma_chan *edmac) | 360 | static void m2p_fill_desc(struct ep93xx_dma_chan *edmac) |
| @@ -1161,6 +1170,26 @@ fail: | |||
| 1161 | } | 1170 | } |
| 1162 | 1171 | ||
| 1163 | /** | 1172 | /** |
| 1173 | * ep93xx_dma_synchronize - Synchronizes the termination of transfers to the | ||
| 1174 | * current context. | ||
| 1175 | * @chan: channel | ||
| 1176 | * | ||
| 1177 | * Synchronizes the DMA channel termination to the current context. When this | ||
| 1178 | * function returns it is guaranteed that all transfers for previously issued | ||
| 1179 | * descriptors have stopped and it is safe to free the memory associated | ||
| 1180 | * with them. Furthermore it is guaranteed that all complete callback functions | ||
| 1181 | * for a previously submitted descriptor have finished running and it is safe to | ||
| 1182 | * free resources accessed from within the complete callbacks. | ||
| 1183 | */ | ||
| 1184 | static void ep93xx_dma_synchronize(struct dma_chan *chan) | ||
| 1185 | { | ||
| 1186 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); | ||
| 1187 | |||
| 1188 | if (edmac->edma->hw_synchronize) | ||
| 1189 | edmac->edma->hw_synchronize(edmac); | ||
| 1190 | } | ||
| 1191 | |||
| 1192 | /** | ||
| 1164 | * ep93xx_dma_terminate_all - terminate all transactions | 1193 | * ep93xx_dma_terminate_all - terminate all transactions |
| 1165 | * @chan: channel | 1194 | * @chan: channel |
| 1166 | * | 1195 | * |
| @@ -1323,6 +1352,7 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev) | |||
| 1323 | dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg; | 1352 | dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg; |
| 1324 | dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic; | 1353 | dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic; |
| 1325 | dma_dev->device_config = ep93xx_dma_slave_config; | 1354 | dma_dev->device_config = ep93xx_dma_slave_config; |
| 1355 | dma_dev->device_synchronize = ep93xx_dma_synchronize; | ||
| 1326 | dma_dev->device_terminate_all = ep93xx_dma_terminate_all; | 1356 | dma_dev->device_terminate_all = ep93xx_dma_terminate_all; |
| 1327 | dma_dev->device_issue_pending = ep93xx_dma_issue_pending; | 1357 | dma_dev->device_issue_pending = ep93xx_dma_issue_pending; |
| 1328 | dma_dev->device_tx_status = ep93xx_dma_tx_status; | 1358 | dma_dev->device_tx_status = ep93xx_dma_tx_status; |
| @@ -1340,6 +1370,7 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev) | |||
| 1340 | } else { | 1370 | } else { |
| 1341 | dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask); | 1371 | dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask); |
| 1342 | 1372 | ||
| 1373 | edma->hw_synchronize = m2p_hw_synchronize; | ||
| 1343 | edma->hw_setup = m2p_hw_setup; | 1374 | edma->hw_setup = m2p_hw_setup; |
| 1344 | edma->hw_shutdown = m2p_hw_shutdown; | 1375 | edma->hw_shutdown = m2p_hw_shutdown; |
| 1345 | edma->hw_submit = m2p_hw_submit; | 1376 | edma->hw_submit = m2p_hw_submit; |
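The ep93xx_dma hunks wire up a device_synchronize hook so that dmaengine clients can wait, in sleepable context, for a terminated channel to actually drain. From the consumer side the expected call ordering looks roughly like the kernel-side sketch below; it is a hypothetical fragment using the generic dmaengine helpers, not code taken from an in-tree driver:

#include <linux/dmaengine.h>

/*
 * Hypothetical consumer teardown: dmaengine_synchronize() ends up in the
 * driver's device_synchronize hook (m2p_hw_synchronize above), so once it
 * returns it is safe to free descriptor memory and callback resources.
 */
static void example_channel_teardown(struct dma_chan *chan)
{
	/* Request termination; may return before the hardware is idle. */
	dmaengine_terminate_async(chan);

	/* Must be called from sleepable context; waits for the channel. */
	dmaengine_synchronize(chan);
}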
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c index a28a01fcba67..f3e211f8f6c5 100644 --- a/drivers/dma/mv_xor_v2.c +++ b/drivers/dma/mv_xor_v2.c | |||
| @@ -161,6 +161,7 @@ struct mv_xor_v2_device { | |||
| 161 | struct mv_xor_v2_sw_desc *sw_desq; | 161 | struct mv_xor_v2_sw_desc *sw_desq; |
| 162 | int desc_size; | 162 | int desc_size; |
| 163 | unsigned int npendings; | 163 | unsigned int npendings; |
| 164 | unsigned int hw_queue_idx; | ||
| 164 | }; | 165 | }; |
| 165 | 166 | ||
| 166 | /** | 167 | /** |
| @@ -214,18 +215,6 @@ static void mv_xor_v2_set_data_buffers(struct mv_xor_v2_device *xor_dev, | |||
| 214 | } | 215 | } |
| 215 | 216 | ||
| 216 | /* | 217 | /* |
| 217 | * Return the next available index in the DESQ. | ||
| 218 | */ | ||
| 219 | static int mv_xor_v2_get_desq_write_ptr(struct mv_xor_v2_device *xor_dev) | ||
| 220 | { | ||
| 221 | /* read the index for the next available descriptor in the DESQ */ | ||
| 222 | u32 reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ALLOC_OFF); | ||
| 223 | |||
| 224 | return ((reg >> MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT) | ||
| 225 | & MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK); | ||
| 226 | } | ||
| 227 | |||
| 228 | /* | ||
| 229 | * notify the engine of new descriptors, and update the available index. | 218 | * notify the engine of new descriptors, and update the available index. |
| 230 | */ | 219 | */ |
| 231 | static void mv_xor_v2_add_desc_to_desq(struct mv_xor_v2_device *xor_dev, | 220 | static void mv_xor_v2_add_desc_to_desq(struct mv_xor_v2_device *xor_dev, |
| @@ -257,22 +246,6 @@ static int mv_xor_v2_set_desc_size(struct mv_xor_v2_device *xor_dev) | |||
| 257 | return MV_XOR_V2_EXT_DESC_SIZE; | 246 | return MV_XOR_V2_EXT_DESC_SIZE; |
| 258 | } | 247 | } |
| 259 | 248 | ||
| 260 | /* | ||
| 261 | * Set the IMSG threshold | ||
| 262 | */ | ||
| 263 | static inline | ||
| 264 | void mv_xor_v2_set_imsg_thrd(struct mv_xor_v2_device *xor_dev, int thrd_val) | ||
| 265 | { | ||
| 266 | u32 reg; | ||
| 267 | |||
| 268 | reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF); | ||
| 269 | |||
| 270 | reg &= (~MV_XOR_V2_DMA_IMSG_THRD_MASK << MV_XOR_V2_DMA_IMSG_THRD_SHIFT); | ||
| 271 | reg |= (thrd_val << MV_XOR_V2_DMA_IMSG_THRD_SHIFT); | ||
| 272 | |||
| 273 | writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF); | ||
| 274 | } | ||
| 275 | |||
| 276 | static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data) | 249 | static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data) |
| 277 | { | 250 | { |
| 278 | struct mv_xor_v2_device *xor_dev = data; | 251 | struct mv_xor_v2_device *xor_dev = data; |
| @@ -288,12 +261,6 @@ static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data) | |||
| 288 | if (!ndescs) | 261 | if (!ndescs) |
| 289 | return IRQ_NONE; | 262 | return IRQ_NONE; |
| 290 | 263 | ||
| 291 | /* | ||
| 292 | * Update IMSG threshold, to disable new IMSG interrupts until | ||
| 293 | * end of the tasklet | ||
| 294 | */ | ||
| 295 | mv_xor_v2_set_imsg_thrd(xor_dev, MV_XOR_V2_DESC_NUM); | ||
| 296 | |||
| 297 | /* schedule a tasklet to handle descriptors callbacks */ | 264 | /* schedule a tasklet to handle descriptors callbacks */ |
| 298 | tasklet_schedule(&xor_dev->irq_tasklet); | 265 | tasklet_schedule(&xor_dev->irq_tasklet); |
| 299 | 266 | ||
| @@ -306,7 +273,6 @@ static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data) | |||
| 306 | static dma_cookie_t | 273 | static dma_cookie_t |
| 307 | mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx) | 274 | mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx) |
| 308 | { | 275 | { |
| 309 | int desq_ptr; | ||
| 310 | void *dest_hw_desc; | 276 | void *dest_hw_desc; |
| 311 | dma_cookie_t cookie; | 277 | dma_cookie_t cookie; |
| 312 | struct mv_xor_v2_sw_desc *sw_desc = | 278 | struct mv_xor_v2_sw_desc *sw_desc = |
| @@ -322,15 +288,15 @@ mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx) | |||
| 322 | spin_lock_bh(&xor_dev->lock); | 288 | spin_lock_bh(&xor_dev->lock); |
| 323 | cookie = dma_cookie_assign(tx); | 289 | cookie = dma_cookie_assign(tx); |
| 324 | 290 | ||
| 325 | /* get the next available slot in the DESQ */ | ||
| 326 | desq_ptr = mv_xor_v2_get_desq_write_ptr(xor_dev); | ||
| 327 | |||
| 328 | /* copy the HW descriptor from the SW descriptor to the DESQ */ | 291 | /* copy the HW descriptor from the SW descriptor to the DESQ */ |
| 329 | dest_hw_desc = xor_dev->hw_desq_virt + desq_ptr; | 292 | dest_hw_desc = xor_dev->hw_desq_virt + xor_dev->hw_queue_idx; |
| 330 | 293 | ||
| 331 | memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size); | 294 | memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size); |
| 332 | 295 | ||
| 333 | xor_dev->npendings++; | 296 | xor_dev->npendings++; |
| 297 | xor_dev->hw_queue_idx++; | ||
| 298 | if (xor_dev->hw_queue_idx >= MV_XOR_V2_DESC_NUM) | ||
| 299 | xor_dev->hw_queue_idx = 0; | ||
| 334 | 300 | ||
| 335 | spin_unlock_bh(&xor_dev->lock); | 301 | spin_unlock_bh(&xor_dev->lock); |
| 336 | 302 | ||
| @@ -344,6 +310,7 @@ static struct mv_xor_v2_sw_desc * | |||
| 344 | mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev) | 310 | mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev) |
| 345 | { | 311 | { |
| 346 | struct mv_xor_v2_sw_desc *sw_desc; | 312 | struct mv_xor_v2_sw_desc *sw_desc; |
| 313 | bool found = false; | ||
| 347 | 314 | ||
| 348 | /* Lock the channel */ | 315 | /* Lock the channel */ |
| 349 | spin_lock_bh(&xor_dev->lock); | 316 | spin_lock_bh(&xor_dev->lock); |
| @@ -355,19 +322,23 @@ mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev) | |||
| 355 | return NULL; | 322 | return NULL; |
| 356 | } | 323 | } |
| 357 | 324 | ||
| 358 | /* get a free SW descriptor from the SW DESQ */ | 325 | list_for_each_entry(sw_desc, &xor_dev->free_sw_desc, free_list) { |
| 359 | sw_desc = list_first_entry(&xor_dev->free_sw_desc, | 326 | if (async_tx_test_ack(&sw_desc->async_tx)) { |
| 360 | struct mv_xor_v2_sw_desc, free_list); | 327 | found = true; |
| 328 | break; | ||
| 329 | } | ||
| 330 | } | ||
| 331 | |||
| 332 | if (!found) { | ||
| 333 | spin_unlock_bh(&xor_dev->lock); | ||
| 334 | return NULL; | ||
| 335 | } | ||
| 336 | |||
| 361 | list_del(&sw_desc->free_list); | 337 | list_del(&sw_desc->free_list); |
| 362 | 338 | ||
| 363 | /* Release the channel */ | 339 | /* Release the channel */ |
| 364 | spin_unlock_bh(&xor_dev->lock); | 340 | spin_unlock_bh(&xor_dev->lock); |
| 365 | 341 | ||
| 366 | /* set the async tx descriptor */ | ||
| 367 | dma_async_tx_descriptor_init(&sw_desc->async_tx, &xor_dev->dmachan); | ||
| 368 | sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit; | ||
| 369 | async_tx_ack(&sw_desc->async_tx); | ||
| 370 | |||
| 371 | return sw_desc; | 342 | return sw_desc; |
| 372 | } | 343 | } |
| 373 | 344 | ||
| @@ -389,6 +360,8 @@ mv_xor_v2_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, | |||
| 389 | __func__, len, &src, &dest, flags); | 360 | __func__, len, &src, &dest, flags); |
| 390 | 361 | ||
| 391 | sw_desc = mv_xor_v2_prep_sw_desc(xor_dev); | 362 | sw_desc = mv_xor_v2_prep_sw_desc(xor_dev); |
| 363 | if (!sw_desc) | ||
| 364 | return NULL; | ||
| 392 | 365 | ||
| 393 | sw_desc->async_tx.flags = flags; | 366 | sw_desc->async_tx.flags = flags; |
| 394 | 367 | ||
| @@ -443,6 +416,8 @@ mv_xor_v2_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, | |||
| 443 | __func__, src_cnt, len, &dest, flags); | 416 | __func__, src_cnt, len, &dest, flags); |
| 444 | 417 | ||
| 445 | sw_desc = mv_xor_v2_prep_sw_desc(xor_dev); | 418 | sw_desc = mv_xor_v2_prep_sw_desc(xor_dev); |
| 419 | if (!sw_desc) | ||
| 420 | return NULL; | ||
| 446 | 421 | ||
| 447 | sw_desc->async_tx.flags = flags; | 422 | sw_desc->async_tx.flags = flags; |
| 448 | 423 | ||
| @@ -491,6 +466,8 @@ mv_xor_v2_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags) | |||
| 491 | container_of(chan, struct mv_xor_v2_device, dmachan); | 466 | container_of(chan, struct mv_xor_v2_device, dmachan); |
| 492 | 467 | ||
| 493 | sw_desc = mv_xor_v2_prep_sw_desc(xor_dev); | 468 | sw_desc = mv_xor_v2_prep_sw_desc(xor_dev); |
| 469 | if (!sw_desc) | ||
| 470 | return NULL; | ||
| 494 | 471 | ||
| 495 | /* set the HW descriptor */ | 472 | /* set the HW descriptor */ |
| 496 | hw_descriptor = &sw_desc->hw_desc; | 473 | hw_descriptor = &sw_desc->hw_desc; |
| @@ -554,7 +531,6 @@ static void mv_xor_v2_tasklet(unsigned long data) | |||
| 554 | { | 531 | { |
| 555 | struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data; | 532 | struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data; |
| 556 | int pending_ptr, num_of_pending, i; | 533 | int pending_ptr, num_of_pending, i; |
| 557 | struct mv_xor_v2_descriptor *next_pending_hw_desc = NULL; | ||
| 558 | struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL; | 534 | struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL; |
| 559 | 535 | ||
| 560 | dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__); | 536 | dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__); |
| @@ -562,17 +538,10 @@ static void mv_xor_v2_tasklet(unsigned long data) | |||
| 562 | /* get the pending descriptors parameters */ | 538 | /* get the pending descriptors parameters */ |
| 563 | num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr); | 539 | num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr); |
| 564 | 540 | ||
| 565 | /* next HW descriptor */ | ||
| 566 | next_pending_hw_desc = xor_dev->hw_desq_virt + pending_ptr; | ||
| 567 | |||
| 568 | /* loop over free descriptors */ | 541 | /* loop over free descriptors */ |
| 569 | for (i = 0; i < num_of_pending; i++) { | 542 | for (i = 0; i < num_of_pending; i++) { |
| 570 | 543 | struct mv_xor_v2_descriptor *next_pending_hw_desc = | |
| 571 | if (pending_ptr > MV_XOR_V2_DESC_NUM) | 544 | xor_dev->hw_desq_virt + pending_ptr; |
| 572 | pending_ptr = 0; | ||
| 573 | |||
| 574 | if (next_pending_sw_desc != NULL) | ||
| 575 | next_pending_hw_desc++; | ||
| 576 | 545 | ||
| 577 | /* get the SW descriptor related to the HW descriptor */ | 546 | /* get the SW descriptor related to the HW descriptor */ |
| 578 | next_pending_sw_desc = | 547 | next_pending_sw_desc = |
| @@ -608,15 +577,14 @@ static void mv_xor_v2_tasklet(unsigned long data) | |||
| 608 | 577 | ||
| 609 | /* increment the next descriptor */ | 578 | /* increment the next descriptor */ |
| 610 | pending_ptr++; | 579 | pending_ptr++; |
| 580 | if (pending_ptr >= MV_XOR_V2_DESC_NUM) | ||
| 581 | pending_ptr = 0; | ||
| 611 | } | 582 | } |
| 612 | 583 | ||
| 613 | if (num_of_pending != 0) { | 584 | if (num_of_pending != 0) { |
| 614 | /* free the descriptores */ | 585 | /* free the descriptores */ |
| 615 | mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending); | 586 | mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending); |
| 616 | } | 587 | } |
| 617 | |||
| 618 | /* Update IMSG threshold, to enable new IMSG interrupts */ | ||
| 619 | mv_xor_v2_set_imsg_thrd(xor_dev, 0); | ||
| 620 | } | 588 | } |
| 621 | 589 | ||
| 622 | /* | 590 | /* |
| @@ -648,9 +616,6 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev) | |||
| 648 | writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32, | 616 | writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32, |
| 649 | xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF); | 617 | xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF); |
| 650 | 618 | ||
| 651 | /* enable the DMA engine */ | ||
| 652 | writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF); | ||
| 653 | |||
| 654 | /* | 619 | /* |
| 655 | * This is a temporary solution, until we activate the | 620 | * This is a temporary solution, until we activate the |
| 656 | * SMMU. Set the attributes for reading & writing data buffers | 621 | * SMMU. Set the attributes for reading & writing data buffers |
| @@ -694,6 +659,9 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev) | |||
| 694 | reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL; | 659 | reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL; |
| 695 | writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE); | 660 | writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE); |
| 696 | 661 | ||
| 662 | /* enable the DMA engine */ | ||
| 663 | writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF); | ||
| 664 | |||
| 697 | return 0; | 665 | return 0; |
| 698 | } | 666 | } |
| 699 | 667 | ||
| @@ -725,6 +693,10 @@ static int mv_xor_v2_probe(struct platform_device *pdev) | |||
| 725 | 693 | ||
| 726 | platform_set_drvdata(pdev, xor_dev); | 694 | platform_set_drvdata(pdev, xor_dev); |
| 727 | 695 | ||
| 696 | ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); | ||
| 697 | if (ret) | ||
| 698 | return ret; | ||
| 699 | |||
| 728 | xor_dev->clk = devm_clk_get(&pdev->dev, NULL); | 700 | xor_dev->clk = devm_clk_get(&pdev->dev, NULL); |
| 729 | if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) | 701 | if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) |
| 730 | return -EPROBE_DEFER; | 702 | return -EPROBE_DEFER; |
| @@ -785,8 +757,15 @@ static int mv_xor_v2_probe(struct platform_device *pdev) | |||
| 785 | 757 | ||
| 786 | /* add all SW descriptors to the free list */ | 758 | /* add all SW descriptors to the free list */ |
| 787 | for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) { | 759 | for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) { |
| 788 | xor_dev->sw_desq[i].idx = i; | 760 | struct mv_xor_v2_sw_desc *sw_desc = |
| 789 | list_add(&xor_dev->sw_desq[i].free_list, | 761 | xor_dev->sw_desq + i; |
| 762 | sw_desc->idx = i; | ||
| 763 | dma_async_tx_descriptor_init(&sw_desc->async_tx, | ||
| 764 | &xor_dev->dmachan); | ||
| 765 | sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit; | ||
| 766 | async_tx_ack(&sw_desc->async_tx); | ||
| 767 | |||
| 768 | list_add(&sw_desc->free_list, | ||
| 790 | &xor_dev->free_sw_desc); | 769 | &xor_dev->free_sw_desc); |
| 791 | } | 770 | } |
| 792 | 771 | ||
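The mv_xor_v2 rework stops reading the hardware's DESQ allocation pointer and instead keeps a software write index, hw_queue_idx, that wraps at MV_XOR_V2_DESC_NUM; the tasklet wraps its pending_ptr the same way. The ring arithmetic in isolation, with a small made-up queue depth:

#include <stdio.h>

#define DESC_NUM	4	/* stand-in for MV_XOR_V2_DESC_NUM */

int main(void)
{
	unsigned int hw_queue_idx = 0;
	int submit;

	/* The driver now tracks the next DESQ slot itself and wraps it in
	 * software instead of reading the allocation pointer register. */
	for (submit = 0; submit < 10; submit++) {
		printf("descriptor %d -> slot %u\n", submit, hw_queue_idx);
		hw_queue_idx++;
		if (hw_queue_idx >= DESC_NUM)
			hw_queue_idx = 0;
	}
	return 0;
}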
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 8b0da7fa520d..e90a7a0d760a 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
| @@ -3008,7 +3008,8 @@ static int pl330_remove(struct amba_device *adev) | |||
| 3008 | 3008 | ||
| 3009 | for (i = 0; i < AMBA_NR_IRQS; i++) { | 3009 | for (i = 0; i < AMBA_NR_IRQS; i++) { |
| 3010 | irq = adev->irq[i]; | 3010 | irq = adev->irq[i]; |
| 3011 | devm_free_irq(&adev->dev, irq, pl330); | 3011 | if (irq) |
| 3012 | devm_free_irq(&adev->dev, irq, pl330); | ||
| 3012 | } | 3013 | } |
| 3013 | 3014 | ||
| 3014 | dma_async_device_unregister(&pl330->ddma); | 3015 | dma_async_device_unregister(&pl330->ddma); |
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index db41795fe42a..bd261c9e9664 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c | |||
| @@ -1287,6 +1287,9 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan, | |||
| 1287 | if (desc->hwdescs.use) { | 1287 | if (desc->hwdescs.use) { |
| 1288 | dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) & | 1288 | dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) & |
| 1289 | RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT; | 1289 | RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT; |
| 1290 | if (dptr == 0) | ||
| 1291 | dptr = desc->nchunks; | ||
| 1292 | dptr--; | ||
| 1290 | WARN_ON(dptr >= desc->nchunks); | 1293 | WARN_ON(dptr >= desc->nchunks); |
| 1291 | } else { | 1294 | } else { |
| 1292 | running = desc->running; | 1295 | running = desc->running; |
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c index 72c649713ace..31a145154e9f 100644 --- a/drivers/dma/sh/usb-dmac.c +++ b/drivers/dma/sh/usb-dmac.c | |||
| @@ -117,7 +117,7 @@ struct usb_dmac { | |||
| 117 | #define USB_DMASWR 0x0008 | 117 | #define USB_DMASWR 0x0008 |
| 118 | #define USB_DMASWR_SWR (1 << 0) | 118 | #define USB_DMASWR_SWR (1 << 0) |
| 119 | #define USB_DMAOR 0x0060 | 119 | #define USB_DMAOR 0x0060 |
| 120 | #define USB_DMAOR_AE (1 << 2) | 120 | #define USB_DMAOR_AE (1 << 1) |
| 121 | #define USB_DMAOR_DME (1 << 0) | 121 | #define USB_DMAOR_DME (1 << 0) |
| 122 | 122 | ||
| 123 | #define USB_DMASAR 0x0000 | 123 | #define USB_DMASAR 0x0000 |
diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c index 44c01390d035..951b6c79f166 100644 --- a/drivers/firmware/dmi-id.c +++ b/drivers/firmware/dmi-id.c | |||
| @@ -47,6 +47,7 @@ DEFINE_DMI_ATTR_WITH_SHOW(product_name, 0444, DMI_PRODUCT_NAME); | |||
| 47 | DEFINE_DMI_ATTR_WITH_SHOW(product_version, 0444, DMI_PRODUCT_VERSION); | 47 | DEFINE_DMI_ATTR_WITH_SHOW(product_version, 0444, DMI_PRODUCT_VERSION); |
| 48 | DEFINE_DMI_ATTR_WITH_SHOW(product_serial, 0400, DMI_PRODUCT_SERIAL); | 48 | DEFINE_DMI_ATTR_WITH_SHOW(product_serial, 0400, DMI_PRODUCT_SERIAL); |
| 49 | DEFINE_DMI_ATTR_WITH_SHOW(product_uuid, 0400, DMI_PRODUCT_UUID); | 49 | DEFINE_DMI_ATTR_WITH_SHOW(product_uuid, 0400, DMI_PRODUCT_UUID); |
| 50 | DEFINE_DMI_ATTR_WITH_SHOW(product_family, 0444, DMI_PRODUCT_FAMILY); | ||
| 50 | DEFINE_DMI_ATTR_WITH_SHOW(board_vendor, 0444, DMI_BOARD_VENDOR); | 51 | DEFINE_DMI_ATTR_WITH_SHOW(board_vendor, 0444, DMI_BOARD_VENDOR); |
| 51 | DEFINE_DMI_ATTR_WITH_SHOW(board_name, 0444, DMI_BOARD_NAME); | 52 | DEFINE_DMI_ATTR_WITH_SHOW(board_name, 0444, DMI_BOARD_NAME); |
| 52 | DEFINE_DMI_ATTR_WITH_SHOW(board_version, 0444, DMI_BOARD_VERSION); | 53 | DEFINE_DMI_ATTR_WITH_SHOW(board_version, 0444, DMI_BOARD_VERSION); |
| @@ -191,6 +192,7 @@ static void __init dmi_id_init_attr_table(void) | |||
| 191 | ADD_DMI_ATTR(product_version, DMI_PRODUCT_VERSION); | 192 | ADD_DMI_ATTR(product_version, DMI_PRODUCT_VERSION); |
| 192 | ADD_DMI_ATTR(product_serial, DMI_PRODUCT_SERIAL); | 193 | ADD_DMI_ATTR(product_serial, DMI_PRODUCT_SERIAL); |
| 193 | ADD_DMI_ATTR(product_uuid, DMI_PRODUCT_UUID); | 194 | ADD_DMI_ATTR(product_uuid, DMI_PRODUCT_UUID); |
| 195 | ADD_DMI_ATTR(product_family, DMI_PRODUCT_FAMILY); | ||
| 194 | ADD_DMI_ATTR(board_vendor, DMI_BOARD_VENDOR); | 196 | ADD_DMI_ATTR(board_vendor, DMI_BOARD_VENDOR); |
| 195 | ADD_DMI_ATTR(board_name, DMI_BOARD_NAME); | 197 | ADD_DMI_ATTR(board_name, DMI_BOARD_NAME); |
| 196 | ADD_DMI_ATTR(board_version, DMI_BOARD_VERSION); | 198 | ADD_DMI_ATTR(board_version, DMI_BOARD_VERSION); |
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index 54be60ead08f..783041964439 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c | |||
| @@ -144,7 +144,7 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *, | |||
| 144 | 144 | ||
| 145 | buf = dmi_early_remap(dmi_base, orig_dmi_len); | 145 | buf = dmi_early_remap(dmi_base, orig_dmi_len); |
| 146 | if (buf == NULL) | 146 | if (buf == NULL) |
| 147 | return -1; | 147 | return -ENOMEM; |
| 148 | 148 | ||
| 149 | dmi_decode_table(buf, decode, NULL); | 149 | dmi_decode_table(buf, decode, NULL); |
| 150 | 150 | ||
| @@ -178,7 +178,7 @@ static void __init dmi_save_ident(const struct dmi_header *dm, int slot, | |||
| 178 | const char *d = (const char *) dm; | 178 | const char *d = (const char *) dm; |
| 179 | const char *p; | 179 | const char *p; |
| 180 | 180 | ||
| 181 | if (dmi_ident[slot]) | 181 | if (dmi_ident[slot] || dm->length <= string) |
| 182 | return; | 182 | return; |
| 183 | 183 | ||
| 184 | p = dmi_string(dm, d[string]); | 184 | p = dmi_string(dm, d[string]); |
| @@ -191,13 +191,14 @@ static void __init dmi_save_ident(const struct dmi_header *dm, int slot, | |||
| 191 | static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, | 191 | static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, |
| 192 | int index) | 192 | int index) |
| 193 | { | 193 | { |
| 194 | const u8 *d = (u8 *) dm + index; | 194 | const u8 *d; |
| 195 | char *s; | 195 | char *s; |
| 196 | int is_ff = 1, is_00 = 1, i; | 196 | int is_ff = 1, is_00 = 1, i; |
| 197 | 197 | ||
| 198 | if (dmi_ident[slot]) | 198 | if (dmi_ident[slot] || dm->length <= index + 16) |
| 199 | return; | 199 | return; |
| 200 | 200 | ||
| 201 | d = (u8 *) dm + index; | ||
| 201 | for (i = 0; i < 16 && (is_ff || is_00); i++) { | 202 | for (i = 0; i < 16 && (is_ff || is_00); i++) { |
| 202 | if (d[i] != 0x00) | 203 | if (d[i] != 0x00) |
| 203 | is_00 = 0; | 204 | is_00 = 0; |
| @@ -228,16 +229,17 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, | |||
| 228 | static void __init dmi_save_type(const struct dmi_header *dm, int slot, | 229 | static void __init dmi_save_type(const struct dmi_header *dm, int slot, |
| 229 | int index) | 230 | int index) |
| 230 | { | 231 | { |
| 231 | const u8 *d = (u8 *) dm + index; | 232 | const u8 *d; |
| 232 | char *s; | 233 | char *s; |
| 233 | 234 | ||
| 234 | if (dmi_ident[slot]) | 235 | if (dmi_ident[slot] || dm->length <= index) |
| 235 | return; | 236 | return; |
| 236 | 237 | ||
| 237 | s = dmi_alloc(4); | 238 | s = dmi_alloc(4); |
| 238 | if (!s) | 239 | if (!s) |
| 239 | return; | 240 | return; |
| 240 | 241 | ||
| 242 | d = (u8 *) dm + index; | ||
| 241 | sprintf(s, "%u", *d & 0x7F); | 243 | sprintf(s, "%u", *d & 0x7F); |
| 242 | dmi_ident[slot] = s; | 244 | dmi_ident[slot] = s; |
| 243 | } | 245 | } |
| @@ -278,9 +280,13 @@ static void __init dmi_save_devices(const struct dmi_header *dm) | |||
| 278 | 280 | ||
| 279 | static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm) | 281 | static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm) |
| 280 | { | 282 | { |
| 281 | int i, count = *(u8 *)(dm + 1); | 283 | int i, count; |
| 282 | struct dmi_device *dev; | 284 | struct dmi_device *dev; |
| 283 | 285 | ||
| 286 | if (dm->length < 0x05) | ||
| 287 | return; | ||
| 288 | |||
| 289 | count = *(u8 *)(dm + 1); | ||
| 284 | for (i = 1; i <= count; i++) { | 290 | for (i = 1; i <= count; i++) { |
| 285 | const char *devname = dmi_string(dm, i); | 291 | const char *devname = dmi_string(dm, i); |
| 286 | 292 | ||
| @@ -353,6 +359,9 @@ static void __init dmi_save_extended_devices(const struct dmi_header *dm) | |||
| 353 | const char *name; | 359 | const char *name; |
| 354 | const u8 *d = (u8 *)dm; | 360 | const u8 *d = (u8 *)dm; |
| 355 | 361 | ||
| 362 | if (dm->length < 0x0B) | ||
| 363 | return; | ||
| 364 | |||
| 356 | /* Skip disabled device */ | 365 | /* Skip disabled device */ |
| 357 | if ((d[0x5] & 0x80) == 0) | 366 | if ((d[0x5] & 0x80) == 0) |
| 358 | return; | 367 | return; |
| @@ -387,7 +396,7 @@ static void __init save_mem_devices(const struct dmi_header *dm, void *v) | |||
| 387 | const char *d = (const char *)dm; | 396 | const char *d = (const char *)dm; |
| 388 | static int nr; | 397 | static int nr; |
| 389 | 398 | ||
| 390 | if (dm->type != DMI_ENTRY_MEM_DEVICE) | 399 | if (dm->type != DMI_ENTRY_MEM_DEVICE || dm->length < 0x12) |
| 391 | return; | 400 | return; |
| 392 | if (nr >= dmi_memdev_nr) { | 401 | if (nr >= dmi_memdev_nr) { |
| 393 | pr_warn(FW_BUG "Too many DIMM entries in SMBIOS table\n"); | 402 | pr_warn(FW_BUG "Too many DIMM entries in SMBIOS table\n"); |
| @@ -430,6 +439,7 @@ static void __init dmi_decode(const struct dmi_header *dm, void *dummy) | |||
| 430 | dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6); | 439 | dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6); |
| 431 | dmi_save_ident(dm, DMI_PRODUCT_SERIAL, 7); | 440 | dmi_save_ident(dm, DMI_PRODUCT_SERIAL, 7); |
| 432 | dmi_save_uuid(dm, DMI_PRODUCT_UUID, 8); | 441 | dmi_save_uuid(dm, DMI_PRODUCT_UUID, 8); |
| 442 | dmi_save_ident(dm, DMI_PRODUCT_FAMILY, 26); | ||
| 433 | break; | 443 | break; |
| 434 | case 2: /* Base Board Information */ | 444 | case 2: /* Base Board Information */ |
| 435 | dmi_save_ident(dm, DMI_BOARD_VENDOR, 4); | 445 | dmi_save_ident(dm, DMI_BOARD_VENDOR, 4); |
| @@ -649,6 +659,21 @@ void __init dmi_scan_machine(void) | |||
| 649 | goto error; | 659 | goto error; |
| 650 | 660 | ||
| 651 | /* | 661 | /* |
| 662 | * Same logic as above, look for a 64-bit entry point | ||
| 663 | * first, and if not found, fall back to 32-bit entry point. | ||
| 664 | */ | ||
| 665 | memcpy_fromio(buf, p, 16); | ||
| 666 | for (q = p + 16; q < p + 0x10000; q += 16) { | ||
| 667 | memcpy_fromio(buf + 16, q, 16); | ||
| 668 | if (!dmi_smbios3_present(buf)) { | ||
| 669 | dmi_available = 1; | ||
| 670 | dmi_early_unmap(p, 0x10000); | ||
| 671 | goto out; | ||
| 672 | } | ||
| 673 | memcpy(buf, buf + 16, 16); | ||
| 674 | } | ||
| 675 | |||
| 676 | /* | ||
| 652 | * Iterate over all possible DMI header addresses q. | 677 | * Iterate over all possible DMI header addresses q. |
| 653 | * Maintain the 32 bytes around q in buf. On the | 678 | * Maintain the 32 bytes around q in buf. On the |
| 654 | * first iteration, substitute zero for the | 679 | * first iteration, substitute zero for the |
| @@ -658,7 +683,7 @@ void __init dmi_scan_machine(void) | |||
| 658 | memset(buf, 0, 16); | 683 | memset(buf, 0, 16); |
| 659 | for (q = p; q < p + 0x10000; q += 16) { | 684 | for (q = p; q < p + 0x10000; q += 16) { |
| 660 | memcpy_fromio(buf + 16, q, 16); | 685 | memcpy_fromio(buf + 16, q, 16); |
| 661 | if (!dmi_smbios3_present(buf) || !dmi_present(buf)) { | 686 | if (!dmi_present(buf)) { |
| 662 | dmi_available = 1; | 687 | dmi_available = 1; |
| 663 | dmi_early_unmap(p, 0x10000); | 688 | dmi_early_unmap(p, 0x10000); |
| 664 | goto out; | 689 | goto out; |
| @@ -992,7 +1017,8 @@ EXPORT_SYMBOL(dmi_get_date); | |||
| 992 | * @decode: Callback function | 1017 | * @decode: Callback function |
| 993 | * @private_data: Private data to be passed to the callback function | 1018 | * @private_data: Private data to be passed to the callback function |
| 994 | * | 1019 | * |
| 995 | * Returns -1 when the DMI table can't be reached, 0 on success. | 1020 | * Returns 0 on success, -ENXIO if DMI is not selected or not present, |
| 1021 | * or a different negative error code if DMI walking fails. | ||
| 996 | */ | 1022 | */ |
| 997 | int dmi_walk(void (*decode)(const struct dmi_header *, void *), | 1023 | int dmi_walk(void (*decode)(const struct dmi_header *, void *), |
| 998 | void *private_data) | 1024 | void *private_data) |
| @@ -1000,11 +1026,11 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *), | |||
| 1000 | u8 *buf; | 1026 | u8 *buf; |
| 1001 | 1027 | ||
| 1002 | if (!dmi_available) | 1028 | if (!dmi_available) |
| 1003 | return -1; | 1029 | return -ENXIO; |
| 1004 | 1030 | ||
| 1005 | buf = dmi_remap(dmi_base, dmi_len); | 1031 | buf = dmi_remap(dmi_base, dmi_len); |
| 1006 | if (buf == NULL) | 1032 | if (buf == NULL) |
| 1007 | return -1; | 1033 | return -ENOMEM; |
| 1008 | 1034 | ||
| 1009 | dmi_decode_table(buf, decode, private_data); | 1035 | dmi_decode_table(buf, decode, private_data); |
| 1010 | 1036 | ||
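With dmi_walk() now returning errno-style codes, callers can tell "no DMI on this system" apart from a transient mapping failure. A minimal sketch of such a caller, assuming <linux/dmi.h> and a hypothetical counting callback:

	static void count_entries(const struct dmi_header *dh, void *priv)
	{
		int *count = priv;	/* hypothetical private data */

		(*count)++;
	}

	static int example_dmi_scan(void)
	{
		int count = 0;
		int ret = dmi_walk(count_entries, &count);

		if (ret == -ENXIO)	/* DMI not selected or not present */
			return 0;
		if (ret)		/* e.g. -ENOMEM when dmi_remap() fails */
			return ret;

		pr_info("walked %d DMI entries\n", count);
		return 0;
	}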
diff --git a/drivers/firmware/efi/efi-bgrt.c b/drivers/firmware/efi/efi-bgrt.c index 04ca8764f0c0..b58233e4ed71 100644 --- a/drivers/firmware/efi/efi-bgrt.c +++ b/drivers/firmware/efi/efi-bgrt.c | |||
| @@ -27,6 +27,26 @@ struct bmp_header { | |||
| 27 | u32 size; | 27 | u32 size; |
| 28 | } __packed; | 28 | } __packed; |
| 29 | 29 | ||
| 30 | static bool efi_bgrt_addr_valid(u64 addr) | ||
| 31 | { | ||
| 32 | efi_memory_desc_t *md; | ||
| 33 | |||
| 34 | for_each_efi_memory_desc(md) { | ||
| 35 | u64 size; | ||
| 36 | u64 end; | ||
| 37 | |||
| 38 | if (md->type != EFI_BOOT_SERVICES_DATA) | ||
| 39 | continue; | ||
| 40 | |||
| 41 | size = md->num_pages << EFI_PAGE_SHIFT; | ||
| 42 | end = md->phys_addr + size; | ||
| 43 | if (addr >= md->phys_addr && addr < end) | ||
| 44 | return true; | ||
| 45 | } | ||
| 46 | |||
| 47 | return false; | ||
| 48 | } | ||
| 49 | |||
| 30 | void __init efi_bgrt_init(struct acpi_table_header *table) | 50 | void __init efi_bgrt_init(struct acpi_table_header *table) |
| 31 | { | 51 | { |
| 32 | void *image; | 52 | void *image; |
| @@ -36,6 +56,9 @@ void __init efi_bgrt_init(struct acpi_table_header *table) | |||
| 36 | if (acpi_disabled) | 56 | if (acpi_disabled) |
| 37 | return; | 57 | return; |
| 38 | 58 | ||
| 59 | if (!efi_enabled(EFI_MEMMAP)) | ||
| 60 | return; | ||
| 61 | |||
| 39 | if (table->length < sizeof(bgrt_tab)) { | 62 | if (table->length < sizeof(bgrt_tab)) { |
| 40 | pr_notice("Ignoring BGRT: invalid length %u (expected %zu)\n", | 63 | pr_notice("Ignoring BGRT: invalid length %u (expected %zu)\n", |
| 41 | table->length, sizeof(bgrt_tab)); | 64 | table->length, sizeof(bgrt_tab)); |
| @@ -62,6 +85,10 @@ void __init efi_bgrt_init(struct acpi_table_header *table) | |||
| 62 | goto out; | 85 | goto out; |
| 63 | } | 86 | } |
| 64 | 87 | ||
| 88 | if (!efi_bgrt_addr_valid(bgrt->image_address)) { | ||
| 89 | pr_notice("Ignoring BGRT: invalid image address\n"); | ||
| 90 | goto out; | ||
| 91 | } | ||
| 65 | image = early_memremap(bgrt->image_address, sizeof(bmp_header)); | 92 | image = early_memremap(bgrt->image_address, sizeof(bmp_header)); |
| 66 | if (!image) { | 93 | if (!image) { |
| 67 | pr_notice("Ignoring BGRT: failed to map image header memory\n"); | 94 | pr_notice("Ignoring BGRT: failed to map image header memory\n"); |
diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c index 8c34d50a4d80..959777ec8a77 100644 --- a/drivers/firmware/efi/libstub/secureboot.c +++ b/drivers/firmware/efi/libstub/secureboot.c | |||
| @@ -16,10 +16,10 @@ | |||
| 16 | 16 | ||
| 17 | /* BIOS variables */ | 17 | /* BIOS variables */ |
| 18 | static const efi_guid_t efi_variable_guid = EFI_GLOBAL_VARIABLE_GUID; | 18 | static const efi_guid_t efi_variable_guid = EFI_GLOBAL_VARIABLE_GUID; |
| 19 | static const efi_char16_t const efi_SecureBoot_name[] = { | 19 | static const efi_char16_t efi_SecureBoot_name[] = { |
| 20 | 'S', 'e', 'c', 'u', 'r', 'e', 'B', 'o', 'o', 't', 0 | 20 | 'S', 'e', 'c', 'u', 'r', 'e', 'B', 'o', 'o', 't', 0 |
| 21 | }; | 21 | }; |
| 22 | static const efi_char16_t const efi_SetupMode_name[] = { | 22 | static const efi_char16_t efi_SetupMode_name[] = { |
| 23 | 'S', 'e', 't', 'u', 'p', 'M', 'o', 'd', 'e', 0 | 23 | 'S', 'e', 't', 'u', 'p', 'M', 'o', 'd', 'e', 0 |
| 24 | }; | 24 | }; |
| 25 | 25 | ||
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c index 1e7860f02f4f..31058d400bda 100644 --- a/drivers/firmware/google/vpd.c +++ b/drivers/firmware/google/vpd.c | |||
| @@ -136,12 +136,12 @@ static int vpd_section_attrib_add(const u8 *key, s32 key_len, | |||
| 136 | info->value = value; | 136 | info->value = value; |
| 137 | 137 | ||
| 138 | INIT_LIST_HEAD(&info->list); | 138 | INIT_LIST_HEAD(&info->list); |
| 139 | list_add_tail(&info->list, &sec->attribs); | ||
| 140 | 139 | ||
| 141 | ret = sysfs_create_bin_file(sec->kobj, &info->bin_attr); | 140 | ret = sysfs_create_bin_file(sec->kobj, &info->bin_attr); |
| 142 | if (ret) | 141 | if (ret) |
| 143 | goto free_info_key; | 142 | goto free_info_key; |
| 144 | 143 | ||
| 144 | list_add_tail(&info->list, &sec->attribs); | ||
| 145 | return 0; | 145 | return 0; |
| 146 | 146 | ||
| 147 | free_info_key: | 147 | free_info_key: |
| @@ -158,8 +158,8 @@ static void vpd_section_attrib_destroy(struct vpd_section *sec) | |||
| 158 | struct vpd_attrib_info *temp; | 158 | struct vpd_attrib_info *temp; |
| 159 | 159 | ||
| 160 | list_for_each_entry_safe(info, temp, &sec->attribs, list) { | 160 | list_for_each_entry_safe(info, temp, &sec->attribs, list) { |
| 161 | kfree(info->key); | ||
| 162 | sysfs_remove_bin_file(sec->kobj, &info->bin_attr); | 161 | sysfs_remove_bin_file(sec->kobj, &info->bin_attr); |
| 162 | kfree(info->key); | ||
| 163 | kfree(info); | 163 | kfree(info); |
| 164 | } | 164 | } |
| 165 | } | 165 | } |
| @@ -244,7 +244,7 @@ static int vpd_section_destroy(struct vpd_section *sec) | |||
| 244 | { | 244 | { |
| 245 | if (sec->enabled) { | 245 | if (sec->enabled) { |
| 246 | vpd_section_attrib_destroy(sec); | 246 | vpd_section_attrib_destroy(sec); |
| 247 | kobject_del(sec->kobj); | 247 | kobject_put(sec->kobj); |
| 248 | sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr); | 248 | sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr); |
| 249 | kfree(sec->raw_name); | 249 | kfree(sec->raw_name); |
| 250 | iounmap(sec->baseaddr); | 250 | iounmap(sec->baseaddr); |
| @@ -331,7 +331,7 @@ static void __exit vpd_platform_exit(void) | |||
| 331 | { | 331 | { |
| 332 | vpd_section_destroy(&ro_vpd); | 332 | vpd_section_destroy(&ro_vpd); |
| 333 | vpd_section_destroy(&rw_vpd); | 333 | vpd_section_destroy(&rw_vpd); |
| 334 | kobject_del(vpd_kobj); | 334 | kobject_put(vpd_kobj); |
| 335 | } | 335 | } |
| 336 | 336 | ||
| 337 | module_init(vpd_platform_init); | 337 | module_init(vpd_platform_init); |
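Two ordering fixes are visible in the vpd hunks: an attribute is linked into the section list only after sysfs_create_bin_file() succeeds, so the error path has nothing to unlink, and teardown uses kobject_put() instead of kobject_del(), because kobject_del() only removes the sysfs entry while kobject_put() also drops the last reference so the kobject's release callback can free it. A rough sketch of the resulting teardown order (in the driver this is split across vpd_section_attrib_destroy() and vpd_section_destroy()):

	list_for_each_entry_safe(info, temp, &sec->attribs, list) {
		sysfs_remove_bin_file(sec->kobj, &info->bin_attr); /* stop readers first */
		kfree(info->key);                                  /* then free what they read */
		kfree(info);
	}
	kobject_put(sec->kobj);	/* drop the final reference; release() frees it */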
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c index ccea609676ee..4ca436e66bdb 100644 --- a/drivers/gpio/gpio-aspeed.c +++ b/drivers/gpio/gpio-aspeed.c | |||
| @@ -646,6 +646,9 @@ static int enable_debounce(struct gpio_chip *chip, unsigned int offset, | |||
| 646 | int rc; | 646 | int rc; |
| 647 | int i; | 647 | int i; |
| 648 | 648 | ||
| 649 | if (!gpio->clk) | ||
| 650 | return -EINVAL; | ||
| 651 | |||
| 649 | rc = usecs_to_cycles(gpio, usecs, &requested_cycles); | 652 | rc = usecs_to_cycles(gpio, usecs, &requested_cycles); |
| 650 | if (rc < 0) { | 653 | if (rc < 0) { |
| 651 | dev_warn(chip->parent, "Failed to convert %luus to cycles at %luHz: %d\n", | 654 | dev_warn(chip->parent, "Failed to convert %luus to cycles at %luHz: %d\n", |
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c index 2197368cc899..e60156ec0c18 100644 --- a/drivers/gpio/gpio-crystalcove.c +++ b/drivers/gpio/gpio-crystalcove.c | |||
| @@ -90,8 +90,18 @@ static inline int to_reg(int gpio, enum ctrl_register reg_type) | |||
| 90 | { | 90 | { |
| 91 | int reg; | 91 | int reg; |
| 92 | 92 | ||
| 93 | if (gpio == 94) | 93 | if (gpio >= CRYSTALCOVE_GPIO_NUM) { |
| 94 | return GPIOPANELCTL; | 94 | /* |
| 95 | * Virtual GPIO called from ACPI; for now we only support | ||
| 96 | * the panel ctl. | ||
| 97 | */ | ||
| 98 | switch (gpio) { | ||
| 99 | case 0x5e: | ||
| 100 | return GPIOPANELCTL; | ||
| 101 | default: | ||
| 102 | return -EOPNOTSUPP; | ||
| 103 | } | ||
| 104 | } | ||
| 95 | 105 | ||
| 96 | if (reg_type == CTRL_IN) { | 106 | if (reg_type == CTRL_IN) { |
| 97 | if (gpio < 8) | 107 | if (gpio < 8) |
| @@ -130,36 +140,36 @@ static void crystalcove_update_irq_ctrl(struct crystalcove_gpio *cg, int gpio) | |||
| 130 | static int crystalcove_gpio_dir_in(struct gpio_chip *chip, unsigned gpio) | 140 | static int crystalcove_gpio_dir_in(struct gpio_chip *chip, unsigned gpio) |
| 131 | { | 141 | { |
| 132 | struct crystalcove_gpio *cg = gpiochip_get_data(chip); | 142 | struct crystalcove_gpio *cg = gpiochip_get_data(chip); |
| 143 | int reg = to_reg(gpio, CTRL_OUT); | ||
| 133 | 144 | ||
| 134 | if (gpio > CRYSTALCOVE_VGPIO_NUM) | 145 | if (reg < 0) |
| 135 | return 0; | 146 | return 0; |
| 136 | 147 | ||
| 137 | return regmap_write(cg->regmap, to_reg(gpio, CTRL_OUT), | 148 | return regmap_write(cg->regmap, reg, CTLO_INPUT_SET); |
| 138 | CTLO_INPUT_SET); | ||
| 139 | } | 149 | } |
| 140 | 150 | ||
| 141 | static int crystalcove_gpio_dir_out(struct gpio_chip *chip, unsigned gpio, | 151 | static int crystalcove_gpio_dir_out(struct gpio_chip *chip, unsigned gpio, |
| 142 | int value) | 152 | int value) |
| 143 | { | 153 | { |
| 144 | struct crystalcove_gpio *cg = gpiochip_get_data(chip); | 154 | struct crystalcove_gpio *cg = gpiochip_get_data(chip); |
| 155 | int reg = to_reg(gpio, CTRL_OUT); | ||
| 145 | 156 | ||
| 146 | if (gpio > CRYSTALCOVE_VGPIO_NUM) | 157 | if (reg < 0) |
| 147 | return 0; | 158 | return 0; |
| 148 | 159 | ||
| 149 | return regmap_write(cg->regmap, to_reg(gpio, CTRL_OUT), | 160 | return regmap_write(cg->regmap, reg, CTLO_OUTPUT_SET | value); |
| 150 | CTLO_OUTPUT_SET | value); | ||
| 151 | } | 161 | } |
| 152 | 162 | ||
| 153 | static int crystalcove_gpio_get(struct gpio_chip *chip, unsigned gpio) | 163 | static int crystalcove_gpio_get(struct gpio_chip *chip, unsigned gpio) |
| 154 | { | 164 | { |
| 155 | struct crystalcove_gpio *cg = gpiochip_get_data(chip); | 165 | struct crystalcove_gpio *cg = gpiochip_get_data(chip); |
| 156 | int ret; | ||
| 157 | unsigned int val; | 166 | unsigned int val; |
| 167 | int ret, reg = to_reg(gpio, CTRL_IN); | ||
| 158 | 168 | ||
| 159 | if (gpio > CRYSTALCOVE_VGPIO_NUM) | 169 | if (reg < 0) |
| 160 | return 0; | 170 | return 0; |
| 161 | 171 | ||
| 162 | ret = regmap_read(cg->regmap, to_reg(gpio, CTRL_IN), &val); | 172 | ret = regmap_read(cg->regmap, reg, &val); |
| 163 | if (ret) | 173 | if (ret) |
| 164 | return ret; | 174 | return ret; |
| 165 | 175 | ||
| @@ -170,14 +180,15 @@ static void crystalcove_gpio_set(struct gpio_chip *chip, | |||
| 170 | unsigned gpio, int value) | 180 | unsigned gpio, int value) |
| 171 | { | 181 | { |
| 172 | struct crystalcove_gpio *cg = gpiochip_get_data(chip); | 182 | struct crystalcove_gpio *cg = gpiochip_get_data(chip); |
| 183 | int reg = to_reg(gpio, CTRL_OUT); | ||
| 173 | 184 | ||
| 174 | if (gpio > CRYSTALCOVE_VGPIO_NUM) | 185 | if (reg < 0) |
| 175 | return; | 186 | return; |
| 176 | 187 | ||
| 177 | if (value) | 188 | if (value) |
| 178 | regmap_update_bits(cg->regmap, to_reg(gpio, CTRL_OUT), 1, 1); | 189 | regmap_update_bits(cg->regmap, reg, 1, 1); |
| 179 | else | 190 | else |
| 180 | regmap_update_bits(cg->regmap, to_reg(gpio, CTRL_OUT), 1, 0); | 191 | regmap_update_bits(cg->regmap, reg, 1, 0); |
| 181 | } | 192 | } |
| 182 | 193 | ||
| 183 | static int crystalcove_irq_type(struct irq_data *data, unsigned type) | 194 | static int crystalcove_irq_type(struct irq_data *data, unsigned type) |
| @@ -185,6 +196,9 @@ static int crystalcove_irq_type(struct irq_data *data, unsigned type) | |||
| 185 | struct crystalcove_gpio *cg = | 196 | struct crystalcove_gpio *cg = |
| 186 | gpiochip_get_data(irq_data_get_irq_chip_data(data)); | 197 | gpiochip_get_data(irq_data_get_irq_chip_data(data)); |
| 187 | 198 | ||
| 199 | if (data->hwirq >= CRYSTALCOVE_GPIO_NUM) | ||
| 200 | return 0; | ||
| 201 | |||
| 188 | switch (type) { | 202 | switch (type) { |
| 189 | case IRQ_TYPE_NONE: | 203 | case IRQ_TYPE_NONE: |
| 190 | cg->intcnt_value = CTLI_INTCNT_DIS; | 204 | cg->intcnt_value = CTLI_INTCNT_DIS; |
| @@ -235,8 +249,10 @@ static void crystalcove_irq_unmask(struct irq_data *data) | |||
| 235 | struct crystalcove_gpio *cg = | 249 | struct crystalcove_gpio *cg = |
| 236 | gpiochip_get_data(irq_data_get_irq_chip_data(data)); | 250 | gpiochip_get_data(irq_data_get_irq_chip_data(data)); |
| 237 | 251 | ||
| 238 | cg->set_irq_mask = false; | 252 | if (data->hwirq < CRYSTALCOVE_GPIO_NUM) { |
| 239 | cg->update |= UPDATE_IRQ_MASK; | 253 | cg->set_irq_mask = false; |
| 254 | cg->update |= UPDATE_IRQ_MASK; | ||
| 255 | } | ||
| 240 | } | 256 | } |
| 241 | 257 | ||
| 242 | static void crystalcove_irq_mask(struct irq_data *data) | 258 | static void crystalcove_irq_mask(struct irq_data *data) |
| @@ -244,8 +260,10 @@ static void crystalcove_irq_mask(struct irq_data *data) | |||
| 244 | struct crystalcove_gpio *cg = | 260 | struct crystalcove_gpio *cg = |
| 245 | gpiochip_get_data(irq_data_get_irq_chip_data(data)); | 261 | gpiochip_get_data(irq_data_get_irq_chip_data(data)); |
| 246 | 262 | ||
| 247 | cg->set_irq_mask = true; | 263 | if (data->hwirq < CRYSTALCOVE_GPIO_NUM) { |
| 248 | cg->update |= UPDATE_IRQ_MASK; | 264 | cg->set_irq_mask = true; |
| 265 | cg->update |= UPDATE_IRQ_MASK; | ||
| 266 | } | ||
| 249 | } | 267 | } |
| 250 | 268 | ||
| 251 | static struct irq_chip crystalcove_irqchip = { | 269 | static struct irq_chip crystalcove_irqchip = { |
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index 19a92efabbef..c83ea68be792 100644 --- a/drivers/gpio/gpio-mvebu.c +++ b/drivers/gpio/gpio-mvebu.c | |||
| @@ -721,7 +721,7 @@ static int mvebu_pwm_probe(struct platform_device *pdev, | |||
| 721 | u32 set; | 721 | u32 set; |
| 722 | 722 | ||
| 723 | if (!of_device_is_compatible(mvchip->chip.of_node, | 723 | if (!of_device_is_compatible(mvchip->chip.of_node, |
| 724 | "marvell,armada-370-xp-gpio")) | 724 | "marvell,armada-370-gpio")) |
| 725 | return 0; | 725 | return 0; |
| 726 | 726 | ||
| 727 | if (IS_ERR(mvchip->clk)) | 727 | if (IS_ERR(mvchip->clk)) |
| @@ -747,7 +747,7 @@ static int mvebu_pwm_probe(struct platform_device *pdev, | |||
| 747 | set = U32_MAX; | 747 | set = U32_MAX; |
| 748 | else | 748 | else |
| 749 | return -EINVAL; | 749 | return -EINVAL; |
| 750 | writel_relaxed(0, mvebu_gpioreg_blink_counter_select(mvchip)); | 750 | writel_relaxed(set, mvebu_gpioreg_blink_counter_select(mvchip)); |
| 751 | 751 | ||
| 752 | mvpwm = devm_kzalloc(dev, sizeof(struct mvebu_pwm), GFP_KERNEL); | 752 | mvpwm = devm_kzalloc(dev, sizeof(struct mvebu_pwm), GFP_KERNEL); |
| 753 | if (!mvpwm) | 753 | if (!mvpwm) |
| @@ -768,6 +768,13 @@ static int mvebu_pwm_probe(struct platform_device *pdev, | |||
| 768 | mvpwm->chip.dev = dev; | 768 | mvpwm->chip.dev = dev; |
| 769 | mvpwm->chip.ops = &mvebu_pwm_ops; | 769 | mvpwm->chip.ops = &mvebu_pwm_ops; |
| 770 | mvpwm->chip.npwm = mvchip->chip.ngpio; | 770 | mvpwm->chip.npwm = mvchip->chip.ngpio; |
| 771 | /* | ||
| 772 | * Some PWMs may already be allocated, so we can't force | ||
| 773 | * mvpwm->chip.base to a fixed value the way mvchip->chip.base is. | ||
| 774 | * Instead, let pwmchip_add() do the numbering and take the next | ||
| 775 | * free region. | ||
| 776 | */ | ||
| 777 | mvpwm->chip.base = -1; | ||
| 771 | 778 | ||
| 772 | spin_lock_init(&mvpwm->lock); | 779 | spin_lock_init(&mvpwm->lock); |
| 773 | 780 | ||
| @@ -845,7 +852,7 @@ static const struct of_device_id mvebu_gpio_of_match[] = { | |||
| 845 | .data = (void *) MVEBU_GPIO_SOC_VARIANT_ARMADAXP, | 852 | .data = (void *) MVEBU_GPIO_SOC_VARIANT_ARMADAXP, |
| 846 | }, | 853 | }, |
| 847 | { | 854 | { |
| 848 | .compatible = "marvell,armada-370-xp-gpio", | 855 | .compatible = "marvell,armada-370-gpio", |
| 849 | .data = (void *) MVEBU_GPIO_SOC_VARIANT_ORION, | 856 | .data = (void *) MVEBU_GPIO_SOC_VARIANT_ORION, |
| 850 | }, | 857 | }, |
| 851 | { | 858 | { |
| @@ -1121,7 +1128,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev) | |||
| 1121 | mvchip); | 1128 | mvchip); |
| 1122 | } | 1129 | } |
| 1123 | 1130 | ||
| 1124 | /* Armada 370/XP has simple PWM support for GPIO lines */ | 1131 | /* Some MVEBU SoCs have simple PWM support for GPIO lines */ |
| 1125 | if (IS_ENABLED(CONFIG_PWM)) | 1132 | if (IS_ENABLED(CONFIG_PWM)) |
| 1126 | return mvebu_pwm_probe(pdev, mvchip, id); | 1133 | return mvebu_pwm_probe(pdev, mvchip, id); |
| 1127 | 1134 | ||
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index 2185232da823..8fa5fcd00e9a 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c | |||
| @@ -201,7 +201,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, | |||
| 201 | handler = acpi_gpio_irq_handler_evt; | 201 | handler = acpi_gpio_irq_handler_evt; |
| 202 | } | 202 | } |
| 203 | if (!handler) | 203 | if (!handler) |
| 204 | return AE_BAD_PARAMETER; | 204 | return AE_OK; |
| 205 | 205 | ||
| 206 | pin = acpi_gpiochip_pin_to_gpio_offset(chip->gpiodev, pin); | 206 | pin = acpi_gpiochip_pin_to_gpio_offset(chip->gpiodev, pin); |
| 207 | if (pin < 0) | 207 | if (pin < 0) |
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 5db44139cef8..a42a1eea5714 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c | |||
| @@ -708,7 +708,8 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p) | |||
| 708 | 708 | ||
| 709 | ge.timestamp = ktime_get_real_ns(); | 709 | ge.timestamp = ktime_get_real_ns(); |
| 710 | 710 | ||
| 711 | if (le->eflags & GPIOEVENT_REQUEST_BOTH_EDGES) { | 711 | if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE |
| 712 | && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) { | ||
| 712 | int level = gpiod_get_value_cansleep(le->desc); | 713 | int level = gpiod_get_value_cansleep(le->desc); |
| 713 | 714 | ||
| 714 | if (level) | 715 | if (level) |
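GPIOEVENT_REQUEST_BOTH_EDGES is defined in the GPIO uapi as the OR of the rising- and falling-edge bits, so the old mask test fired whenever either edge was requested; the replacement requires both bits before doing the level read-back. A small illustration of the difference:

	u32 eflags = GPIOEVENT_REQUEST_RISING_EDGE;	/* rising edges only */

	/* old test: true, because BOTH_EDGES == RISING_EDGE | FALLING_EDGE */
	bool old_hit = eflags & GPIOEVENT_REQUEST_BOTH_EDGES;

	/* new test: false, both bits must be set */
	bool new_hit = (eflags & GPIOEVENT_REQUEST_RISING_EDGE) &&
		       (eflags & GPIOEVENT_REQUEST_FALLING_EDGE);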
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 1cf78f4dd339..1e8e1123ddf4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | |||
| @@ -693,6 +693,10 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev) | |||
| 693 | DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n", | 693 | DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n", |
| 694 | adev->clock.default_dispclk / 100); | 694 | adev->clock.default_dispclk / 100); |
| 695 | adev->clock.default_dispclk = 60000; | 695 | adev->clock.default_dispclk = 60000; |
| 696 | } else if (adev->clock.default_dispclk <= 60000) { | ||
| 697 | DRM_INFO("Changing default dispclk from %dMhz to 625Mhz\n", | ||
| 698 | adev->clock.default_dispclk / 100); | ||
| 699 | adev->clock.default_dispclk = 62500; | ||
| 696 | } | 700 | } |
| 697 | adev->clock.dp_extclk = | 701 | adev->clock.dp_extclk = |
| 698 | le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); | 702 | le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index f2d705e6a75a..ab6b0d0febab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | |||
| @@ -449,6 +449,7 @@ static const struct pci_device_id pciidlist[] = { | |||
| 449 | {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, | 449 | {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, |
| 450 | {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, | 450 | {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, |
| 451 | {0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, | 451 | {0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, |
| 452 | {0x1002, 0x6997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, | ||
| 452 | {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, | 453 | {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, |
| 453 | /* Vega 10 */ | 454 | /* Vega 10 */ |
| 454 | {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT}, | 455 | {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT}, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index a4831fe0223b..a2c59a08b2bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | |||
| @@ -220,9 +220,9 @@ static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man, | |||
| 220 | } | 220 | } |
| 221 | 221 | ||
| 222 | const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = { | 222 | const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = { |
| 223 | amdgpu_vram_mgr_init, | 223 | .init = amdgpu_vram_mgr_init, |
| 224 | amdgpu_vram_mgr_fini, | 224 | .takedown = amdgpu_vram_mgr_fini, |
| 225 | amdgpu_vram_mgr_new, | 225 | .get_node = amdgpu_vram_mgr_new, |
| 226 | amdgpu_vram_mgr_del, | 226 | .put_node = amdgpu_vram_mgr_del, |
| 227 | amdgpu_vram_mgr_debug | 227 | .debug = amdgpu_vram_mgr_debug |
| 228 | }; | 228 | }; |
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c index 8c9bc75a9c2d..8a0818b23ea4 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c | |||
| @@ -165,7 +165,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state) | |||
| 165 | struct drm_device *dev = crtc->dev; | 165 | struct drm_device *dev = crtc->dev; |
| 166 | struct amdgpu_device *adev = dev->dev_private; | 166 | struct amdgpu_device *adev = dev->dev_private; |
| 167 | int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); | 167 | int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); |
| 168 | ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args; | 168 | ENABLE_DISP_POWER_GATING_PS_ALLOCATION args; |
| 169 | 169 | ||
| 170 | memset(&args, 0, sizeof(args)); | 170 | memset(&args, 0, sizeof(args)); |
| 171 | 171 | ||
| @@ -178,7 +178,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state) | |||
| 178 | void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev) | 178 | void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev) |
| 179 | { | 179 | { |
| 180 | int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); | 180 | int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); |
| 181 | ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args; | 181 | ENABLE_DISP_POWER_GATING_PS_ALLOCATION args; |
| 182 | 182 | ||
| 183 | memset(&args, 0, sizeof(args)); | 183 | memset(&args, 0, sizeof(args)); |
| 184 | 184 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 0cdeb6a2e4a0..5dffa27afa45 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | |||
| @@ -1207,8 +1207,11 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev, | |||
| 1207 | u32 tmp, wm_mask, lb_vblank_lead_lines = 0; | 1207 | u32 tmp, wm_mask, lb_vblank_lead_lines = 0; |
| 1208 | 1208 | ||
| 1209 | if (amdgpu_crtc->base.enabled && num_heads && mode) { | 1209 | if (amdgpu_crtc->base.enabled && num_heads && mode) { |
| 1210 | active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; | 1210 | active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, |
| 1211 | line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); | 1211 | (u32)mode->clock); |
| 1212 | line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, | ||
| 1213 | (u32)mode->clock); | ||
| 1214 | line_time = min(line_time, (u32)65535); | ||
| 1212 | 1215 | ||
| 1213 | /* watermark for high clocks */ | 1216 | /* watermark for high clocks */ |
| 1214 | if (adev->pm.dpm_enabled) { | 1217 | if (adev->pm.dpm_enabled) { |
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 773654a19749..47bbc87f96d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | |||
| @@ -1176,8 +1176,11 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev, | |||
| 1176 | u32 tmp, wm_mask, lb_vblank_lead_lines = 0; | 1176 | u32 tmp, wm_mask, lb_vblank_lead_lines = 0; |
| 1177 | 1177 | ||
| 1178 | if (amdgpu_crtc->base.enabled && num_heads && mode) { | 1178 | if (amdgpu_crtc->base.enabled && num_heads && mode) { |
| 1179 | active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; | 1179 | active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, |
| 1180 | line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); | 1180 | (u32)mode->clock); |
| 1181 | line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, | ||
| 1182 | (u32)mode->clock); | ||
| 1183 | line_time = min(line_time, (u32)65535); | ||
| 1181 | 1184 | ||
| 1182 | /* watermark for high clocks */ | 1185 | /* watermark for high clocks */ |
| 1183 | if (adev->pm.dpm_enabled) { | 1186 | if (adev->pm.dpm_enabled) { |
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index 1f3552967ba3..d8c9a959493e 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | |||
| @@ -983,8 +983,11 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev, | |||
| 983 | fixed20_12 a, b, c; | 983 | fixed20_12 a, b, c; |
| 984 | 984 | ||
| 985 | if (amdgpu_crtc->base.enabled && num_heads && mode) { | 985 | if (amdgpu_crtc->base.enabled && num_heads && mode) { |
| 986 | active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; | 986 | active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, |
| 987 | line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); | 987 | (u32)mode->clock); |
| 988 | line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, | ||
| 989 | (u32)mode->clock); | ||
| 990 | line_time = min(line_time, (u32)65535); | ||
| 988 | priority_a_cnt = 0; | 991 | priority_a_cnt = 0; |
| 989 | priority_b_cnt = 0; | 992 | priority_b_cnt = 0; |
| 990 | 993 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 3c558c170e5e..db30c6ba563a 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | |||
| @@ -1091,8 +1091,11 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev, | |||
| 1091 | u32 tmp, wm_mask, lb_vblank_lead_lines = 0; | 1091 | u32 tmp, wm_mask, lb_vblank_lead_lines = 0; |
| 1092 | 1092 | ||
| 1093 | if (amdgpu_crtc->base.enabled && num_heads && mode) { | 1093 | if (amdgpu_crtc->base.enabled && num_heads && mode) { |
| 1094 | active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; | 1094 | active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, |
| 1095 | line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); | 1095 | (u32)mode->clock); |
| 1096 | line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, | ||
| 1097 | (u32)mode->clock); | ||
| 1098 | line_time = min(line_time, (u32)65535); | ||
| 1096 | 1099 | ||
| 1097 | /* watermark for high clocks */ | 1100 | /* watermark for high clocks */ |
| 1098 | if (adev->pm.dpm_enabled) { | 1101 | if (adev->pm.dpm_enabled) { |
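The four watermark hunks above widen an intermediate that can overflow: on builds where unsigned long is 32 bits, 1000000UL * crtc_htotal wraps once crtc_htotal passes about 4294, which common 4K timings exceed. Rough numbers, assuming a 4K mode with htotal = 4400 and mode->clock = 594000 (kHz):

	u32 htotal = 4400, clock = 594000;

	/* 32-bit product: 4400 * 1000000 = 4.4e9, past UINT_MAX (~4.29e9) */
	u32 wrong_line_time = 1000000U * htotal / clock;		/* ~176 after wrap */

	/* 64-bit intermediate, then clamp, as in the patched code */
	u32 line_time = (u32)div_u64((u64)htotal * 1000000, clock);	/* ~7407 */
	line_time = min(line_time, (u32)65535);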
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index fb0819359909..90332f55cfba 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | |||
| @@ -77,13 +77,26 @@ static int vce_v3_0_set_clockgating_state(void *handle, | |||
| 77 | static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring) | 77 | static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring) |
| 78 | { | 78 | { |
| 79 | struct amdgpu_device *adev = ring->adev; | 79 | struct amdgpu_device *adev = ring->adev; |
| 80 | u32 v; | ||
| 81 | |||
| 82 | mutex_lock(&adev->grbm_idx_mutex); | ||
| 83 | if (adev->vce.harvest_config == 0 || | ||
| 84 | adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1) | ||
| 85 | WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0)); | ||
| 86 | else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) | ||
| 87 | WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1)); | ||
| 80 | 88 | ||
| 81 | if (ring == &adev->vce.ring[0]) | 89 | if (ring == &adev->vce.ring[0]) |
| 82 | return RREG32(mmVCE_RB_RPTR); | 90 | v = RREG32(mmVCE_RB_RPTR); |
| 83 | else if (ring == &adev->vce.ring[1]) | 91 | else if (ring == &adev->vce.ring[1]) |
| 84 | return RREG32(mmVCE_RB_RPTR2); | 92 | v = RREG32(mmVCE_RB_RPTR2); |
| 85 | else | 93 | else |
| 86 | return RREG32(mmVCE_RB_RPTR3); | 94 | v = RREG32(mmVCE_RB_RPTR3); |
| 95 | |||
| 96 | WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT); | ||
| 97 | mutex_unlock(&adev->grbm_idx_mutex); | ||
| 98 | |||
| 99 | return v; | ||
| 87 | } | 100 | } |
| 88 | 101 | ||
| 89 | /** | 102 | /** |
| @@ -96,13 +109,26 @@ static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring) | |||
| 96 | static uint64_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring) | 109 | static uint64_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring) |
| 97 | { | 110 | { |
| 98 | struct amdgpu_device *adev = ring->adev; | 111 | struct amdgpu_device *adev = ring->adev; |
| 112 | u32 v; | ||
| 113 | |||
| 114 | mutex_lock(&adev->grbm_idx_mutex); | ||
| 115 | if (adev->vce.harvest_config == 0 || | ||
| 116 | adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1) | ||
| 117 | WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0)); | ||
| 118 | else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) | ||
| 119 | WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1)); | ||
| 99 | 120 | ||
| 100 | if (ring == &adev->vce.ring[0]) | 121 | if (ring == &adev->vce.ring[0]) |
| 101 | return RREG32(mmVCE_RB_WPTR); | 122 | v = RREG32(mmVCE_RB_WPTR); |
| 102 | else if (ring == &adev->vce.ring[1]) | 123 | else if (ring == &adev->vce.ring[1]) |
| 103 | return RREG32(mmVCE_RB_WPTR2); | 124 | v = RREG32(mmVCE_RB_WPTR2); |
| 104 | else | 125 | else |
| 105 | return RREG32(mmVCE_RB_WPTR3); | 126 | v = RREG32(mmVCE_RB_WPTR3); |
| 127 | |||
| 128 | WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT); | ||
| 129 | mutex_unlock(&adev->grbm_idx_mutex); | ||
| 130 | |||
| 131 | return v; | ||
| 106 | } | 132 | } |
| 107 | 133 | ||
| 108 | /** | 134 | /** |
| @@ -116,12 +142,22 @@ static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring) | |||
| 116 | { | 142 | { |
| 117 | struct amdgpu_device *adev = ring->adev; | 143 | struct amdgpu_device *adev = ring->adev; |
| 118 | 144 | ||
| 145 | mutex_lock(&adev->grbm_idx_mutex); | ||
| 146 | if (adev->vce.harvest_config == 0 || | ||
| 147 | adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1) | ||
| 148 | WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0)); | ||
| 149 | else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) | ||
| 150 | WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1)); | ||
| 151 | |||
| 119 | if (ring == &adev->vce.ring[0]) | 152 | if (ring == &adev->vce.ring[0]) |
| 120 | WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr)); | 153 | WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr)); |
| 121 | else if (ring == &adev->vce.ring[1]) | 154 | else if (ring == &adev->vce.ring[1]) |
| 122 | WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr)); | 155 | WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr)); |
| 123 | else | 156 | else |
| 124 | WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr)); | 157 | WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr)); |
| 158 | |||
| 159 | WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT); | ||
| 160 | mutex_unlock(&adev->grbm_idx_mutex); | ||
| 125 | } | 161 | } |
| 126 | 162 | ||
| 127 | static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override) | 163 | static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override) |
| @@ -231,33 +267,38 @@ static int vce_v3_0_start(struct amdgpu_device *adev) | |||
| 231 | struct amdgpu_ring *ring; | 267 | struct amdgpu_ring *ring; |
| 232 | int idx, r; | 268 | int idx, r; |
| 233 | 269 | ||
| 234 | ring = &adev->vce.ring[0]; | ||
| 235 | WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr)); | ||
| 236 | WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr)); | ||
| 237 | WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr); | ||
| 238 | WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); | ||
| 239 | WREG32(mmVCE_RB_SIZE, ring->ring_size / 4); | ||
| 240 | |||
| 241 | ring = &adev->vce.ring[1]; | ||
| 242 | WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr)); | ||
| 243 | WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr)); | ||
| 244 | WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr); | ||
| 245 | WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); | ||
| 246 | WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4); | ||
| 247 | |||
| 248 | ring = &adev->vce.ring[2]; | ||
| 249 | WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr)); | ||
| 250 | WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr)); | ||
| 251 | WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr); | ||
| 252 | WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr)); | ||
| 253 | WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4); | ||
| 254 | |||
| 255 | mutex_lock(&adev->grbm_idx_mutex); | 270 | mutex_lock(&adev->grbm_idx_mutex); |
| 256 | for (idx = 0; idx < 2; ++idx) { | 271 | for (idx = 0; idx < 2; ++idx) { |
| 257 | if (adev->vce.harvest_config & (1 << idx)) | 272 | if (adev->vce.harvest_config & (1 << idx)) |
| 258 | continue; | 273 | continue; |
| 259 | 274 | ||
| 260 | WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx)); | 275 | WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx)); |
| 276 | |||
| 277 | /* Program the instance 0 register space when both instances or only | ||
| 278 | instance 0 are present; otherwise program the instance 1 space. */ | ||
| 279 | if (idx != 1 || adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) { | ||
| 280 | ring = &adev->vce.ring[0]; | ||
| 281 | WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr)); | ||
| 282 | WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr)); | ||
| 283 | WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr); | ||
| 284 | WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); | ||
| 285 | WREG32(mmVCE_RB_SIZE, ring->ring_size / 4); | ||
| 286 | |||
| 287 | ring = &adev->vce.ring[1]; | ||
| 288 | WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr)); | ||
| 289 | WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr)); | ||
| 290 | WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr); | ||
| 291 | WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); | ||
| 292 | WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4); | ||
| 293 | |||
| 294 | ring = &adev->vce.ring[2]; | ||
| 295 | WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr)); | ||
| 296 | WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr)); | ||
| 297 | WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr); | ||
| 298 | WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr)); | ||
| 299 | WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4); | ||
| 300 | } | ||
| 301 | |||
| 261 | vce_v3_0_mc_resume(adev, idx); | 302 | vce_v3_0_mc_resume(adev, idx); |
| 262 | WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1); | 303 | WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1); |
| 263 | 304 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c index d5f53d04fa08..83e40fe51b62 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c | |||
| @@ -709,17 +709,17 @@ static int tf_vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr, | |||
| 709 | 709 | ||
| 710 | static struct phm_master_table_item | 710 | static struct phm_master_table_item |
| 711 | vega10_thermal_start_thermal_controller_master_list[] = { | 711 | vega10_thermal_start_thermal_controller_master_list[] = { |
| 712 | {NULL, tf_vega10_thermal_initialize}, | 712 | { .tableFunction = tf_vega10_thermal_initialize }, |
| 713 | {NULL, tf_vega10_thermal_set_temperature_range}, | 713 | { .tableFunction = tf_vega10_thermal_set_temperature_range }, |
| 714 | {NULL, tf_vega10_thermal_enable_alert}, | 714 | { .tableFunction = tf_vega10_thermal_enable_alert }, |
| 715 | /* We should restrict performance levels to low before we halt the SMC. | 715 | /* We should restrict performance levels to low before we halt the SMC. |
| 716 | * On the other hand we are still in boot state when we do this | 716 | * On the other hand we are still in boot state when we do this |
| 717 | * so it would be pointless. | 717 | * so it would be pointless. |
| 718 | * If this assumption changes we have to revisit this table. | 718 | * If this assumption changes we have to revisit this table. |
| 719 | */ | 719 | */ |
| 720 | {NULL, tf_vega10_thermal_setup_fan_table}, | 720 | { .tableFunction = tf_vega10_thermal_setup_fan_table }, |
| 721 | {NULL, tf_vega10_thermal_start_smc_fan_control}, | 721 | { .tableFunction = tf_vega10_thermal_start_smc_fan_control }, |
| 722 | {NULL, NULL} | 722 | { } |
| 723 | }; | 723 | }; |
| 724 | 724 | ||
| 725 | static struct phm_master_table_header | 725 | static struct phm_master_table_header |
| @@ -731,10 +731,10 @@ vega10_thermal_start_thermal_controller_master = { | |||
| 731 | 731 | ||
| 732 | static struct phm_master_table_item | 732 | static struct phm_master_table_item |
| 733 | vega10_thermal_set_temperature_range_master_list[] = { | 733 | vega10_thermal_set_temperature_range_master_list[] = { |
| 734 | {NULL, tf_vega10_thermal_disable_alert}, | 734 | { .tableFunction = tf_vega10_thermal_disable_alert }, |
| 735 | {NULL, tf_vega10_thermal_set_temperature_range}, | 735 | { .tableFunction = tf_vega10_thermal_set_temperature_range }, |
| 736 | {NULL, tf_vega10_thermal_enable_alert}, | 736 | { .tableFunction = tf_vega10_thermal_enable_alert }, |
| 737 | {NULL, NULL} | 737 | { } |
| 738 | }; | 738 | }; |
| 739 | 739 | ||
| 740 | struct phm_master_table_header | 740 | struct phm_master_table_header |
diff --git a/drivers/gpu/drm/bridge/synopsys/Kconfig b/drivers/gpu/drm/bridge/synopsys/Kconfig index 40d2827a6d19..53e78d092d18 100644 --- a/drivers/gpu/drm/bridge/synopsys/Kconfig +++ b/drivers/gpu/drm/bridge/synopsys/Kconfig | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | config DRM_DW_HDMI | 1 | config DRM_DW_HDMI |
| 2 | tristate | 2 | tristate |
| 3 | select DRM_KMS_HELPER | 3 | select DRM_KMS_HELPER |
| 4 | select REGMAP_MMIO | ||
| 4 | 5 | ||
| 5 | config DRM_DW_HDMI_AHB_AUDIO | 6 | config DRM_DW_HDMI_AHB_AUDIO |
| 6 | tristate "Synopsys Designware AHB Audio interface" | 7 | tristate "Synopsys Designware AHB Audio interface" |
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 8be9719284b0..aa885a614e27 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c | |||
| @@ -508,6 +508,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev, | |||
| 508 | bool has_connectors = | 508 | bool has_connectors = |
| 509 | !!new_crtc_state->connector_mask; | 509 | !!new_crtc_state->connector_mask; |
| 510 | 510 | ||
| 511 | WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); | ||
| 512 | |||
| 511 | if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) { | 513 | if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) { |
| 512 | DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n", | 514 | DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n", |
| 513 | crtc->base.id, crtc->name); | 515 | crtc->base.id, crtc->name); |
| @@ -551,6 +553,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev, | |||
| 551 | for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) { | 553 | for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) { |
| 552 | const struct drm_connector_helper_funcs *funcs = connector->helper_private; | 554 | const struct drm_connector_helper_funcs *funcs = connector->helper_private; |
| 553 | 555 | ||
| 556 | WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); | ||
| 557 | |||
| 554 | /* | 558 | /* |
| 555 | * This only sets crtc->connectors_changed for routing changes, | 559 | * This only sets crtc->connectors_changed for routing changes, |
| 556 | * drivers must set crtc->connectors_changed themselves when | 560 | * drivers must set crtc->connectors_changed themselves when |
| @@ -650,6 +654,8 @@ drm_atomic_helper_check_planes(struct drm_device *dev, | |||
| 650 | for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { | 654 | for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { |
| 651 | const struct drm_plane_helper_funcs *funcs; | 655 | const struct drm_plane_helper_funcs *funcs; |
| 652 | 656 | ||
| 657 | WARN_ON(!drm_modeset_is_locked(&plane->mutex)); | ||
| 658 | |||
| 653 | funcs = plane->helper_private; | 659 | funcs = plane->helper_private; |
| 654 | 660 | ||
| 655 | drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane); | 661 | drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane); |
| @@ -2663,7 +2669,12 @@ int drm_atomic_helper_resume(struct drm_device *dev, | |||
| 2663 | 2669 | ||
| 2664 | drm_modeset_acquire_init(&ctx, 0); | 2670 | drm_modeset_acquire_init(&ctx, 0); |
| 2665 | while (1) { | 2671 | while (1) { |
| 2672 | err = drm_modeset_lock_all_ctx(dev, &ctx); | ||
| 2673 | if (err) | ||
| 2674 | goto out; | ||
| 2675 | |||
| 2666 | err = drm_atomic_helper_commit_duplicated_state(state, &ctx); | 2676 | err = drm_atomic_helper_commit_duplicated_state(state, &ctx); |
| 2677 | out: | ||
| 2667 | if (err != -EDEADLK) | 2678 | if (err != -EDEADLK) |
| 2668 | break; | 2679 | break; |
| 2669 | 2680 | ||
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 9f847615ac74..48ca2457df8c 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c | |||
| @@ -1229,21 +1229,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, | |||
| 1229 | if (!connector) | 1229 | if (!connector) |
| 1230 | return -ENOENT; | 1230 | return -ENOENT; |
| 1231 | 1231 | ||
| 1232 | drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); | ||
| 1233 | encoder = drm_connector_get_encoder(connector); | ||
| 1234 | if (encoder) | ||
| 1235 | out_resp->encoder_id = encoder->base.id; | ||
| 1236 | else | ||
| 1237 | out_resp->encoder_id = 0; | ||
| 1238 | |||
| 1239 | ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic, | ||
| 1240 | (uint32_t __user *)(unsigned long)(out_resp->props_ptr), | ||
| 1241 | (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr), | ||
| 1242 | &out_resp->count_props); | ||
| 1243 | drm_modeset_unlock(&dev->mode_config.connection_mutex); | ||
| 1244 | if (ret) | ||
| 1245 | goto out_unref; | ||
| 1246 | |||
| 1247 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) | 1232 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) |
| 1248 | if (connector->encoder_ids[i] != 0) | 1233 | if (connector->encoder_ids[i] != 0) |
| 1249 | encoders_count++; | 1234 | encoders_count++; |
| @@ -1256,7 +1241,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, | |||
| 1256 | if (put_user(connector->encoder_ids[i], | 1241 | if (put_user(connector->encoder_ids[i], |
| 1257 | encoder_ptr + copied)) { | 1242 | encoder_ptr + copied)) { |
| 1258 | ret = -EFAULT; | 1243 | ret = -EFAULT; |
| 1259 | goto out_unref; | 1244 | goto out; |
| 1260 | } | 1245 | } |
| 1261 | copied++; | 1246 | copied++; |
| 1262 | } | 1247 | } |
| @@ -1300,15 +1285,32 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, | |||
| 1300 | if (copy_to_user(mode_ptr + copied, | 1285 | if (copy_to_user(mode_ptr + copied, |
| 1301 | &u_mode, sizeof(u_mode))) { | 1286 | &u_mode, sizeof(u_mode))) { |
| 1302 | ret = -EFAULT; | 1287 | ret = -EFAULT; |
| 1288 | mutex_unlock(&dev->mode_config.mutex); | ||
| 1289 | |||
| 1303 | goto out; | 1290 | goto out; |
| 1304 | } | 1291 | } |
| 1305 | copied++; | 1292 | copied++; |
| 1306 | } | 1293 | } |
| 1307 | } | 1294 | } |
| 1308 | out_resp->count_modes = mode_count; | 1295 | out_resp->count_modes = mode_count; |
| 1309 | out: | ||
| 1310 | mutex_unlock(&dev->mode_config.mutex); | 1296 | mutex_unlock(&dev->mode_config.mutex); |
| 1311 | out_unref: | 1297 | |
| 1298 | drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); | ||
| 1299 | encoder = drm_connector_get_encoder(connector); | ||
| 1300 | if (encoder) | ||
| 1301 | out_resp->encoder_id = encoder->base.id; | ||
| 1302 | else | ||
| 1303 | out_resp->encoder_id = 0; | ||
| 1304 | |||
| 1305 | /* Only grab properties after probing, to make sure EDID and other | ||
| 1306 | * properties reflect the latest status. */ | ||
| 1307 | ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic, | ||
| 1308 | (uint32_t __user *)(unsigned long)(out_resp->props_ptr), | ||
| 1309 | (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr), | ||
| 1310 | &out_resp->count_props); | ||
| 1311 | drm_modeset_unlock(&dev->mode_config.connection_mutex); | ||
| 1312 | |||
| 1313 | out: | ||
| 1312 | drm_connector_put(connector); | 1314 | drm_connector_put(connector); |
| 1313 | 1315 | ||
| 1314 | return ret; | 1316 | return ret; |
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 3e5f52110ea1..213fb837e1c4 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c | |||
| @@ -1208,3 +1208,86 @@ int drm_dp_stop_crc(struct drm_dp_aux *aux) | |||
| 1208 | return 0; | 1208 | return 0; |
| 1209 | } | 1209 | } |
| 1210 | EXPORT_SYMBOL(drm_dp_stop_crc); | 1210 | EXPORT_SYMBOL(drm_dp_stop_crc); |
| 1211 | |||
| 1212 | struct dpcd_quirk { | ||
| 1213 | u8 oui[3]; | ||
| 1214 | bool is_branch; | ||
| 1215 | u32 quirks; | ||
| 1216 | }; | ||
| 1217 | |||
| 1218 | #define OUI(first, second, third) { (first), (second), (third) } | ||
| 1219 | |||
| 1220 | static const struct dpcd_quirk dpcd_quirk_list[] = { | ||
| 1221 | /* Analogix 7737 needs reduced M and N at HBR2 link rates */ | ||
| 1222 | { OUI(0x00, 0x22, 0xb9), true, BIT(DP_DPCD_QUIRK_LIMITED_M_N) }, | ||
| 1223 | }; | ||
| 1224 | |||
| 1225 | #undef OUI | ||
| 1226 | |||
| 1227 | /* | ||
| 1228 | * Get a bit mask of DPCD quirks for the sink/branch device identified by | ||
| 1229 | * ident. The quirk data is shared but it's up to the drivers to act on the | ||
| 1230 | * data. | ||
| 1231 | * | ||
| 1232 | * For now, only the OUI (first three bytes) is used, but this may be extended | ||
| 1233 | * to device identification string and hardware/firmware revisions later. | ||
| 1234 | */ | ||
| 1235 | static u32 | ||
| 1236 | drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch) | ||
| 1237 | { | ||
| 1238 | const struct dpcd_quirk *quirk; | ||
| 1239 | u32 quirks = 0; | ||
| 1240 | int i; | ||
| 1241 | |||
| 1242 | for (i = 0; i < ARRAY_SIZE(dpcd_quirk_list); i++) { | ||
| 1243 | quirk = &dpcd_quirk_list[i]; | ||
| 1244 | |||
| 1245 | if (quirk->is_branch != is_branch) | ||
| 1246 | continue; | ||
| 1247 | |||
| 1248 | if (memcmp(quirk->oui, ident->oui, sizeof(ident->oui)) != 0) | ||
| 1249 | continue; | ||
| 1250 | |||
| 1251 | quirks |= quirk->quirks; | ||
| 1252 | } | ||
| 1253 | |||
| 1254 | return quirks; | ||
| 1255 | } | ||
| 1256 | |||
| 1257 | /** | ||
| 1258 | * drm_dp_read_desc - read sink/branch descriptor from DPCD | ||
| 1259 | * @aux: DisplayPort AUX channel | ||
| 1260 | * @desc: Device descriptor to fill from DPCD | ||
| 1261 | * @is_branch: true for branch devices, false for sink devices | ||
| 1262 | * | ||
| 1263 | * Read DPCD 0x400 (sink) or 0x500 (branch) into @desc. Also debug log the | ||
| 1264 | * identification. | ||
| 1265 | * | ||
| 1266 | * Returns 0 on success or a negative error code on failure. | ||
| 1267 | */ | ||
| 1268 | int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc, | ||
| 1269 | bool is_branch) | ||
| 1270 | { | ||
| 1271 | struct drm_dp_dpcd_ident *ident = &desc->ident; | ||
| 1272 | unsigned int offset = is_branch ? DP_BRANCH_OUI : DP_SINK_OUI; | ||
| 1273 | int ret, dev_id_len; | ||
| 1274 | |||
| 1275 | ret = drm_dp_dpcd_read(aux, offset, ident, sizeof(*ident)); | ||
| 1276 | if (ret < 0) | ||
| 1277 | return ret; | ||
| 1278 | |||
| 1279 | desc->quirks = drm_dp_get_quirks(ident, is_branch); | ||
| 1280 | |||
| 1281 | dev_id_len = strnlen(ident->device_id, sizeof(ident->device_id)); | ||
| 1282 | |||
| 1283 | DRM_DEBUG_KMS("DP %s: OUI %*phD dev-ID %*pE HW-rev %d.%d SW-rev %d.%d quirks 0x%04x\n", | ||
| 1284 | is_branch ? "branch" : "sink", | ||
| 1285 | (int)sizeof(ident->oui), ident->oui, | ||
| 1286 | dev_id_len, ident->device_id, | ||
| 1287 | ident->hw_rev >> 4, ident->hw_rev & 0xf, | ||
| 1288 | ident->sw_major_rev, ident->sw_minor_rev, | ||
| 1289 | desc->quirks); | ||
| 1290 | |||
| 1291 | return 0; | ||
| 1292 | } | ||
| 1293 | EXPORT_SYMBOL(drm_dp_read_desc); | ||
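A hedged sketch of how a driver might consume the new descriptor; aux and the is_branch decision are stand-ins taken from whatever the driver already knows about the link, and the quirk check is a plain bit test against desc.quirks rather than a documented helper:

	struct drm_dp_desc desc;
	bool limited_m_n = false;
	int ret;

	ret = drm_dp_read_desc(aux, &desc, is_branch);
	if (ret)
		return ret;

	/* Analogix 7737 branch devices want reduced M/N at HBR2 rates */
	if (desc.quirks & BIT(DP_DPCD_QUIRK_LIMITED_M_N))
		limited_m_n = true;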
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index b5c6bb46a425..37b8ad3e30d8 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c | |||
| @@ -358,7 +358,12 @@ EXPORT_SYMBOL(drm_put_dev); | |||
| 358 | void drm_unplug_dev(struct drm_device *dev) | 358 | void drm_unplug_dev(struct drm_device *dev) |
| 359 | { | 359 | { |
| 360 | /* for a USB device */ | 360 | /* for a USB device */ |
| 361 | drm_dev_unregister(dev); | 361 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
| 362 | drm_modeset_unregister_all(dev); | ||
| 363 | |||
| 364 | drm_minor_unregister(dev, DRM_MINOR_PRIMARY); | ||
| 365 | drm_minor_unregister(dev, DRM_MINOR_RENDER); | ||
| 366 | drm_minor_unregister(dev, DRM_MINOR_CONTROL); | ||
| 362 | 367 | ||
| 363 | mutex_lock(&drm_global_mutex); | 368 | mutex_lock(&drm_global_mutex); |
| 364 | 369 | ||
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h index c4a091e87426..e437fba1209d 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h | |||
| @@ -106,9 +106,10 @@ struct etnaviv_gem_submit { | |||
| 106 | struct etnaviv_gpu *gpu; | 106 | struct etnaviv_gpu *gpu; |
| 107 | struct ww_acquire_ctx ticket; | 107 | struct ww_acquire_ctx ticket; |
| 108 | struct dma_fence *fence; | 108 | struct dma_fence *fence; |
| 109 | u32 flags; | ||
| 109 | unsigned int nr_bos; | 110 | unsigned int nr_bos; |
| 110 | struct etnaviv_gem_submit_bo bos[0]; | 111 | struct etnaviv_gem_submit_bo bos[0]; |
| 111 | u32 flags; | 112 | /* No new members here, the previous one is variable-length! */ |
| 112 | }; | 113 | }; |
| 113 | 114 | ||
| 114 | int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj, | 115 | int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj, |
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index de80ee1b71df..1013765274da 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | |||
| @@ -172,7 +172,7 @@ static int submit_fence_sync(const struct etnaviv_gem_submit *submit) | |||
| 172 | for (i = 0; i < submit->nr_bos; i++) { | 172 | for (i = 0; i < submit->nr_bos; i++) { |
| 173 | struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; | 173 | struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; |
| 174 | bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE; | 174 | bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE; |
| 175 | bool explicit = !(submit->flags & ETNA_SUBMIT_NO_IMPLICIT); | 175 | bool explicit = !!(submit->flags & ETNA_SUBMIT_NO_IMPLICIT); |
| 176 | 176 | ||
| 177 | ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write, | 177 | ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write, |
| 178 | explicit); | 178 | explicit); |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 09d3c4c3c858..50294a7bd29d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c | |||
| @@ -82,14 +82,9 @@ err_file_priv_free: | |||
| 82 | return ret; | 82 | return ret; |
| 83 | } | 83 | } |
| 84 | 84 | ||
| 85 | static void exynos_drm_preclose(struct drm_device *dev, | ||
| 86 | struct drm_file *file) | ||
| 87 | { | ||
| 88 | exynos_drm_subdrv_close(dev, file); | ||
| 89 | } | ||
| 90 | |||
| 91 | static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file) | 85 | static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file) |
| 92 | { | 86 | { |
| 87 | exynos_drm_subdrv_close(dev, file); | ||
| 93 | kfree(file->driver_priv); | 88 | kfree(file->driver_priv); |
| 94 | file->driver_priv = NULL; | 89 | file->driver_priv = NULL; |
| 95 | } | 90 | } |
| @@ -145,7 +140,6 @@ static struct drm_driver exynos_drm_driver = { | |||
| 145 | .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | 140 | .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
| 146 | | DRIVER_ATOMIC | DRIVER_RENDER, | 141 | | DRIVER_ATOMIC | DRIVER_RENDER, |
| 147 | .open = exynos_drm_open, | 142 | .open = exynos_drm_open, |
| 148 | .preclose = exynos_drm_preclose, | ||
| 149 | .lastclose = exynos_drm_lastclose, | 143 | .lastclose = exynos_drm_lastclose, |
| 150 | .postclose = exynos_drm_postclose, | 144 | .postclose = exynos_drm_postclose, |
| 151 | .gem_free_object_unlocked = exynos_drm_gem_free_object, | 145 | .gem_free_object_unlocked = exynos_drm_gem_free_object, |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index cb3176930596..39c740572034 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h | |||
| @@ -160,12 +160,9 @@ struct exynos_drm_clk { | |||
| 160 | * drm framework doesn't support multiple irq yet. | 160 | * drm framework doesn't support multiple irq yet. |
| 161 | * we can refer to the crtc to current hardware interrupt occurred through | 161 | * we can refer to the crtc to current hardware interrupt occurred through |
| 162 | * this pipe value. | 162 | * this pipe value. |
| 163 | * @enabled: if the crtc is enabled or not | ||
| 164 | * @event: vblank event that is currently queued for flip | ||
| 165 | * @wait_update: wait all pending planes updates to finish | ||
| 166 | * @pending_update: number of pending plane updates in this crtc | ||
| 167 | * @ops: pointer to callbacks for exynos drm specific functionality | 163 | * @ops: pointer to callbacks for exynos drm specific functionality |
| 168 | * @ctx: A pointer to the crtc's implementation specific context | 164 | * @ctx: A pointer to the crtc's implementation specific context |
| 165 | * @pipe_clk: A pointer to the crtc's pipeline clock. | ||
| 169 | */ | 166 | */ |
| 170 | struct exynos_drm_crtc { | 167 | struct exynos_drm_crtc { |
| 171 | struct drm_crtc base; | 168 | struct drm_crtc base; |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index fc4fda738906..d404de86d5f9 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c | |||
| @@ -1633,7 +1633,6 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi) | |||
| 1633 | { | 1633 | { |
| 1634 | struct device *dev = dsi->dev; | 1634 | struct device *dev = dsi->dev; |
| 1635 | struct device_node *node = dev->of_node; | 1635 | struct device_node *node = dev->of_node; |
| 1636 | struct device_node *ep; | ||
| 1637 | int ret; | 1636 | int ret; |
| 1638 | 1637 | ||
| 1639 | ret = exynos_dsi_of_read_u32(node, "samsung,pll-clock-frequency", | 1638 | ret = exynos_dsi_of_read_u32(node, "samsung,pll-clock-frequency", |
| @@ -1641,32 +1640,21 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi) | |||
| 1641 | if (ret < 0) | 1640 | if (ret < 0) |
| 1642 | return ret; | 1641 | return ret; |
| 1643 | 1642 | ||
| 1644 | ep = of_graph_get_endpoint_by_regs(node, DSI_PORT_OUT, 0); | 1643 | ret = exynos_dsi_of_read_u32(node, "samsung,burst-clock-frequency", |
| 1645 | if (!ep) { | ||
| 1646 | dev_err(dev, "no output port with endpoint specified\n"); | ||
| 1647 | return -EINVAL; | ||
| 1648 | } | ||
| 1649 | |||
| 1650 | ret = exynos_dsi_of_read_u32(ep, "samsung,burst-clock-frequency", | ||
| 1651 | &dsi->burst_clk_rate); | 1644 | &dsi->burst_clk_rate); |
| 1652 | if (ret < 0) | 1645 | if (ret < 0) |
| 1653 | goto end; | 1646 | return ret; |
| 1654 | 1647 | ||
| 1655 | ret = exynos_dsi_of_read_u32(ep, "samsung,esc-clock-frequency", | 1648 | ret = exynos_dsi_of_read_u32(node, "samsung,esc-clock-frequency", |
| 1656 | &dsi->esc_clk_rate); | 1649 | &dsi->esc_clk_rate); |
| 1657 | if (ret < 0) | 1650 | if (ret < 0) |
| 1658 | goto end; | 1651 | return ret; |
| 1659 | |||
| 1660 | of_node_put(ep); | ||
| 1661 | 1652 | ||
| 1662 | dsi->bridge_node = of_graph_get_remote_node(node, DSI_PORT_OUT, 0); | 1653 | dsi->bridge_node = of_graph_get_remote_node(node, DSI_PORT_OUT, 0); |
| 1663 | if (!dsi->bridge_node) | 1654 | if (!dsi->bridge_node) |
| 1664 | return -EINVAL; | 1655 | return -EINVAL; |
| 1665 | 1656 | ||
| 1666 | end: | 1657 | return 0; |
| 1667 | of_node_put(ep); | ||
| 1668 | |||
| 1669 | return ret; | ||
| 1670 | } | 1658 | } |
| 1671 | 1659 | ||
| 1672 | static int exynos_dsi_bind(struct device *dev, struct device *master, | 1660 | static int exynos_dsi_bind(struct device *dev, struct device *master, |
| @@ -1817,6 +1805,10 @@ static int exynos_dsi_probe(struct platform_device *pdev) | |||
| 1817 | 1805 | ||
| 1818 | static int exynos_dsi_remove(struct platform_device *pdev) | 1806 | static int exynos_dsi_remove(struct platform_device *pdev) |
| 1819 | { | 1807 | { |
| 1808 | struct exynos_dsi *dsi = platform_get_drvdata(pdev); | ||
| 1809 | |||
| 1810 | of_node_put(dsi->bridge_node); | ||
| 1811 | |||
| 1820 | pm_runtime_disable(&pdev->dev); | 1812 | pm_runtime_disable(&pdev->dev); |
| 1821 | 1813 | ||
| 1822 | component_del(&pdev->dev, &exynos_dsi_component_ops); | 1814 | component_del(&pdev->dev, &exynos_dsi_component_ops); |
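The exynos_drm_dsi hunks above keep the bridge_node reference taken while parsing the device tree and only drop it in the driver's remove() path. A rough, self-contained analogue of that get-in-parse / put-in-remove pairing; the toy node type and helpers below are made up for illustration and are not the kernel's OF API:

#include <stdio.h>

/* toy refcounted node standing in for struct device_node */
struct node {
	int refcount;
};

static struct node *node_get(struct node *n) { n->refcount++; return n; }
static void node_put(struct node *n)
{
	if (--n->refcount == 0)
		printf("node released\n");
}

struct dsi {
	struct node *bridge_node;	/* held for the driver's lifetime */
};

/* parse step: take the reference and keep it (no put on success) */
static void parse_dt(struct dsi *dsi, struct node *remote)
{
	dsi->bridge_node = node_get(remote);
}

/* remove step: this is where the reference is finally dropped */
static void dsi_remove(struct dsi *dsi)
{
	node_put(dsi->bridge_node);
}

int main(void)
{
	struct node remote = { .refcount = 1 };
	struct dsi dsi;

	parse_dt(&dsi, &remote);
	dsi_remove(&dsi);
	node_put(&remote);		/* drop the original reference too */
	return 0;
}

The point of the pairing is simply that whoever stores the long-lived pointer also owns the matching put, which is what the added of_node_put() in exynos_dsi_remove() restores.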
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c index 5abc69c9630f..f77dcfaade6c 100644 --- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c +++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c | |||
| @@ -760,7 +760,7 @@ static int dsi_parse_dt(struct platform_device *pdev, struct dw_dsi *dsi) | |||
| 760 | * Get the endpoint node. In our case, dsi has one output port1 | 760 | * Get the endpoint node. In our case, dsi has one output port1 |
| 761 | * to which the external HDMI bridge is connected. | 761 | * to which the external HDMI bridge is connected. |
| 762 | */ | 762 | */ |
| 763 | ret = drm_of_find_panel_or_bridge(np, 0, 0, NULL, &dsi->bridge); | 763 | ret = drm_of_find_panel_or_bridge(np, 1, 0, NULL, &dsi->bridge); |
| 764 | if (ret) | 764 | if (ret) |
| 765 | return ret; | 765 | return ret; |
| 766 | 766 | ||
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index dca989eb2d42..24fe04d6307b 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c | |||
| @@ -779,8 +779,26 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id) | |||
| 779 | vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw; | 779 | vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw; |
| 780 | } | 780 | } |
| 781 | 781 | ||
| 782 | static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask) | ||
| 783 | { | ||
| 784 | struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; | ||
| 785 | struct intel_engine_cs *engine; | ||
| 786 | struct intel_vgpu_workload *pos, *n; | ||
| 787 | unsigned int tmp; | ||
| 788 | |||
| 789 | /* free the unsubmitted workloads in the queues. */ | ||
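STUB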
| 790 | for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { | ||
| 791 | list_for_each_entry_safe(pos, n, | ||
| 792 | &vgpu->workload_q_head[engine->id], list) { | ||
| 793 | list_del_init(&pos->list); | ||
| 794 | free_workload(pos); | ||
| 795 | } | ||
| 796 | } | ||
| 797 | } | ||
| 798 | |||
| 782 | void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu) | 799 | void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu) |
| 783 | { | 800 | { |
| 801 | clean_workloads(vgpu, ALL_ENGINES); | ||
| 784 | kmem_cache_destroy(vgpu->workloads); | 802 | kmem_cache_destroy(vgpu->workloads); |
| 785 | } | 803 | } |
| 786 | 804 | ||
| @@ -811,17 +829,9 @@ void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu, | |||
| 811 | { | 829 | { |
| 812 | struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; | 830 | struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; |
| 813 | struct intel_engine_cs *engine; | 831 | struct intel_engine_cs *engine; |
| 814 | struct intel_vgpu_workload *pos, *n; | ||
| 815 | unsigned int tmp; | 832 | unsigned int tmp; |
| 816 | 833 | ||
| 817 | for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { | 834 | clean_workloads(vgpu, engine_mask); |
| 818 | /* free the unsubmited workload in the queue */ | 835 | for_each_engine_masked(engine, dev_priv, engine_mask, tmp) |
| 819 | list_for_each_entry_safe(pos, n, | ||
| 820 | &vgpu->workload_q_head[engine->id], list) { | ||
| 821 | list_del_init(&pos->list); | ||
| 822 | free_workload(pos); | ||
| 823 | } | ||
| 824 | |||
| 825 | init_vgpu_execlist(vgpu, engine->id); | 836 | init_vgpu_execlist(vgpu, engine->id); |
| 826 | } | ||
| 827 | } | 837 | } |
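The new clean_workloads() helper walks each engine's workload queue with list_for_each_entry_safe() so entries can be freed while iterating. A minimal standalone sketch of that save-the-successor-before-freeing pattern, using a hand-rolled singly linked list rather than the kernel list helpers or the GVT types:

#include <stdio.h>
#include <stdlib.h>

/* hypothetical stand-in for a queued workload */
struct workload {
	int id;
	struct workload *next;
};

/* free every node while walking the list: remember the successor before
 * freeing, which is what list_for_each_entry_safe() does for kernel lists */
static void clean_workloads(struct workload **head)
{
	struct workload *pos = *head, *n;

	while (pos) {
		n = pos->next;		/* grab the successor first */
		printf("freeing workload %d\n", pos->id);
		free(pos);		/* now pos may safely go away */
		pos = n;
	}
	*head = NULL;
}

int main(void)
{
	struct workload *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct workload *w = malloc(sizeof(*w));
		w->id = i;
		w->next = head;
		head = w;
	}
	clean_workloads(&head);
	return 0;
}

Factoring this loop out lets both the full cleanup path and the per-engine reset path share one implementation, as the hunk above does.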
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index c995e540ff96..0ffd69654592 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c | |||
| @@ -1366,18 +1366,28 @@ static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset, | |||
| 1366 | void *p_data, unsigned int bytes) | 1366 | void *p_data, unsigned int bytes) |
| 1367 | { | 1367 | { |
| 1368 | struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; | 1368 | struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; |
| 1369 | i915_reg_t reg = {.reg = offset}; | 1369 | u32 v = *(u32 *)p_data; |
| 1370 | |||
| 1371 | if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv)) | ||
| 1372 | return intel_vgpu_default_mmio_write(vgpu, | ||
| 1373 | offset, p_data, bytes); | ||
| 1370 | 1374 | ||
| 1371 | switch (offset) { | 1375 | switch (offset) { |
| 1372 | case 0x4ddc: | 1376 | case 0x4ddc: |
| 1373 | vgpu_vreg(vgpu, offset) = 0x8000003c; | 1377 | /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */ |
| 1374 | /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl */ | 1378 | vgpu_vreg(vgpu, offset) = v & ~(1 << 31); |
| 1375 | I915_WRITE(reg, vgpu_vreg(vgpu, offset)); | ||
| 1376 | break; | 1379 | break; |
| 1377 | case 0x42080: | 1380 | case 0x42080: |
| 1378 | vgpu_vreg(vgpu, offset) = 0x8000; | 1381 | /* bypass WaCompressedResourceDisplayNewHashMode */ |
| 1379 | /* WaCompressedResourceDisplayNewHashMode:skl */ | 1382 | vgpu_vreg(vgpu, offset) = v & ~(1 << 15); |
| 1380 | I915_WRITE(reg, vgpu_vreg(vgpu, offset)); | 1383 | break; |
| 1384 | case 0xe194: | ||
| 1385 | /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */ | ||
| 1386 | vgpu_vreg(vgpu, offset) = v & ~(1 << 8); | ||
| 1387 | break; | ||
| 1388 | case 0x7014: | ||
| 1389 | /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */ | ||
| 1390 | vgpu_vreg(vgpu, offset) = v & ~(1 << 13); | ||
| 1381 | break; | 1391 | break; |
| 1382 | default: | 1392 | default: |
| 1383 | return -EINVAL; | 1393 | return -EINVAL; |
| @@ -1634,7 +1644,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) | |||
| 1634 | MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL); | 1644 | MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL); |
| 1635 | MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, | 1645 | MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, |
| 1636 | NULL, NULL); | 1646 | NULL, NULL); |
| 1637 | MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); | 1647 | MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, |
| 1648 | skl_misc_ctl_write); | ||
| 1638 | MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL); | 1649 | MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL); |
| 1639 | MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL); | 1650 | MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL); |
| 1640 | MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL); | 1651 | MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL); |
| @@ -2568,7 +2579,8 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) | |||
| 2568 | MMIO_D(0x6e570, D_BDW_PLUS); | 2579 | MMIO_D(0x6e570, D_BDW_PLUS); |
| 2569 | MMIO_D(0x65f10, D_BDW_PLUS); | 2580 | MMIO_D(0x65f10, D_BDW_PLUS); |
| 2570 | 2581 | ||
| 2571 | MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); | 2582 | MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, |
| 2583 | skl_misc_ctl_write); | ||
| 2572 | MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); | 2584 | MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); |
| 2573 | MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); | 2585 | MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); |
| 2574 | MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); | 2586 | MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); |
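Instead of forcing fixed values into the virtual registers, the reworked skl_misc_ctl_write() latches the guest's write with the relevant workaround bit cleared. A tiny standalone illustration of that mask-then-store step; the register file, index and bit position here are placeholders, not the real vGPU state:

#include <stdio.h>

static unsigned int vreg[4];	/* toy virtual register file */

/* store the guest's value but clear one workaround bit, mirroring the
 * "vgpu_vreg(vgpu, offset) = v & ~(1 << 31)" style of the new handler */
static void misc_ctl_write(unsigned int idx, unsigned int v, unsigned int wa_bit)
{
	vreg[idx] = v & ~(1u << wa_bit);
}

int main(void)
{
	misc_ctl_write(0, 0x8000003c, 31);
	printf("latched value: 0x%08x\n", vreg[0]);	/* prints 0x0000003c */
	return 0;
}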
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index d689e511744e..4bd1467c17b1 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
| @@ -292,6 +292,8 @@ static int per_file_stats(int id, void *ptr, void *data) | |||
| 292 | struct file_stats *stats = data; | 292 | struct file_stats *stats = data; |
| 293 | struct i915_vma *vma; | 293 | struct i915_vma *vma; |
| 294 | 294 | ||
| 295 | lockdep_assert_held(&obj->base.dev->struct_mutex); | ||
| 296 | |||
| 295 | stats->count++; | 297 | stats->count++; |
| 296 | stats->total += obj->base.size; | 298 | stats->total += obj->base.size; |
| 297 | if (!obj->bind_count) | 299 | if (!obj->bind_count) |
| @@ -476,6 +478,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data) | |||
| 476 | struct drm_i915_gem_request *request; | 478 | struct drm_i915_gem_request *request; |
| 477 | struct task_struct *task; | 479 | struct task_struct *task; |
| 478 | 480 | ||
| 481 | mutex_lock(&dev->struct_mutex); | ||
| 482 | |||
| 479 | memset(&stats, 0, sizeof(stats)); | 483 | memset(&stats, 0, sizeof(stats)); |
| 480 | stats.file_priv = file->driver_priv; | 484 | stats.file_priv = file->driver_priv; |
| 481 | spin_lock(&file->table_lock); | 485 | spin_lock(&file->table_lock); |
| @@ -487,7 +491,6 @@ static int i915_gem_object_info(struct seq_file *m, void *data) | |||
| 487 | * still alive (e.g. get_pid(current) => fork() => exit()). | 491 | * still alive (e.g. get_pid(current) => fork() => exit()). |
| 488 | * Therefore, we need to protect this ->comm access using RCU. | 492 | * Therefore, we need to protect this ->comm access using RCU. |
| 489 | */ | 493 | */ |
| 490 | mutex_lock(&dev->struct_mutex); | ||
| 491 | request = list_first_entry_or_null(&file_priv->mm.request_list, | 494 | request = list_first_entry_or_null(&file_priv->mm.request_list, |
| 492 | struct drm_i915_gem_request, | 495 | struct drm_i915_gem_request, |
| 493 | client_link); | 496 | client_link); |
| @@ -497,6 +500,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data) | |||
| 497 | PIDTYPE_PID); | 500 | PIDTYPE_PID); |
| 498 | print_file_stats(m, task ? task->comm : "<unknown>", stats); | 501 | print_file_stats(m, task ? task->comm : "<unknown>", stats); |
| 499 | rcu_read_unlock(); | 502 | rcu_read_unlock(); |
| 503 | |||
| 500 | mutex_unlock(&dev->struct_mutex); | 504 | mutex_unlock(&dev->struct_mutex); |
| 501 | } | 505 | } |
| 502 | mutex_unlock(&dev->filelist_mutex); | 506 | mutex_unlock(&dev->filelist_mutex); |
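per_file_stats() now asserts struct_mutex with lockdep_assert_held(), and the debugfs caller takes the mutex before the whole per-file walk rather than only around the request lookup. A toy userspace analogue of widening a lock to cover a helper that insists it is called locked, with a pthread mutex and a hand-rolled held flag standing in for lockdep:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int lock_held;			/* poor man's lockdep state */

/* like per_file_stats(), this helper insists the lock is already held */
static void per_file_stats(int size, int *total)
{
	assert(lock_held);		/* analogue of lockdep_assert_held() */
	*total += size;
}

int main(void)
{
	int total = 0;

	pthread_mutex_lock(&lock);	/* take the lock before the whole walk */
	lock_held = 1;
	for (int i = 1; i <= 3; i++)
		per_file_stats(i * 4096, &total);
	lock_held = 0;
	pthread_mutex_unlock(&lock);	/* drop it only after reporting */

	printf("total: %d bytes\n", total);
	return 0;
}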
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 3036d4835b0f..48428672fc6e 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
| @@ -1235,6 +1235,15 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 1235 | goto out_fini; | 1235 | goto out_fini; |
| 1236 | 1236 | ||
| 1237 | pci_set_drvdata(pdev, &dev_priv->drm); | 1237 | pci_set_drvdata(pdev, &dev_priv->drm); |
| 1238 | /* | ||
| 1239 | * Disable the system suspend direct complete optimization, which can | ||
| 1240 | * leave the device suspended skipping the driver's suspend handlers | ||
| 1241 | * if the device was already runtime suspended. This is needed due to | ||
| 1242 | * the difference in our runtime and system suspend sequence and | ||
| 1243 | * because the HDA driver may require us to enable the audio power | ||
| 1244 | * domain during system suspend. | ||
| 1245 | */ | ||
| 1246 | pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME; | ||
| 1238 | 1247 | ||
| 1239 | ret = i915_driver_init_early(dev_priv, ent); | 1248 | ret = i915_driver_init_early(dev_priv, ent); |
| 1240 | if (ret < 0) | 1249 | if (ret < 0) |
| @@ -1272,10 +1281,6 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 1272 | 1281 | ||
| 1273 | dev_priv->ipc_enabled = false; | 1282 | dev_priv->ipc_enabled = false; |
| 1274 | 1283 | ||
| 1275 | /* Everything is in place, we can now relax! */ | ||
| 1276 | DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n", | ||
| 1277 | driver.name, driver.major, driver.minor, driver.patchlevel, | ||
| 1278 | driver.date, pci_name(pdev), dev_priv->drm.primary->index); | ||
| 1279 | if (IS_ENABLED(CONFIG_DRM_I915_DEBUG)) | 1284 | if (IS_ENABLED(CONFIG_DRM_I915_DEBUG)) |
| 1280 | DRM_INFO("DRM_I915_DEBUG enabled\n"); | 1285 | DRM_INFO("DRM_I915_DEBUG enabled\n"); |
| 1281 | if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) | 1286 | if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index c9b0949f6c1a..2c453a4e97d5 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
| @@ -562,7 +562,8 @@ struct intel_link_m_n { | |||
| 562 | 562 | ||
| 563 | void intel_link_compute_m_n(int bpp, int nlanes, | 563 | void intel_link_compute_m_n(int bpp, int nlanes, |
| 564 | int pixel_clock, int link_clock, | 564 | int pixel_clock, int link_clock, |
| 565 | struct intel_link_m_n *m_n); | 565 | struct intel_link_m_n *m_n, |
| 566 | bool reduce_m_n); | ||
| 566 | 567 | ||
| 567 | /* Interface history: | 568 | /* Interface history: |
| 568 | * | 569 | * |
| @@ -2990,6 +2991,16 @@ static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv) | |||
| 2990 | return false; | 2991 | return false; |
| 2991 | } | 2992 | } |
| 2992 | 2993 | ||
| 2994 | static inline bool | ||
| 2995 | intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv) | ||
| 2996 | { | ||
| 2997 | #ifdef CONFIG_INTEL_IOMMU | ||
| 2998 | if (IS_BROXTON(dev_priv) && intel_iommu_gfx_mapped) | ||
| 2999 | return true; | ||
| 3000 | #endif | ||
| 3001 | return false; | ||
| 3002 | } | ||
| 3003 | |||
| 2993 | int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, | 3004 | int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, |
| 2994 | int enable_ppgtt); | 3005 | int enable_ppgtt); |
| 2995 | 3006 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index b6ac3df18b58..615f0a855222 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
| @@ -2285,8 +2285,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) | |||
| 2285 | struct page *page; | 2285 | struct page *page; |
| 2286 | unsigned long last_pfn = 0; /* suppress gcc warning */ | 2286 | unsigned long last_pfn = 0; /* suppress gcc warning */ |
| 2287 | unsigned int max_segment; | 2287 | unsigned int max_segment; |
| 2288 | gfp_t noreclaim; | ||
| 2288 | int ret; | 2289 | int ret; |
| 2289 | gfp_t gfp; | ||
| 2290 | 2290 | ||
| 2291 | /* Assert that the object is not currently in any GPU domain. As it | 2291 | /* Assert that the object is not currently in any GPU domain. As it |
| 2292 | * wasn't in the GTT, there shouldn't be any way it could have been in | 2292 | * wasn't in the GTT, there shouldn't be any way it could have been in |
| @@ -2315,22 +2315,31 @@ rebuild_st: | |||
| 2315 | * Fail silently without starting the shrinker | 2315 | * Fail silently without starting the shrinker |
| 2316 | */ | 2316 | */ |
| 2317 | mapping = obj->base.filp->f_mapping; | 2317 | mapping = obj->base.filp->f_mapping; |
| 2318 | gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM)); | 2318 | noreclaim = mapping_gfp_constraint(mapping, |
| 2319 | gfp |= __GFP_NORETRY | __GFP_NOWARN; | 2319 | ~(__GFP_IO | __GFP_RECLAIM)); |
| 2320 | noreclaim |= __GFP_NORETRY | __GFP_NOWARN; | ||
| 2321 | |||
| 2320 | sg = st->sgl; | 2322 | sg = st->sgl; |
| 2321 | st->nents = 0; | 2323 | st->nents = 0; |
| 2322 | for (i = 0; i < page_count; i++) { | 2324 | for (i = 0; i < page_count; i++) { |
| 2323 | page = shmem_read_mapping_page_gfp(mapping, i, gfp); | 2325 | const unsigned int shrink[] = { |
| 2324 | if (unlikely(IS_ERR(page))) { | 2326 | I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE, |
| 2325 | i915_gem_shrink(dev_priv, | 2327 | 0, |
| 2326 | page_count, | 2328 | }, *s = shrink; |
| 2327 | I915_SHRINK_BOUND | | 2329 | gfp_t gfp = noreclaim; |
| 2328 | I915_SHRINK_UNBOUND | | 2330 | |
| 2329 | I915_SHRINK_PURGEABLE); | 2331 | do { |
| 2330 | page = shmem_read_mapping_page_gfp(mapping, i, gfp); | 2332 | page = shmem_read_mapping_page_gfp(mapping, i, gfp); |
| 2331 | } | 2333 | if (likely(!IS_ERR(page))) |
| 2332 | if (unlikely(IS_ERR(page))) { | 2334 | break; |
| 2333 | gfp_t reclaim; | 2335 | |
| 2336 | if (!*s) { | ||
| 2337 | ret = PTR_ERR(page); | ||
| 2338 | goto err_sg; | ||
| 2339 | } | ||
| 2340 | |||
| 2341 | i915_gem_shrink(dev_priv, 2 * page_count, *s++); | ||
| 2342 | cond_resched(); | ||
| 2334 | 2343 | ||
| 2335 | /* We've tried hard to allocate the memory by reaping | 2344 | /* We've tried hard to allocate the memory by reaping |
| 2336 | * our own buffer, now let the real VM do its job and | 2345 | * our own buffer, now let the real VM do its job and |
| @@ -2340,15 +2349,26 @@ rebuild_st: | |||
| 2340 | * defer the oom here by reporting the ENOMEM back | 2349 | * defer the oom here by reporting the ENOMEM back |
| 2341 | * to userspace. | 2350 | * to userspace. |
| 2342 | */ | 2351 | */ |
| 2343 | reclaim = mapping_gfp_mask(mapping); | 2352 | if (!*s) { |
| 2344 | reclaim |= __GFP_NORETRY; /* reclaim, but no oom */ | 2353 | /* reclaim and warn, but no oom */ |
| 2345 | 2354 | gfp = mapping_gfp_mask(mapping); | |
| 2346 | page = shmem_read_mapping_page_gfp(mapping, i, reclaim); | 2355 | |
| 2347 | if (IS_ERR(page)) { | 2356 | /* Our bo are always dirty and so we require |
| 2348 | ret = PTR_ERR(page); | 2357 | * kswapd to reclaim our pages (direct reclaim |
| 2349 | goto err_sg; | 2358 | * does not effectively begin pageout of our |
| 2359 | * buffers on its own). However, direct reclaim | ||
| 2360 | * only waits for kswapd when under allocation | ||
| 2361 | * congestion. So as a result __GFP_RECLAIM is | ||
| 2362 | * unreliable and fails to actually reclaim our | ||
| 2363 | * dirty pages -- unless you try over and over | ||
| 2364 | * again with !__GFP_NORETRY. However, we still | ||
| 2365 | * want to fail this allocation rather than | ||
| 2366 | * trigger the out-of-memory killer and for | ||
| 2367 | * this we want the future __GFP_MAYFAIL. | ||
| 2368 | */ | ||
| 2350 | } | 2369 | } |
| 2351 | } | 2370 | } while (1); |
| 2371 | |||
| 2352 | if (!i || | 2372 | if (!i || |
| 2353 | sg->length >= max_segment || | 2373 | sg->length >= max_segment || |
| 2354 | page_to_pfn(page) != last_pfn + 1) { | 2374 | page_to_pfn(page) != last_pfn + 1) { |
| @@ -3298,6 +3318,10 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags) | |||
| 3298 | { | 3318 | { |
| 3299 | int ret; | 3319 | int ret; |
| 3300 | 3320 | ||
| 3321 | /* If the device is asleep, we have no requests outstanding */ | ||
| 3322 | if (!READ_ONCE(i915->gt.awake)) | ||
| 3323 | return 0; | ||
| 3324 | |||
| 3301 | if (flags & I915_WAIT_LOCKED) { | 3325 | if (flags & I915_WAIT_LOCKED) { |
| 3302 | struct i915_gem_timeline *tl; | 3326 | struct i915_gem_timeline *tl; |
| 3303 | 3327 | ||
| @@ -4218,6 +4242,7 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size) | |||
| 4218 | 4242 | ||
| 4219 | mapping = obj->base.filp->f_mapping; | 4243 | mapping = obj->base.filp->f_mapping; |
| 4220 | mapping_set_gfp_mask(mapping, mask); | 4244 | mapping_set_gfp_mask(mapping, mask); |
| 4245 | GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM)); | ||
| 4221 | 4246 | ||
| 4222 | i915_gem_object_init(obj, &i915_gem_object_ops); | 4247 | i915_gem_object_init(obj, &i915_gem_object_ops); |
| 4223 | 4248 | ||
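The rebuilt allocation loop in i915_gem_object_get_pages_gtt() retries the shmem lookup after each entry of a small shrink-phase table (terminated by 0) and only reaches for a blocking gfp mask once every phase has been tried. A simplified, self-contained sketch of that escalation pattern; the allocator, flag values and success threshold below are stand-ins, not the i915 ones:

#include <stdio.h>
#include <stdbool.h>

static int reclaimed;

/* pretend allocator: succeeds once enough has been reclaimed,
 * or when the caller finally allows a blocking attempt */
static bool try_alloc(bool may_block)
{
	return reclaimed >= 2 || may_block;
}

static void shrink(unsigned int flags)
{
	printf("shrink pass, flags 0x%x\n", flags);
	reclaimed++;
}

int main(void)
{
	/* escalation table terminated by 0, same shape as the i915 loop */
	const unsigned int phases[] = { 0x1 | 0x2 | 0x4, 0 };
	const unsigned int *s = phases;
	bool may_block = false;

	for (;;) {
		if (try_alloc(may_block)) {
			printf("allocation succeeded\n");
			return 0;
		}
		if (!*s && may_block) {
			printf("allocation failed\n");	/* out of options */
			return 1;
		}
		if (*s)
			shrink(*s++);		/* progressively harsher pass */
		else
			may_block = true;	/* last try may block and reclaim */
	}
}

The escalation keeps the common case cheap (no reclaim at all) while still giving the allocation every chance to succeed before reporting failure to userspace.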
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index a3e59c8ef27b..9ad13eeed904 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
| @@ -546,11 +546,12 @@ repeat: | |||
| 546 | } | 546 | } |
| 547 | 547 | ||
| 548 | static int | 548 | static int |
| 549 | i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, | 549 | i915_gem_execbuffer_relocate_entry(struct i915_vma *vma, |
| 550 | struct eb_vmas *eb, | 550 | struct eb_vmas *eb, |
| 551 | struct drm_i915_gem_relocation_entry *reloc, | 551 | struct drm_i915_gem_relocation_entry *reloc, |
| 552 | struct reloc_cache *cache) | 552 | struct reloc_cache *cache) |
| 553 | { | 553 | { |
| 554 | struct drm_i915_gem_object *obj = vma->obj; | ||
| 554 | struct drm_i915_private *dev_priv = to_i915(obj->base.dev); | 555 | struct drm_i915_private *dev_priv = to_i915(obj->base.dev); |
| 555 | struct drm_gem_object *target_obj; | 556 | struct drm_gem_object *target_obj; |
| 556 | struct drm_i915_gem_object *target_i915_obj; | 557 | struct drm_i915_gem_object *target_i915_obj; |
| @@ -628,6 +629,16 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, | |||
| 628 | return -EINVAL; | 629 | return -EINVAL; |
| 629 | } | 630 | } |
| 630 | 631 | ||
| 632 | /* | ||
| 633 | * If we write into the object, we need to force the synchronisation | ||
| 634 | * barrier, either with an asynchronous clflush or if we executed the | ||
| 635 | * patching using the GPU (though that should be serialised by the | ||
| 636 | * timeline). To be completely sure, and since we are required to | ||
| 637 | * do relocations we are already stalling, disable the user's opt | ||
| 638 | * of our synchronisation. | ||
| 639 | */ | ||
| 640 | vma->exec_entry->flags &= ~EXEC_OBJECT_ASYNC; | ||
| 641 | |||
| 631 | ret = relocate_entry(obj, reloc, cache, target_offset); | 642 | ret = relocate_entry(obj, reloc, cache, target_offset); |
| 632 | if (ret) | 643 | if (ret) |
| 633 | return ret; | 644 | return ret; |
| @@ -678,7 +689,7 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma, | |||
| 678 | do { | 689 | do { |
| 679 | u64 offset = r->presumed_offset; | 690 | u64 offset = r->presumed_offset; |
| 680 | 691 | ||
| 681 | ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r, &cache); | 692 | ret = i915_gem_execbuffer_relocate_entry(vma, eb, r, &cache); |
| 682 | if (ret) | 693 | if (ret) |
| 683 | goto out; | 694 | goto out; |
| 684 | 695 | ||
| @@ -726,7 +737,7 @@ i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma, | |||
| 726 | 737 | ||
| 727 | reloc_cache_init(&cache, eb->i915); | 738 | reloc_cache_init(&cache, eb->i915); |
| 728 | for (i = 0; i < entry->relocation_count; i++) { | 739 | for (i = 0; i < entry->relocation_count; i++) { |
| 729 | ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i], &cache); | 740 | ret = i915_gem_execbuffer_relocate_entry(vma, eb, &relocs[i], &cache); |
| 730 | if (ret) | 741 | if (ret) |
| 731 | break; | 742 | break; |
| 732 | } | 743 | } |
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index a0563e18d753..f1989b8792dd 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
| @@ -2191,6 +2191,101 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm, | |||
| 2191 | gen8_set_pte(>t_base[i], scratch_pte); | 2191 | gen8_set_pte(>t_base[i], scratch_pte); |
| 2192 | } | 2192 | } |
| 2193 | 2193 | ||
| 2194 | static void bxt_vtd_ggtt_wa(struct i915_address_space *vm) | ||
| 2195 | { | ||
| 2196 | struct drm_i915_private *dev_priv = vm->i915; | ||
| 2197 | |||
| 2198 | /* | ||
| 2199 | * Make sure the internal GAM fifo has been cleared of all GTT | ||
| 2200 | * writes before exiting stop_machine(). This guarantees that | ||
| 2201 | * any aperture accesses waiting to start in another process | ||
| 2202 | * cannot back up behind the GTT writes causing a hang. | ||
| 2203 | * The register can be any arbitrary GAM register. | ||
| 2204 | */ | ||
| 2205 | POSTING_READ(GFX_FLSH_CNTL_GEN6); | ||
| 2206 | } | ||
| 2207 | |||
| 2208 | struct insert_page { | ||
| 2209 | struct i915_address_space *vm; | ||
| 2210 | dma_addr_t addr; | ||
| 2211 | u64 offset; | ||
| 2212 | enum i915_cache_level level; | ||
| 2213 | }; | ||
| 2214 | |||
| 2215 | static int bxt_vtd_ggtt_insert_page__cb(void *_arg) | ||
| 2216 | { | ||
| 2217 | struct insert_page *arg = _arg; | ||
| 2218 | |||
| 2219 | gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0); | ||
| 2220 | bxt_vtd_ggtt_wa(arg->vm); | ||
| 2221 | |||
| 2222 | return 0; | ||
| 2223 | } | ||
| 2224 | |||
| 2225 | static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm, | ||
| 2226 | dma_addr_t addr, | ||
| 2227 | u64 offset, | ||
| 2228 | enum i915_cache_level level, | ||
| 2229 | u32 unused) | ||
| 2230 | { | ||
| 2231 | struct insert_page arg = { vm, addr, offset, level }; | ||
| 2232 | |||
| 2233 | stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL); | ||
| 2234 | } | ||
| 2235 | |||
| 2236 | struct insert_entries { | ||
| 2237 | struct i915_address_space *vm; | ||
| 2238 | struct sg_table *st; | ||
| 2239 | u64 start; | ||
| 2240 | enum i915_cache_level level; | ||
| 2241 | }; | ||
| 2242 | |||
| 2243 | static int bxt_vtd_ggtt_insert_entries__cb(void *_arg) | ||
| 2244 | { | ||
| 2245 | struct insert_entries *arg = _arg; | ||
| 2246 | |||
| 2247 | gen8_ggtt_insert_entries(arg->vm, arg->st, arg->start, arg->level, 0); | ||
| 2248 | bxt_vtd_ggtt_wa(arg->vm); | ||
| 2249 | |||
| 2250 | return 0; | ||
| 2251 | } | ||
| 2252 | |||
| 2253 | static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm, | ||
| 2254 | struct sg_table *st, | ||
| 2255 | u64 start, | ||
| 2256 | enum i915_cache_level level, | ||
| 2257 | u32 unused) | ||
| 2258 | { | ||
| 2259 | struct insert_entries arg = { vm, st, start, level }; | ||
| 2260 | |||
| 2261 | stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL); | ||
| 2262 | } | ||
| 2263 | |||
| 2264 | struct clear_range { | ||
| 2265 | struct i915_address_space *vm; | ||
| 2266 | u64 start; | ||
| 2267 | u64 length; | ||
| 2268 | }; | ||
| 2269 | |||
| 2270 | static int bxt_vtd_ggtt_clear_range__cb(void *_arg) | ||
| 2271 | { | ||
| 2272 | struct clear_range *arg = _arg; | ||
| 2273 | |||
| 2274 | gen8_ggtt_clear_range(arg->vm, arg->start, arg->length); | ||
| 2275 | bxt_vtd_ggtt_wa(arg->vm); | ||
| 2276 | |||
| 2277 | return 0; | ||
| 2278 | } | ||
| 2279 | |||
| 2280 | static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm, | ||
| 2281 | u64 start, | ||
| 2282 | u64 length) | ||
| 2283 | { | ||
| 2284 | struct clear_range arg = { vm, start, length }; | ||
| 2285 | |||
| 2286 | stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL); | ||
| 2287 | } | ||
| 2288 | |||
| 2194 | static void gen6_ggtt_clear_range(struct i915_address_space *vm, | 2289 | static void gen6_ggtt_clear_range(struct i915_address_space *vm, |
| 2195 | u64 start, u64 length) | 2290 | u64 start, u64 length) |
| 2196 | { | 2291 | { |
| @@ -2313,7 +2408,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma, | |||
| 2313 | appgtt->base.allocate_va_range) { | 2408 | appgtt->base.allocate_va_range) { |
| 2314 | ret = appgtt->base.allocate_va_range(&appgtt->base, | 2409 | ret = appgtt->base.allocate_va_range(&appgtt->base, |
| 2315 | vma->node.start, | 2410 | vma->node.start, |
| 2316 | vma->node.size); | 2411 | vma->size); |
| 2317 | if (ret) | 2412 | if (ret) |
| 2318 | goto err_pages; | 2413 | goto err_pages; |
| 2319 | } | 2414 | } |
| @@ -2785,6 +2880,14 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) | |||
| 2785 | 2880 | ||
| 2786 | ggtt->base.insert_entries = gen8_ggtt_insert_entries; | 2881 | ggtt->base.insert_entries = gen8_ggtt_insert_entries; |
| 2787 | 2882 | ||
| 2883 | /* Serialize GTT updates with aperture access on BXT if VT-d is on. */ | ||
| 2884 | if (intel_ggtt_update_needs_vtd_wa(dev_priv)) { | ||
| 2885 | ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL; | ||
| 2886 | ggtt->base.insert_page = bxt_vtd_ggtt_insert_page__BKL; | ||
| 2887 | if (ggtt->base.clear_range != nop_clear_range) | ||
| 2888 | ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL; | ||
| 2889 | } | ||
| 2890 | |||
| 2788 | ggtt->invalidate = gen6_ggtt_invalidate; | 2891 | ggtt->invalidate = gen6_ggtt_invalidate; |
| 2789 | 2892 | ||
| 2790 | return ggtt_probe_common(ggtt, size); | 2893 | return ggtt_probe_common(ggtt, size); |
| @@ -2997,7 +3100,8 @@ void i915_ggtt_enable_guc(struct drm_i915_private *i915) | |||
| 2997 | 3100 | ||
| 2998 | void i915_ggtt_disable_guc(struct drm_i915_private *i915) | 3101 | void i915_ggtt_disable_guc(struct drm_i915_private *i915) |
| 2999 | { | 3102 | { |
| 3000 | i915->ggtt.invalidate = gen6_ggtt_invalidate; | 3103 | if (i915->ggtt.invalidate == guc_ggtt_invalidate) |
| 3104 | i915->ggtt.invalidate = gen6_ggtt_invalidate; | ||
| 3001 | } | 3105 | } |
| 3002 | 3106 | ||
| 3003 | void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv) | 3107 | void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv) |
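Each BXT VT-d workaround path above packs its typed arguments into a small struct so the real GGTT update can run behind stop_machine(), which only accepts an int (*fn)(void *) callback. The shape of that trampoline, sketched in plain userspace C where run_serialized() merely stands in for stop_machine() (a kernel-only primitive) and the page-insert itself is reduced to a printf:

#include <stdio.h>
#include <stdint.h>

/* stand-in for stop_machine(): here it just invokes the callback */
static int run_serialized(int (*fn)(void *), void *data)
{
	return fn(data);
}

struct insert_page {
	uint64_t addr;
	uint64_t offset;
};

/* the real work, with typed arguments */
static void insert_page(uint64_t addr, uint64_t offset)
{
	printf("map 0x%llx at offset 0x%llx\n",
	       (unsigned long long)addr, (unsigned long long)offset);
}

/* trampoline matching the int (*)(void *) signature */
static int insert_page_cb(void *_arg)
{
	struct insert_page *arg = _arg;

	insert_page(arg->addr, arg->offset);
	return 0;
}

int main(void)
{
	struct insert_page arg = { 0x1000, 0x40000 };

	return run_serialized(insert_page_cb, &arg);
}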
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c index 5ddbc9499775..a74d0ac737cb 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ b/drivers/gpu/drm/i915/i915_gem_request.c | |||
| @@ -623,7 +623,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine, | |||
| 623 | * GPU processing the request, we never over-estimate the | 623 | * GPU processing the request, we never over-estimate the |
| 624 | * position of the head. | 624 | * position of the head. |
| 625 | */ | 625 | */ |
| 626 | req->head = req->ring->tail; | 626 | req->head = req->ring->emit; |
| 627 | 627 | ||
| 628 | /* Check that we didn't interrupt ourselves with a new request */ | 628 | /* Check that we didn't interrupt ourselves with a new request */ |
| 629 | GEM_BUG_ON(req->timeline->seqno != req->fence.seqno); | 629 | GEM_BUG_ON(req->timeline->seqno != req->fence.seqno); |
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index 129ed303a6c4..57d9f7f4ef15 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c | |||
| @@ -59,9 +59,6 @@ static void i915_gem_shrinker_unlock(struct drm_device *dev, bool unlock) | |||
| 59 | return; | 59 | return; |
| 60 | 60 | ||
| 61 | mutex_unlock(&dev->struct_mutex); | 61 | mutex_unlock(&dev->struct_mutex); |
| 62 | |||
| 63 | /* expedite the RCU grace period to free some request slabs */ | ||
| 64 | synchronize_rcu_expedited(); | ||
| 65 | } | 62 | } |
| 66 | 63 | ||
| 67 | static bool any_vma_pinned(struct drm_i915_gem_object *obj) | 64 | static bool any_vma_pinned(struct drm_i915_gem_object *obj) |
| @@ -274,8 +271,6 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv) | |||
| 274 | I915_SHRINK_ACTIVE); | 271 | I915_SHRINK_ACTIVE); |
| 275 | intel_runtime_pm_put(dev_priv); | 272 | intel_runtime_pm_put(dev_priv); |
| 276 | 273 | ||
| 277 | synchronize_rcu(); /* wait for our earlier RCU delayed slab frees */ | ||
| 278 | |||
| 279 | return freed; | 274 | return freed; |
| 280 | } | 275 | } |
| 281 | 276 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index a0d6d4317a49..fb5231f98c0d 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c | |||
| @@ -278,7 +278,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj, | |||
| 278 | obj->mm.quirked = false; | 278 | obj->mm.quirked = false; |
| 279 | } | 279 | } |
| 280 | if (!i915_gem_object_is_tiled(obj)) { | 280 | if (!i915_gem_object_is_tiled(obj)) { |
| 281 | GEM_BUG_ON(!obj->mm.quirked); | 281 | GEM_BUG_ON(obj->mm.quirked); |
| 282 | __i915_gem_object_pin_pages(obj); | 282 | __i915_gem_object_pin_pages(obj); |
| 283 | obj->mm.quirked = true; | 283 | obj->mm.quirked = true; |
| 284 | } | 284 | } |
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index 1642fff9cf13..ab5140ba108d 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c | |||
| @@ -480,9 +480,7 @@ static void guc_wq_item_append(struct i915_guc_client *client, | |||
| 480 | GEM_BUG_ON(freespace < wqi_size); | 480 | GEM_BUG_ON(freespace < wqi_size); |
| 481 | 481 | ||
| 482 | /* The GuC firmware wants the tail index in QWords, not bytes */ | 482 | /* The GuC firmware wants the tail index in QWords, not bytes */ |
| 483 | tail = rq->tail; | 483 | tail = intel_ring_set_tail(rq->ring, rq->tail) >> 3; |
| 484 | assert_ring_tail_valid(rq->ring, rq->tail); | ||
| 485 | tail >>= 3; | ||
| 486 | GEM_BUG_ON(tail > WQ_RING_TAIL_MAX); | 484 | GEM_BUG_ON(tail > WQ_RING_TAIL_MAX); |
| 487 | 485 | ||
| 488 | /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we | 486 | /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index fd97fe00cd0d..190f6aa5d15e 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
| @@ -2953,7 +2953,6 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) | |||
| 2953 | u32 pipestat_mask; | 2953 | u32 pipestat_mask; |
| 2954 | u32 enable_mask; | 2954 | u32 enable_mask; |
| 2955 | enum pipe pipe; | 2955 | enum pipe pipe; |
| 2956 | u32 val; | ||
| 2957 | 2956 | ||
| 2958 | pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | | 2957 | pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | |
| 2959 | PIPE_CRC_DONE_INTERRUPT_STATUS; | 2958 | PIPE_CRC_DONE_INTERRUPT_STATUS; |
| @@ -2964,18 +2963,16 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) | |||
| 2964 | 2963 | ||
| 2965 | enable_mask = I915_DISPLAY_PORT_INTERRUPT | | 2964 | enable_mask = I915_DISPLAY_PORT_INTERRUPT | |
| 2966 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | 2965 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | |
| 2967 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; | 2966 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | |
| 2967 | I915_LPE_PIPE_A_INTERRUPT | | ||
| 2968 | I915_LPE_PIPE_B_INTERRUPT; | ||
| 2969 | |||
| 2968 | if (IS_CHERRYVIEW(dev_priv)) | 2970 | if (IS_CHERRYVIEW(dev_priv)) |
| 2969 | enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; | 2971 | enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | |
| 2972 | I915_LPE_PIPE_C_INTERRUPT; | ||
| 2970 | 2973 | ||
| 2971 | WARN_ON(dev_priv->irq_mask != ~0); | 2974 | WARN_ON(dev_priv->irq_mask != ~0); |
| 2972 | 2975 | ||
| 2973 | val = (I915_LPE_PIPE_A_INTERRUPT | | ||
| 2974 | I915_LPE_PIPE_B_INTERRUPT | | ||
| 2975 | I915_LPE_PIPE_C_INTERRUPT); | ||
| 2976 | |||
| 2977 | enable_mask |= val; | ||
| 2978 | |||
| 2979 | dev_priv->irq_mask = ~enable_mask; | 2976 | dev_priv->irq_mask = ~enable_mask; |
| 2980 | 2977 | ||
| 2981 | GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask); | 2978 | GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask); |
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index f87b0c4e564d..1a78363c7f4a 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c | |||
| @@ -208,7 +208,7 @@ static const struct intel_device_info intel_ironlake_d_info = { | |||
| 208 | static const struct intel_device_info intel_ironlake_m_info = { | 208 | static const struct intel_device_info intel_ironlake_m_info = { |
| 209 | GEN5_FEATURES, | 209 | GEN5_FEATURES, |
| 210 | .platform = INTEL_IRONLAKE, | 210 | .platform = INTEL_IRONLAKE, |
| 211 | .is_mobile = 1, | 211 | .is_mobile = 1, .has_fbc = 1, |
| 212 | }; | 212 | }; |
| 213 | 213 | ||
| 214 | #define GEN6_FEATURES \ | 214 | #define GEN6_FEATURES \ |
| @@ -390,7 +390,6 @@ static const struct intel_device_info intel_skylake_gt3_info = { | |||
| 390 | .has_hw_contexts = 1, \ | 390 | .has_hw_contexts = 1, \ |
| 391 | .has_logical_ring_contexts = 1, \ | 391 | .has_logical_ring_contexts = 1, \ |
| 392 | .has_guc = 1, \ | 392 | .has_guc = 1, \ |
| 393 | .has_decoupled_mmio = 1, \ | ||
| 394 | .has_aliasing_ppgtt = 1, \ | 393 | .has_aliasing_ppgtt = 1, \ |
| 395 | .has_full_ppgtt = 1, \ | 394 | .has_full_ppgtt = 1, \ |
| 396 | .has_full_48bit_ppgtt = 1, \ | 395 | .has_full_48bit_ppgtt = 1, \ |
diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h index c0cb2974caac..2cfe96d3e5d1 100644 --- a/drivers/gpu/drm/i915/i915_pvinfo.h +++ b/drivers/gpu/drm/i915/i915_pvinfo.h | |||
| @@ -36,10 +36,6 @@ | |||
| 36 | #define VGT_VERSION_MAJOR 1 | 36 | #define VGT_VERSION_MAJOR 1 |
| 37 | #define VGT_VERSION_MINOR 0 | 37 | #define VGT_VERSION_MINOR 0 |
| 38 | 38 | ||
| 39 | #define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor)) | ||
| 40 | #define INTEL_VGT_IF_VERSION \ | ||
| 41 | INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR) | ||
| 42 | |||
| 43 | /* | 39 | /* |
| 44 | * notifications from guest to vgpu device model | 40 | * notifications from guest to vgpu device model |
| 45 | */ | 41 | */ |
| @@ -55,8 +51,8 @@ enum vgt_g2v_type { | |||
| 55 | 51 | ||
| 56 | struct vgt_if { | 52 | struct vgt_if { |
| 57 | u64 magic; /* VGT_MAGIC */ | 53 | u64 magic; /* VGT_MAGIC */ |
| 58 | uint16_t version_major; | 54 | u16 version_major; |
| 59 | uint16_t version_minor; | 55 | u16 version_minor; |
| 60 | u32 vgt_id; /* ID of vGT instance */ | 56 | u32 vgt_id; /* ID of vGT instance */ |
| 61 | u32 rsv1[12]; /* pad to offset 0x40 */ | 57 | u32 rsv1[12]; /* pad to offset 0x40 */ |
| 62 | /* | 58 | /* |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 5a7c63e64381..65b837e96fe6 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
| @@ -8280,7 +8280,7 @@ enum { | |||
| 8280 | 8280 | ||
| 8281 | /* MIPI DSI registers */ | 8281 | /* MIPI DSI registers */ |
| 8282 | 8282 | ||
| 8283 | #define _MIPI_PORT(port, a, c) ((port) ? c : a) /* ports A and C only */ | 8283 | #define _MIPI_PORT(port, a, c) (((port) == PORT_A) ? a : c) /* ports A and C only */ |
| 8284 | #define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c)) | 8284 | #define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c)) |
| 8285 | 8285 | ||
| 8286 | #define MIPIO_TXESC_CLK_DIV1 _MMIO(0x160004) | 8286 | #define MIPIO_TXESC_CLK_DIV1 _MMIO(0x160004) |
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c index 4ab8a973b61f..2e739018fb4c 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.c +++ b/drivers/gpu/drm/i915/i915_vgpu.c | |||
| @@ -60,8 +60,8 @@ | |||
| 60 | */ | 60 | */ |
| 61 | void i915_check_vgpu(struct drm_i915_private *dev_priv) | 61 | void i915_check_vgpu(struct drm_i915_private *dev_priv) |
| 62 | { | 62 | { |
| 63 | uint64_t magic; | 63 | u64 magic; |
| 64 | uint32_t version; | 64 | u16 version_major; |
| 65 | 65 | ||
| 66 | BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE); | 66 | BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE); |
| 67 | 67 | ||
| @@ -69,10 +69,8 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv) | |||
| 69 | if (magic != VGT_MAGIC) | 69 | if (magic != VGT_MAGIC) |
| 70 | return; | 70 | return; |
| 71 | 71 | ||
| 72 | version = INTEL_VGT_IF_VERSION_ENCODE( | 72 | version_major = __raw_i915_read16(dev_priv, vgtif_reg(version_major)); |
| 73 | __raw_i915_read16(dev_priv, vgtif_reg(version_major)), | 73 | if (version_major < VGT_VERSION_MAJOR) { |
| 74 | __raw_i915_read16(dev_priv, vgtif_reg(version_minor))); | ||
| 75 | if (version != INTEL_VGT_IF_VERSION) { | ||
| 76 | DRM_INFO("VGT interface version mismatch!\n"); | 74 | DRM_INFO("VGT interface version mismatch!\n"); |
| 77 | return; | 75 | return; |
| 78 | } | 76 | } |
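The vGPU detection in i915_check_vgpu() now only requires the reported major version to be at least VGT_VERSION_MAJOR instead of demanding an exact encoded major/minor match. A small standalone comparison of the two checks; the 1.1-versus-1.0 numbers are purely illustrative:

#include <stdio.h>
#include <stdint.h>

#define VGT_VERSION_MAJOR 1

/* old scheme: pack major/minor and demand an exact match */
static inline uint32_t version_encode(uint16_t major, uint16_t minor)
{
	return ((uint32_t)major << 16) | minor;
}

/* new scheme: only the major version has to be recent enough */
static int vgt_if_compatible(uint16_t major, uint16_t minor)
{
	(void)minor;			/* minor bumps stay compatible */
	return major >= VGT_VERSION_MAJOR;
}

int main(void)
{
	/* a host reporting 1.1 would fail the exact 1.0 compare ... */
	printf("exact match 1.1 vs 1.0: %s\n",
	       version_encode(1, 1) == version_encode(1, 0) ? "ok" : "mismatch");
	/* ... but passes the major-only check */
	printf("major-only  1.1:        %s\n",
	       vgt_if_compatible(1, 1) ? "ok" : "mismatch");
	return 0;
}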
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 1aba47024656..f066e2d785f5 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c | |||
| @@ -650,6 +650,11 @@ int i915_vma_unbind(struct i915_vma *vma) | |||
| 650 | break; | 650 | break; |
| 651 | } | 651 | } |
| 652 | 652 | ||
| 653 | if (!ret) { | ||
| 654 | ret = i915_gem_active_retire(&vma->last_fence, | ||
| 655 | &vma->vm->i915->drm.struct_mutex); | ||
| 656 | } | ||
| 657 | |||
| 653 | __i915_vma_unpin(vma); | 658 | __i915_vma_unpin(vma); |
| 654 | if (ret) | 659 | if (ret) |
| 655 | return ret; | 660 | return ret; |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3617927af269..9106ea32b048 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -120,7 +120,8 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc, | |||
| 120 | static void skylake_pfit_enable(struct intel_crtc *crtc); | 120 | static void skylake_pfit_enable(struct intel_crtc *crtc); |
| 121 | static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force); | 121 | static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force); |
| 122 | static void ironlake_pfit_enable(struct intel_crtc *crtc); | 122 | static void ironlake_pfit_enable(struct intel_crtc *crtc); |
| 123 | static void intel_modeset_setup_hw_state(struct drm_device *dev); | 123 | static void intel_modeset_setup_hw_state(struct drm_device *dev, |
| 124 | struct drm_modeset_acquire_ctx *ctx); | ||
| 124 | static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc); | 125 | static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc); |
| 125 | 126 | ||
| 126 | struct intel_limit { | 127 | struct intel_limit { |
| @@ -3449,7 +3450,7 @@ __intel_display_resume(struct drm_device *dev, | |||
| 3449 | struct drm_crtc *crtc; | 3450 | struct drm_crtc *crtc; |
| 3450 | int i, ret; | 3451 | int i, ret; |
| 3451 | 3452 | ||
| 3452 | intel_modeset_setup_hw_state(dev); | 3453 | intel_modeset_setup_hw_state(dev, ctx); |
| 3453 | i915_redisable_vga(to_i915(dev)); | 3454 | i915_redisable_vga(to_i915(dev)); |
| 3454 | 3455 | ||
| 3455 | if (!state) | 3456 | if (!state) |
| @@ -4598,7 +4599,7 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe) | |||
| 4598 | 4599 | ||
| 4599 | static int | 4600 | static int |
| 4600 | skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, | 4601 | skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, |
| 4601 | unsigned scaler_user, int *scaler_id, unsigned int rotation, | 4602 | unsigned int scaler_user, int *scaler_id, |
| 4602 | int src_w, int src_h, int dst_w, int dst_h) | 4603 | int src_w, int src_h, int dst_w, int dst_h) |
| 4603 | { | 4604 | { |
| 4604 | struct intel_crtc_scaler_state *scaler_state = | 4605 | struct intel_crtc_scaler_state *scaler_state = |
| @@ -4607,9 +4608,12 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, | |||
| 4607 | to_intel_crtc(crtc_state->base.crtc); | 4608 | to_intel_crtc(crtc_state->base.crtc); |
| 4608 | int need_scaling; | 4609 | int need_scaling; |
| 4609 | 4610 | ||
| 4610 | need_scaling = drm_rotation_90_or_270(rotation) ? | 4611 | /* |
| 4611 | (src_h != dst_w || src_w != dst_h): | 4612 | * Src coordinates are already rotated by 270 degrees for |
| 4612 | (src_w != dst_w || src_h != dst_h); | 4613 | * the 90/270 degree plane rotation cases (to match the |
| 4614 | * GTT mapping), hence no need to account for rotation here. | ||
| 4615 | */ | ||
| 4616 | need_scaling = src_w != dst_w || src_h != dst_h; | ||
| 4613 | 4617 | ||
| 4614 | /* | 4618 | /* |
| 4615 | * if plane is being disabled or scaler is no more required or force detach | 4619 | * if plane is being disabled or scaler is no more required or force detach |
| @@ -4671,7 +4675,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state) | |||
| 4671 | const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode; | 4675 | const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode; |
| 4672 | 4676 | ||
| 4673 | return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX, | 4677 | return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX, |
| 4674 | &state->scaler_state.scaler_id, DRM_ROTATE_0, | 4678 | &state->scaler_state.scaler_id, |
| 4675 | state->pipe_src_w, state->pipe_src_h, | 4679 | state->pipe_src_w, state->pipe_src_h, |
| 4676 | adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay); | 4680 | adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay); |
| 4677 | } | 4681 | } |
| @@ -4700,7 +4704,6 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, | |||
| 4700 | ret = skl_update_scaler(crtc_state, force_detach, | 4704 | ret = skl_update_scaler(crtc_state, force_detach, |
| 4701 | drm_plane_index(&intel_plane->base), | 4705 | drm_plane_index(&intel_plane->base), |
| 4702 | &plane_state->scaler_id, | 4706 | &plane_state->scaler_id, |
| 4703 | plane_state->base.rotation, | ||
| 4704 | drm_rect_width(&plane_state->base.src) >> 16, | 4707 | drm_rect_width(&plane_state->base.src) >> 16, |
| 4705 | drm_rect_height(&plane_state->base.src) >> 16, | 4708 | drm_rect_height(&plane_state->base.src) >> 16, |
| 4706 | drm_rect_width(&plane_state->base.dst), | 4709 | drm_rect_width(&plane_state->base.dst), |
| @@ -5823,7 +5826,8 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state, | |||
| 5823 | intel_update_watermarks(intel_crtc); | 5826 | intel_update_watermarks(intel_crtc); |
| 5824 | } | 5827 | } |
| 5825 | 5828 | ||
| 5826 | static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) | 5829 | static void intel_crtc_disable_noatomic(struct drm_crtc *crtc, |
| 5830 | struct drm_modeset_acquire_ctx *ctx) | ||
| 5827 | { | 5831 | { |
| 5828 | struct intel_encoder *encoder; | 5832 | struct intel_encoder *encoder; |
| 5829 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 5833 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| @@ -5853,7 +5857,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) | |||
| 5853 | return; | 5857 | return; |
| 5854 | } | 5858 | } |
| 5855 | 5859 | ||
| 5856 | state->acquire_ctx = crtc->dev->mode_config.acquire_ctx; | 5860 | state->acquire_ctx = ctx; |
| 5857 | 5861 | ||
| 5858 | /* Everything's already locked, -EDEADLK can't happen. */ | 5862 | /* Everything's already locked, -EDEADLK can't happen. */ |
| 5859 | crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); | 5863 | crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); |
| @@ -6101,7 +6105,7 @@ retry: | |||
| 6101 | pipe_config->fdi_lanes = lane; | 6105 | pipe_config->fdi_lanes = lane; |
| 6102 | 6106 | ||
| 6103 | intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock, | 6107 | intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock, |
| 6104 | link_bw, &pipe_config->fdi_m_n); | 6108 | link_bw, &pipe_config->fdi_m_n, false); |
| 6105 | 6109 | ||
| 6106 | ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config); | 6110 | ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config); |
| 6107 | if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) { | 6111 | if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) { |
| @@ -6277,7 +6281,8 @@ intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den) | |||
| 6277 | } | 6281 | } |
| 6278 | 6282 | ||
| 6279 | static void compute_m_n(unsigned int m, unsigned int n, | 6283 | static void compute_m_n(unsigned int m, unsigned int n, |
| 6280 | uint32_t *ret_m, uint32_t *ret_n) | 6284 | uint32_t *ret_m, uint32_t *ret_n, |
| 6285 | bool reduce_m_n) | ||
| 6281 | { | 6286 | { |
| 6282 | /* | 6287 | /* |
| 6283 | * Reduce M/N as much as possible without loss in precision. Several DP | 6288 | * Reduce M/N as much as possible without loss in precision. Several DP |
| @@ -6285,9 +6290,11 @@ static void compute_m_n(unsigned int m, unsigned int n, | |||
| 6285 | * values. The passed in values are more likely to have the least | 6290 | * values. The passed in values are more likely to have the least |
| 6286 | * significant bits zero than M after rounding below, so do this first. | 6291 | * significant bits zero than M after rounding below, so do this first. |
| 6287 | */ | 6292 | */ |
| 6288 | while ((m & 1) == 0 && (n & 1) == 0) { | 6293 | if (reduce_m_n) { |
| 6289 | m >>= 1; | 6294 | while ((m & 1) == 0 && (n & 1) == 0) { |
| 6290 | n >>= 1; | 6295 | m >>= 1; |
| 6296 | n >>= 1; | ||
| 6297 | } | ||
| 6291 | } | 6298 | } |
| 6292 | 6299 | ||
| 6293 | *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); | 6300 | *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); |
| @@ -6298,16 +6305,19 @@ static void compute_m_n(unsigned int m, unsigned int n, | |||
| 6298 | void | 6305 | void |
| 6299 | intel_link_compute_m_n(int bits_per_pixel, int nlanes, | 6306 | intel_link_compute_m_n(int bits_per_pixel, int nlanes, |
| 6300 | int pixel_clock, int link_clock, | 6307 | int pixel_clock, int link_clock, |
| 6301 | struct intel_link_m_n *m_n) | 6308 | struct intel_link_m_n *m_n, |
| 6309 | bool reduce_m_n) | ||
| 6302 | { | 6310 | { |
| 6303 | m_n->tu = 64; | 6311 | m_n->tu = 64; |
| 6304 | 6312 | ||
| 6305 | compute_m_n(bits_per_pixel * pixel_clock, | 6313 | compute_m_n(bits_per_pixel * pixel_clock, |
| 6306 | link_clock * nlanes * 8, | 6314 | link_clock * nlanes * 8, |
| 6307 | &m_n->gmch_m, &m_n->gmch_n); | 6315 | &m_n->gmch_m, &m_n->gmch_n, |
| 6316 | reduce_m_n); | ||
| 6308 | 6317 | ||
| 6309 | compute_m_n(pixel_clock, link_clock, | 6318 | compute_m_n(pixel_clock, link_clock, |
| 6310 | &m_n->link_m, &m_n->link_n); | 6319 | &m_n->link_m, &m_n->link_n, |
| 6320 | reduce_m_n); | ||
| 6311 | } | 6321 | } |
| 6312 | 6322 | ||
| 6313 | static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) | 6323 | static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) |
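compute_m_n() now shifts out common factors of two only when the new reduce_m_n flag is set, so link M/N values can be left unreduced for sinks that need it. A standalone sketch of just that reduction step; the sample m/n inputs are made up:

#include <stdio.h>
#include <stdbool.h>

/* strip common factors of two from m/n, as compute_m_n() does when
 * reduce_m_n is set; with the flag clear the values pass through */
static void reduce_m_n(unsigned int *m, unsigned int *n, bool reduce)
{
	if (!reduce)
		return;
	while ((*m & 1) == 0 && (*n & 1) == 0) {
		*m >>= 1;
		*n >>= 1;
	}
}

int main(void)
{
	unsigned int m = 148500 * 24;		/* made-up pixel-clock * bpp */
	unsigned int n = 270000 * 4 * 8;	/* made-up link-clock * lanes * 8 */

	reduce_m_n(&m, &n, true);
	printf("reduced:   m=%u n=%u\n", m, n);

	m = 148500 * 24;
	n = 270000 * 4 * 8;
	reduce_m_n(&m, &n, false);
	printf("unreduced: m=%u n=%u\n", m, n);
	return 0;
}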
| @@ -12197,6 +12207,15 @@ static void update_scanline_offset(struct intel_crtc *crtc) | |||
| 12197 | * type. For DP ports it behaves like most other platforms, but on HDMI | 12207 | * type. For DP ports it behaves like most other platforms, but on HDMI |
| 12198 | * there's an extra 1 line difference. So we need to add two instead of | 12208 | * there's an extra 1 line difference. So we need to add two instead of |
| 12199 | * one to the value. | 12209 | * one to the value. |
| 12210 | * | ||
| 12211 | * On VLV/CHV DSI the scanline counter would appear to increment | ||
| 12212 | * approx. 1/3 of a scanline before start of vblank. Unfortunately | ||
| 12213 | * that means we can't tell whether we're in vblank or not while | ||
| 12214 | * we're on that particular line. We must still set scanline_offset | ||
| 12215 | * to 1 so that the vblank timestamps come out correct when we query | ||
| 12216 | * the scanline counter from within the vblank interrupt handler. | ||
| 12217 | * However if queried just before the start of vblank we'll get an | ||
| 12218 | * answer that's slightly in the future. | ||
| 12200 | */ | 12219 | */ |
| 12201 | if (IS_GEN2(dev_priv)) { | 12220 | if (IS_GEN2(dev_priv)) { |
| 12202 | const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; | 12221 | const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; |
| @@ -15013,7 +15032,7 @@ int intel_modeset_init(struct drm_device *dev) | |||
| 15013 | intel_setup_outputs(dev_priv); | 15032 | intel_setup_outputs(dev_priv); |
| 15014 | 15033 | ||
| 15015 | drm_modeset_lock_all(dev); | 15034 | drm_modeset_lock_all(dev); |
| 15016 | intel_modeset_setup_hw_state(dev); | 15035 | intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx); |
| 15017 | drm_modeset_unlock_all(dev); | 15036 | drm_modeset_unlock_all(dev); |
| 15018 | 15037 | ||
| 15019 | for_each_intel_crtc(dev, crtc) { | 15038 | for_each_intel_crtc(dev, crtc) { |
| @@ -15050,13 +15069,13 @@ int intel_modeset_init(struct drm_device *dev) | |||
| 15050 | return 0; | 15069 | return 0; |
| 15051 | } | 15070 | } |
| 15052 | 15071 | ||
| 15053 | static void intel_enable_pipe_a(struct drm_device *dev) | 15072 | static void intel_enable_pipe_a(struct drm_device *dev, |
| 15073 | struct drm_modeset_acquire_ctx *ctx) | ||
| 15054 | { | 15074 | { |
| 15055 | struct intel_connector *connector; | 15075 | struct intel_connector *connector; |
| 15056 | struct drm_connector_list_iter conn_iter; | 15076 | struct drm_connector_list_iter conn_iter; |
| 15057 | struct drm_connector *crt = NULL; | 15077 | struct drm_connector *crt = NULL; |
| 15058 | struct intel_load_detect_pipe load_detect_temp; | 15078 | struct intel_load_detect_pipe load_detect_temp; |
| 15059 | struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx; | ||
| 15060 | int ret; | 15079 | int ret; |
| 15061 | 15080 | ||
| 15062 | /* We can't just switch on the pipe A, we need to set things up with a | 15081 | /* We can't just switch on the pipe A, we need to set things up with a |
| @@ -15128,7 +15147,8 @@ static bool has_pch_trancoder(struct drm_i915_private *dev_priv, | |||
| 15128 | (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A); | 15147 | (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A); |
| 15129 | } | 15148 | } |
| 15130 | 15149 | ||
| 15131 | static void intel_sanitize_crtc(struct intel_crtc *crtc) | 15150 | static void intel_sanitize_crtc(struct intel_crtc *crtc, |
| 15151 | struct drm_modeset_acquire_ctx *ctx) | ||
| 15132 | { | 15152 | { |
| 15133 | struct drm_device *dev = crtc->base.dev; | 15153 | struct drm_device *dev = crtc->base.dev; |
| 15134 | struct drm_i915_private *dev_priv = to_i915(dev); | 15154 | struct drm_i915_private *dev_priv = to_i915(dev); |
| @@ -15174,7 +15194,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) | |||
| 15174 | plane = crtc->plane; | 15194 | plane = crtc->plane; |
| 15175 | crtc->base.primary->state->visible = true; | 15195 | crtc->base.primary->state->visible = true; |
| 15176 | crtc->plane = !plane; | 15196 | crtc->plane = !plane; |
| 15177 | intel_crtc_disable_noatomic(&crtc->base); | 15197 | intel_crtc_disable_noatomic(&crtc->base, ctx); |
| 15178 | crtc->plane = plane; | 15198 | crtc->plane = plane; |
| 15179 | } | 15199 | } |
| 15180 | 15200 | ||
| @@ -15184,13 +15204,13 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) | |||
| 15184 | * resume. Force-enable the pipe to fix this, the update_dpms | 15204 | * resume. Force-enable the pipe to fix this, the update_dpms |
| 15185 | * call below we restore the pipe to the right state, but leave | 15205 | * call below we restore the pipe to the right state, but leave |
| 15186 | * the required bits on. */ | 15206 | * the required bits on. */ |
| 15187 | intel_enable_pipe_a(dev); | 15207 | intel_enable_pipe_a(dev, ctx); |
| 15188 | } | 15208 | } |
| 15189 | 15209 | ||
| 15190 | /* Adjust the state of the output pipe according to whether we | 15210 | /* Adjust the state of the output pipe according to whether we |
| 15191 | * have active connectors/encoders. */ | 15211 | * have active connectors/encoders. */ |
| 15192 | if (crtc->active && !intel_crtc_has_encoders(crtc)) | 15212 | if (crtc->active && !intel_crtc_has_encoders(crtc)) |
| 15193 | intel_crtc_disable_noatomic(&crtc->base); | 15213 | intel_crtc_disable_noatomic(&crtc->base, ctx); |
| 15194 | 15214 | ||
| 15195 | if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) { | 15215 | if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) { |
| 15196 | /* | 15216 | /* |
| @@ -15488,7 +15508,8 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv) | |||
| 15488 | * and sanitizes it to the current state | 15508 | * and sanitizes it to the current state |
| 15489 | */ | 15509 | */ |
| 15490 | static void | 15510 | static void |
| 15491 | intel_modeset_setup_hw_state(struct drm_device *dev) | 15511 | intel_modeset_setup_hw_state(struct drm_device *dev, |
| 15512 | struct drm_modeset_acquire_ctx *ctx) | ||
| 15492 | { | 15513 | { |
| 15493 | struct drm_i915_private *dev_priv = to_i915(dev); | 15514 | struct drm_i915_private *dev_priv = to_i915(dev); |
| 15494 | enum pipe pipe; | 15515 | enum pipe pipe; |
| @@ -15508,7 +15529,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev) | |||
| 15508 | for_each_pipe(dev_priv, pipe) { | 15529 | for_each_pipe(dev_priv, pipe) { |
| 15509 | crtc = intel_get_crtc_for_pipe(dev_priv, pipe); | 15530 | crtc = intel_get_crtc_for_pipe(dev_priv, pipe); |
| 15510 | 15531 | ||
| 15511 | intel_sanitize_crtc(crtc); | 15532 | intel_sanitize_crtc(crtc, ctx); |
| 15512 | intel_dump_pipe_config(crtc, crtc->config, | 15533 | intel_dump_pipe_config(crtc, crtc->config, |
| 15513 | "[setup_hw_state]"); | 15534 | "[setup_hw_state]"); |
| 15514 | } | 15535 | } |
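The intel_display.c hunks above thread an explicit drm_modeset_acquire_ctx through intel_modeset_setup_hw_state(), intel_sanitize_crtc() and intel_enable_pipe_a() rather than letting the leaf helper dig the context out of dev->mode_config.acquire_ctx. A minimal sketch of the locking pattern a caller of these helpers is expected to follow; the function name here is illustrative, not taken from the driver:

static void example_sanitize_with_ctx(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	/* helpers now receive the same ctx explicitly, e.g.
	 * intel_modeset_setup_hw_state(dev, &ctx), instead of peeking
	 * at dev->mode_config.acquire_ctx */

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}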
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index ee77b519835c..fc691b8b317c 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
| @@ -1507,37 +1507,6 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp) | |||
| 1507 | DRM_DEBUG_KMS("common rates: %s\n", str); | 1507 | DRM_DEBUG_KMS("common rates: %s\n", str); |
| 1508 | } | 1508 | } |
| 1509 | 1509 | ||
| 1510 | bool | ||
| 1511 | __intel_dp_read_desc(struct intel_dp *intel_dp, struct intel_dp_desc *desc) | ||
| 1512 | { | ||
| 1513 | u32 base = drm_dp_is_branch(intel_dp->dpcd) ? DP_BRANCH_OUI : | ||
| 1514 | DP_SINK_OUI; | ||
| 1515 | |||
| 1516 | return drm_dp_dpcd_read(&intel_dp->aux, base, desc, sizeof(*desc)) == | ||
| 1517 | sizeof(*desc); | ||
| 1518 | } | ||
| 1519 | |||
| 1520 | bool intel_dp_read_desc(struct intel_dp *intel_dp) | ||
| 1521 | { | ||
| 1522 | struct intel_dp_desc *desc = &intel_dp->desc; | ||
| 1523 | bool oui_sup = intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & | ||
| 1524 | DP_OUI_SUPPORT; | ||
| 1525 | int dev_id_len; | ||
| 1526 | |||
| 1527 | if (!__intel_dp_read_desc(intel_dp, desc)) | ||
| 1528 | return false; | ||
| 1529 | |||
| 1530 | dev_id_len = strnlen(desc->device_id, sizeof(desc->device_id)); | ||
| 1531 | DRM_DEBUG_KMS("DP %s: OUI %*phD%s dev-ID %*pE HW-rev %d.%d SW-rev %d.%d\n", | ||
| 1532 | drm_dp_is_branch(intel_dp->dpcd) ? "branch" : "sink", | ||
| 1533 | (int)sizeof(desc->oui), desc->oui, oui_sup ? "" : "(NS)", | ||
| 1534 | dev_id_len, desc->device_id, | ||
| 1535 | desc->hw_rev >> 4, desc->hw_rev & 0xf, | ||
| 1536 | desc->sw_major_rev, desc->sw_minor_rev); | ||
| 1537 | |||
| 1538 | return true; | ||
| 1539 | } | ||
| 1540 | |||
| 1541 | static int rate_to_index(int find, const int *rates) | 1510 | static int rate_to_index(int find, const int *rates) |
| 1542 | { | 1511 | { |
| 1543 | int i = 0; | 1512 | int i = 0; |
| @@ -1624,6 +1593,8 @@ intel_dp_compute_config(struct intel_encoder *encoder, | |||
| 1624 | int common_rates[DP_MAX_SUPPORTED_RATES] = {}; | 1593 | int common_rates[DP_MAX_SUPPORTED_RATES] = {}; |
| 1625 | int common_len; | 1594 | int common_len; |
| 1626 | uint8_t link_bw, rate_select; | 1595 | uint8_t link_bw, rate_select; |
| 1596 | bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc, | ||
| 1597 | DP_DPCD_QUIRK_LIMITED_M_N); | ||
| 1627 | 1598 | ||
| 1628 | common_len = intel_dp_common_rates(intel_dp, common_rates); | 1599 | common_len = intel_dp_common_rates(intel_dp, common_rates); |
| 1629 | 1600 | ||
| @@ -1753,7 +1724,8 @@ found: | |||
| 1753 | intel_link_compute_m_n(bpp, lane_count, | 1724 | intel_link_compute_m_n(bpp, lane_count, |
| 1754 | adjusted_mode->crtc_clock, | 1725 | adjusted_mode->crtc_clock, |
| 1755 | pipe_config->port_clock, | 1726 | pipe_config->port_clock, |
| 1756 | &pipe_config->dp_m_n); | 1727 | &pipe_config->dp_m_n, |
| 1728 | reduce_m_n); | ||
| 1757 | 1729 | ||
| 1758 | if (intel_connector->panel.downclock_mode != NULL && | 1730 | if (intel_connector->panel.downclock_mode != NULL && |
| 1759 | dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) { | 1731 | dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) { |
| @@ -1761,7 +1733,8 @@ found: | |||
| 1761 | intel_link_compute_m_n(bpp, lane_count, | 1733 | intel_link_compute_m_n(bpp, lane_count, |
| 1762 | intel_connector->panel.downclock_mode->clock, | 1734 | intel_connector->panel.downclock_mode->clock, |
| 1763 | pipe_config->port_clock, | 1735 | pipe_config->port_clock, |
| 1764 | &pipe_config->dp_m2_n2); | 1736 | &pipe_config->dp_m2_n2, |
| 1737 | reduce_m_n); | ||
| 1765 | } | 1738 | } |
| 1766 | 1739 | ||
| 1767 | /* | 1740 | /* |
| @@ -3622,7 +3595,8 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp) | |||
| 3622 | if (!intel_dp_read_dpcd(intel_dp)) | 3595 | if (!intel_dp_read_dpcd(intel_dp)) |
| 3623 | return false; | 3596 | return false; |
| 3624 | 3597 | ||
| 3625 | intel_dp_read_desc(intel_dp); | 3598 | drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, |
| 3599 | drm_dp_is_branch(intel_dp->dpcd)); | ||
| 3626 | 3600 | ||
| 3627 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) | 3601 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) |
| 3628 | dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] & | 3602 | dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] & |
| @@ -4624,7 +4598,8 @@ intel_dp_long_pulse(struct intel_connector *intel_connector) | |||
| 4624 | 4598 | ||
| 4625 | intel_dp_print_rates(intel_dp); | 4599 | intel_dp_print_rates(intel_dp); |
| 4626 | 4600 | ||
| 4627 | intel_dp_read_desc(intel_dp); | 4601 | drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, |
| 4602 | drm_dp_is_branch(intel_dp->dpcd)); | ||
| 4628 | 4603 | ||
| 4629 | intel_dp_configure_mst(intel_dp); | 4604 | intel_dp_configure_mst(intel_dp); |
| 4630 | 4605 | ||
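Taken together, the intel_dp.c hunks replace the driver-local descriptor readout with the shared drm_dp_read_desc() helper and use drm_dp_has_quirk() to decide whether the computed link M/N values should be reduced for devices advertising DP_DPCD_QUIRK_LIMITED_M_N. A short sketch of the resulting flow, using only the calls that appear in the hunks above:

	bool reduce_m_n;

	drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
			 drm_dp_is_branch(intel_dp->dpcd));

	reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
				      DP_DPCD_QUIRK_LIMITED_M_N);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n,
			       reduce_m_n);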
diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c index 6532e226db29..40ba3134545e 100644 --- a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c | |||
| @@ -119,8 +119,6 @@ static int intel_dp_aux_setup_backlight(struct intel_connector *connector, | |||
| 119 | struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); | 119 | struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); |
| 120 | struct intel_panel *panel = &connector->panel; | 120 | struct intel_panel *panel = &connector->panel; |
| 121 | 121 | ||
| 122 | intel_dp_aux_enable_backlight(connector); | ||
| 123 | |||
| 124 | if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) | 122 | if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) |
| 125 | panel->backlight.max = 0xFFFF; | 123 | panel->backlight.max = 0xFFFF; |
| 126 | else | 124 | else |
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index c1f62eb07c07..989e25577ac0 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c | |||
| @@ -44,6 +44,8 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, | |||
| 44 | int lane_count, slots; | 44 | int lane_count, slots; |
| 45 | const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; | 45 | const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; |
| 46 | int mst_pbn; | 46 | int mst_pbn; |
| 47 | bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc, | ||
| 48 | DP_DPCD_QUIRK_LIMITED_M_N); | ||
| 47 | 49 | ||
| 48 | pipe_config->has_pch_encoder = false; | 50 | pipe_config->has_pch_encoder = false; |
| 49 | bpp = 24; | 51 | bpp = 24; |
| @@ -75,7 +77,8 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, | |||
| 75 | intel_link_compute_m_n(bpp, lane_count, | 77 | intel_link_compute_m_n(bpp, lane_count, |
| 76 | adjusted_mode->crtc_clock, | 78 | adjusted_mode->crtc_clock, |
| 77 | pipe_config->port_clock, | 79 | pipe_config->port_clock, |
| 78 | &pipe_config->dp_m_n); | 80 | &pipe_config->dp_m_n, |
| 81 | reduce_m_n); | ||
| 79 | 82 | ||
| 80 | pipe_config->dp_m_n.tu = slots; | 83 | pipe_config->dp_m_n.tu = slots; |
| 81 | 84 | ||
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index aaee3949a422..f630c7af5020 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
| @@ -906,14 +906,6 @@ enum link_m_n_set { | |||
| 906 | M2_N2 | 906 | M2_N2 |
| 907 | }; | 907 | }; |
| 908 | 908 | ||
| 909 | struct intel_dp_desc { | ||
| 910 | u8 oui[3]; | ||
| 911 | u8 device_id[6]; | ||
| 912 | u8 hw_rev; | ||
| 913 | u8 sw_major_rev; | ||
| 914 | u8 sw_minor_rev; | ||
| 915 | } __packed; | ||
| 916 | |||
| 917 | struct intel_dp_compliance_data { | 909 | struct intel_dp_compliance_data { |
| 918 | unsigned long edid; | 910 | unsigned long edid; |
| 919 | uint8_t video_pattern; | 911 | uint8_t video_pattern; |
| @@ -957,7 +949,7 @@ struct intel_dp { | |||
| 957 | /* Max link BW for the sink as per DPCD registers */ | 949 | /* Max link BW for the sink as per DPCD registers */ |
| 958 | int max_sink_link_bw; | 950 | int max_sink_link_bw; |
| 959 | /* sink or branch descriptor */ | 951 | /* sink or branch descriptor */ |
| 960 | struct intel_dp_desc desc; | 952 | struct drm_dp_desc desc; |
| 961 | struct drm_dp_aux aux; | 953 | struct drm_dp_aux aux; |
| 962 | enum intel_display_power_domain aux_power_domain; | 954 | enum intel_display_power_domain aux_power_domain; |
| 963 | uint8_t train_set[4]; | 955 | uint8_t train_set[4]; |
| @@ -1532,9 +1524,6 @@ static inline unsigned int intel_dp_unused_lane_mask(int lane_count) | |||
| 1532 | } | 1524 | } |
| 1533 | 1525 | ||
| 1534 | bool intel_dp_read_dpcd(struct intel_dp *intel_dp); | 1526 | bool intel_dp_read_dpcd(struct intel_dp *intel_dp); |
| 1535 | bool __intel_dp_read_desc(struct intel_dp *intel_dp, | ||
| 1536 | struct intel_dp_desc *desc); | ||
| 1537 | bool intel_dp_read_desc(struct intel_dp *intel_dp); | ||
| 1538 | int intel_dp_link_required(int pixel_clock, int bpp); | 1527 | int intel_dp_link_required(int pixel_clock, int bpp); |
| 1539 | int intel_dp_max_data_rate(int max_link_clock, int max_lanes); | 1528 | int intel_dp_max_data_rate(int max_link_clock, int max_lanes); |
| 1540 | bool intel_digital_port_connected(struct drm_i915_private *dev_priv, | 1529 | bool intel_digital_port_connected(struct drm_i915_private *dev_priv, |
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 854e8e0c836b..f94eacff196c 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c | |||
| @@ -1075,6 +1075,22 @@ int intel_ring_workarounds_emit(struct drm_i915_gem_request *req) | |||
| 1075 | return 0; | 1075 | return 0; |
| 1076 | } | 1076 | } |
| 1077 | 1077 | ||
| 1078 | static bool ring_is_idle(struct intel_engine_cs *engine) | ||
| 1079 | { | ||
| 1080 | struct drm_i915_private *dev_priv = engine->i915; | ||
| 1081 | bool idle = true; | ||
| 1082 | |||
| 1083 | intel_runtime_pm_get(dev_priv); | ||
| 1084 | |||
| 1085 | /* No bit for gen2, so assume the CS parser is idle */ | ||
| 1086 | if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE)) | ||
| 1087 | idle = false; | ||
| 1088 | |||
| 1089 | intel_runtime_pm_put(dev_priv); | ||
| 1090 | |||
| 1091 | return idle; | ||
| 1092 | } | ||
| 1093 | |||
| 1078 | /** | 1094 | /** |
| 1079 | * intel_engine_is_idle() - Report if the engine has finished process all work | 1095 | * intel_engine_is_idle() - Report if the engine has finished process all work |
| 1080 | * @engine: the intel_engine_cs | 1096 | * @engine: the intel_engine_cs |
| @@ -1084,8 +1100,6 @@ int intel_ring_workarounds_emit(struct drm_i915_gem_request *req) | |||
| 1084 | */ | 1100 | */ |
| 1085 | bool intel_engine_is_idle(struct intel_engine_cs *engine) | 1101 | bool intel_engine_is_idle(struct intel_engine_cs *engine) |
| 1086 | { | 1102 | { |
| 1087 | struct drm_i915_private *dev_priv = engine->i915; | ||
| 1088 | |||
| 1089 | /* Any inflight/incomplete requests? */ | 1103 | /* Any inflight/incomplete requests? */ |
| 1090 | if (!i915_seqno_passed(intel_engine_get_seqno(engine), | 1104 | if (!i915_seqno_passed(intel_engine_get_seqno(engine), |
| 1091 | intel_engine_last_submit(engine))) | 1105 | intel_engine_last_submit(engine))) |
| @@ -1100,7 +1114,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine) | |||
| 1100 | return false; | 1114 | return false; |
| 1101 | 1115 | ||
| 1102 | /* Ring stopped? */ | 1116 | /* Ring stopped? */ |
| 1103 | if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE)) | 1117 | if (!ring_is_idle(engine)) |
| 1104 | return false; | 1118 | return false; |
| 1105 | 1119 | ||
| 1106 | return true; | 1120 | return true; |
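The new ring_is_idle() helper exists so that the MODE_IDLE register read happens under a runtime-PM wakeref, keeping intel_engine_is_idle() safe to call while the device may be runtime suspended. The essential pattern, condensed with an illustrative name:

static bool example_hw_idle(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	bool idle = true;

	intel_runtime_pm_get(dev_priv);	/* wake the device for the MMIO read */

	if (INTEL_GEN(dev_priv) > 2 &&
	    !(I915_READ_MODE(engine) & MODE_IDLE))
		idle = false;

	intel_runtime_pm_put(dev_priv);
	return idle;
}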
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index ded2add18b26..d93c58410bff 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c | |||
| @@ -82,20 +82,10 @@ static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc) | |||
| 82 | static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache, | 82 | static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache, |
| 83 | int *width, int *height) | 83 | int *width, int *height) |
| 84 | { | 84 | { |
| 85 | int w, h; | ||
| 86 | |||
| 87 | if (drm_rotation_90_or_270(cache->plane.rotation)) { | ||
| 88 | w = cache->plane.src_h; | ||
| 89 | h = cache->plane.src_w; | ||
| 90 | } else { | ||
| 91 | w = cache->plane.src_w; | ||
| 92 | h = cache->plane.src_h; | ||
| 93 | } | ||
| 94 | |||
| 95 | if (width) | 85 | if (width) |
| 96 | *width = w; | 86 | *width = cache->plane.src_w; |
| 97 | if (height) | 87 | if (height) |
| 98 | *height = h; | 88 | *height = cache->plane.src_h; |
| 99 | } | 89 | } |
| 100 | 90 | ||
| 101 | static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv, | 91 | static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv, |
| @@ -746,6 +736,11 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc, | |||
| 746 | cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate; | 736 | cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate; |
| 747 | 737 | ||
| 748 | cache->plane.rotation = plane_state->base.rotation; | 738 | cache->plane.rotation = plane_state->base.rotation; |
| 739 | /* | ||
| 740 | * Src coordinates are already rotated by 270 degrees for | ||
| 741 | * the 90/270 degree plane rotation cases (to match the | ||
| 742 | * GTT mapping), hence no need to account for rotation here. | ||
| 743 | */ | ||
| 749 | cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16; | 744 | cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16; |
| 750 | cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16; | 745 | cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16; |
| 751 | cache->plane.visible = plane_state->base.visible; | 746 | cache->plane.visible = plane_state->base.visible; |
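The comment added in intel_fbc_update_state_cache() records why the open-coded 90/270 swap could be dropped: by the time the plane state reaches the driver, plane_state->base.src has already been rotated to match the GTT mapping, and its coordinates are in 16.16 fixed point. A minimal sketch of the conversion, assuming a struct drm_plane_state *state:

	/* src is 16.16 fixed point and already rotated for 90/270 planes,
	 * so integer width/height fall out of a plain shift */
	int src_w = drm_rect_width(&state->src) >> 16;
	int src_h = drm_rect_height(&state->src) >> 16;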
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c index 668f00480d97..292fedf30b00 100644 --- a/drivers/gpu/drm/i915/intel_lpe_audio.c +++ b/drivers/gpu/drm/i915/intel_lpe_audio.c | |||
| @@ -149,44 +149,10 @@ static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv) | |||
| 149 | 149 | ||
| 150 | static void lpe_audio_irq_unmask(struct irq_data *d) | 150 | static void lpe_audio_irq_unmask(struct irq_data *d) |
| 151 | { | 151 | { |
| 152 | struct drm_i915_private *dev_priv = d->chip_data; | ||
| 153 | unsigned long irqflags; | ||
| 154 | u32 val = (I915_LPE_PIPE_A_INTERRUPT | | ||
| 155 | I915_LPE_PIPE_B_INTERRUPT); | ||
| 156 | |||
| 157 | if (IS_CHERRYVIEW(dev_priv)) | ||
| 158 | val |= I915_LPE_PIPE_C_INTERRUPT; | ||
| 159 | |||
| 160 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | ||
| 161 | |||
| 162 | dev_priv->irq_mask &= ~val; | ||
| 163 | I915_WRITE(VLV_IIR, val); | ||
| 164 | I915_WRITE(VLV_IIR, val); | ||
| 165 | I915_WRITE(VLV_IMR, dev_priv->irq_mask); | ||
| 166 | POSTING_READ(VLV_IMR); | ||
| 167 | |||
| 168 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
| 169 | } | 152 | } |
| 170 | 153 | ||
| 171 | static void lpe_audio_irq_mask(struct irq_data *d) | 154 | static void lpe_audio_irq_mask(struct irq_data *d) |
| 172 | { | 155 | { |
| 173 | struct drm_i915_private *dev_priv = d->chip_data; | ||
| 174 | unsigned long irqflags; | ||
| 175 | u32 val = (I915_LPE_PIPE_A_INTERRUPT | | ||
| 176 | I915_LPE_PIPE_B_INTERRUPT); | ||
| 177 | |||
| 178 | if (IS_CHERRYVIEW(dev_priv)) | ||
| 179 | val |= I915_LPE_PIPE_C_INTERRUPT; | ||
| 180 | |||
| 181 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | ||
| 182 | |||
| 183 | dev_priv->irq_mask |= val; | ||
| 184 | I915_WRITE(VLV_IMR, dev_priv->irq_mask); | ||
| 185 | I915_WRITE(VLV_IIR, val); | ||
| 186 | I915_WRITE(VLV_IIR, val); | ||
| 187 | POSTING_READ(VLV_IIR); | ||
| 188 | |||
| 189 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
| 190 | } | 156 | } |
| 191 | 157 | ||
| 192 | static struct irq_chip lpe_audio_irqchip = { | 158 | static struct irq_chip lpe_audio_irqchip = { |
| @@ -330,8 +296,6 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv) | |||
| 330 | 296 | ||
| 331 | desc = irq_to_desc(dev_priv->lpe_audio.irq); | 297 | desc = irq_to_desc(dev_priv->lpe_audio.irq); |
| 332 | 298 | ||
| 333 | lpe_audio_irq_mask(&desc->irq_data); | ||
| 334 | |||
| 335 | lpe_audio_platdev_destroy(dev_priv); | 299 | lpe_audio_platdev_destroy(dev_priv); |
| 336 | 300 | ||
| 337 | irq_free_desc(dev_priv->lpe_audio.irq); | 301 | irq_free_desc(dev_priv->lpe_audio.irq); |
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index c8f7c631fc1f..62f44d3e7c43 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c | |||
| @@ -326,8 +326,7 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq) | |||
| 326 | rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt; | 326 | rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt; |
| 327 | u32 *reg_state = ce->lrc_reg_state; | 327 | u32 *reg_state = ce->lrc_reg_state; |
| 328 | 328 | ||
| 329 | assert_ring_tail_valid(rq->ring, rq->tail); | 329 | reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail); |
| 330 | reg_state[CTX_RING_TAIL+1] = rq->tail; | ||
| 331 | 330 | ||
| 332 | /* True 32b PPGTT with dynamic page allocation: update PDP | 331 | /* True 32b PPGTT with dynamic page allocation: update PDP |
| 333 | * registers and point the unallocated PDPs to scratch page. | 332 | * registers and point the unallocated PDPs to scratch page. |
| @@ -1989,7 +1988,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, | |||
| 1989 | 1988 | ||
| 1990 | ce->ring = ring; | 1989 | ce->ring = ring; |
| 1991 | ce->state = vma; | 1990 | ce->state = vma; |
| 1992 | ce->initialised = engine->init_context == NULL; | 1991 | ce->initialised |= engine->init_context == NULL; |
| 1993 | 1992 | ||
| 1994 | return 0; | 1993 | return 0; |
| 1995 | 1994 | ||
| @@ -2036,8 +2035,7 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv) | |||
| 2036 | ce->state->obj->mm.dirty = true; | 2035 | ce->state->obj->mm.dirty = true; |
| 2037 | i915_gem_object_unpin_map(ce->state->obj); | 2036 | i915_gem_object_unpin_map(ce->state->obj); |
| 2038 | 2037 | ||
| 2039 | ce->ring->head = ce->ring->tail = 0; | 2038 | intel_ring_reset(ce->ring, 0); |
| 2040 | intel_ring_update_space(ce->ring); | ||
| 2041 | } | 2039 | } |
| 2042 | } | 2040 | } |
| 2043 | } | 2041 | } |
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c index 71cbe9c08932..5abef482eacf 100644 --- a/drivers/gpu/drm/i915/intel_lspcon.c +++ b/drivers/gpu/drm/i915/intel_lspcon.c | |||
| @@ -240,7 +240,7 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port) | |||
| 240 | return false; | 240 | return false; |
| 241 | } | 241 | } |
| 242 | 242 | ||
| 243 | intel_dp_read_desc(dp); | 243 | drm_dp_read_desc(&dp->aux, &dp->desc, drm_dp_is_branch(dp->dpcd)); |
| 244 | 244 | ||
| 245 | DRM_DEBUG_KMS("Success: LSPCON init\n"); | 245 | DRM_DEBUG_KMS("Success: LSPCON init\n"); |
| 246 | return true; | 246 | return true; |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 570bd603f401..078fd1bfa5ea 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
| @@ -3373,20 +3373,26 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate, | |||
| 3373 | 3373 | ||
| 3374 | /* n.b., src is 16.16 fixed point, dst is whole integer */ | 3374 | /* n.b., src is 16.16 fixed point, dst is whole integer */ |
| 3375 | if (plane->id == PLANE_CURSOR) { | 3375 | if (plane->id == PLANE_CURSOR) { |
| 3376 | /* | ||
| 3377 | * Cursors only support 0/180 degree rotation, | ||
| 3378 | * hence no need to account for rotation here. | ||
| 3379 | */ | ||
| 3376 | src_w = pstate->base.src_w; | 3380 | src_w = pstate->base.src_w; |
| 3377 | src_h = pstate->base.src_h; | 3381 | src_h = pstate->base.src_h; |
| 3378 | dst_w = pstate->base.crtc_w; | 3382 | dst_w = pstate->base.crtc_w; |
| 3379 | dst_h = pstate->base.crtc_h; | 3383 | dst_h = pstate->base.crtc_h; |
| 3380 | } else { | 3384 | } else { |
| 3385 | /* | ||
| 3386 | * Src coordinates are already rotated by 270 degrees for | ||
| 3387 | * the 90/270 degree plane rotation cases (to match the | ||
| 3388 | * GTT mapping), hence no need to account for rotation here. | ||
| 3389 | */ | ||
| 3381 | src_w = drm_rect_width(&pstate->base.src); | 3390 | src_w = drm_rect_width(&pstate->base.src); |
| 3382 | src_h = drm_rect_height(&pstate->base.src); | 3391 | src_h = drm_rect_height(&pstate->base.src); |
| 3383 | dst_w = drm_rect_width(&pstate->base.dst); | 3392 | dst_w = drm_rect_width(&pstate->base.dst); |
| 3384 | dst_h = drm_rect_height(&pstate->base.dst); | 3393 | dst_h = drm_rect_height(&pstate->base.dst); |
| 3385 | } | 3394 | } |
| 3386 | 3395 | ||
| 3387 | if (drm_rotation_90_or_270(pstate->base.rotation)) | ||
| 3388 | swap(dst_w, dst_h); | ||
| 3389 | |||
| 3390 | downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING); | 3396 | downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING); |
| 3391 | downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING); | 3397 | downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING); |
| 3392 | 3398 | ||
| @@ -3417,12 +3423,14 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, | |||
| 3417 | if (y && format != DRM_FORMAT_NV12) | 3423 | if (y && format != DRM_FORMAT_NV12) |
| 3418 | return 0; | 3424 | return 0; |
| 3419 | 3425 | ||
| 3426 | /* | ||
| 3427 | * Src coordinates are already rotated by 270 degrees for | ||
| 3428 | * the 90/270 degree plane rotation cases (to match the | ||
| 3429 | * GTT mapping), hence no need to account for rotation here. | ||
| 3430 | */ | ||
| 3420 | width = drm_rect_width(&intel_pstate->base.src) >> 16; | 3431 | width = drm_rect_width(&intel_pstate->base.src) >> 16; |
| 3421 | height = drm_rect_height(&intel_pstate->base.src) >> 16; | 3432 | height = drm_rect_height(&intel_pstate->base.src) >> 16; |
| 3422 | 3433 | ||
| 3423 | if (drm_rotation_90_or_270(pstate->rotation)) | ||
| 3424 | swap(width, height); | ||
| 3425 | |||
| 3426 | /* for planar format */ | 3434 | /* for planar format */ |
| 3427 | if (format == DRM_FORMAT_NV12) { | 3435 | if (format == DRM_FORMAT_NV12) { |
| 3428 | if (y) /* y-plane data rate */ | 3436 | if (y) /* y-plane data rate */ |
| @@ -3505,12 +3513,14 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate, | |||
| 3505 | fb->modifier != I915_FORMAT_MOD_Yf_TILED) | 3513 | fb->modifier != I915_FORMAT_MOD_Yf_TILED) |
| 3506 | return 8; | 3514 | return 8; |
| 3507 | 3515 | ||
| 3516 | /* | ||
| 3517 | * Src coordinates are already rotated by 270 degrees for | ||
| 3518 | * the 90/270 degree plane rotation cases (to match the | ||
| 3519 | * GTT mapping), hence no need to account for rotation here. | ||
| 3520 | */ | ||
| 3508 | src_w = drm_rect_width(&intel_pstate->base.src) >> 16; | 3521 | src_w = drm_rect_width(&intel_pstate->base.src) >> 16; |
| 3509 | src_h = drm_rect_height(&intel_pstate->base.src) >> 16; | 3522 | src_h = drm_rect_height(&intel_pstate->base.src) >> 16; |
| 3510 | 3523 | ||
| 3511 | if (drm_rotation_90_or_270(pstate->rotation)) | ||
| 3512 | swap(src_w, src_h); | ||
| 3513 | |||
| 3514 | /* Halve UV plane width and height for NV12 */ | 3524 | /* Halve UV plane width and height for NV12 */ |
| 3515 | if (fb->format->format == DRM_FORMAT_NV12 && !y) { | 3525 | if (fb->format->format == DRM_FORMAT_NV12 && !y) { |
| 3516 | src_w /= 2; | 3526 | src_w /= 2; |
| @@ -3794,13 +3804,15 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, | |||
| 3794 | width = intel_pstate->base.crtc_w; | 3804 | width = intel_pstate->base.crtc_w; |
| 3795 | height = intel_pstate->base.crtc_h; | 3805 | height = intel_pstate->base.crtc_h; |
| 3796 | } else { | 3806 | } else { |
| 3807 | /* | ||
| 3808 | * Src coordinates are already rotated by 270 degrees for | ||
| 3809 | * the 90/270 degree plane rotation cases (to match the | ||
| 3810 | * GTT mapping), hence no need to account for rotation here. | ||
| 3811 | */ | ||
| 3797 | width = drm_rect_width(&intel_pstate->base.src) >> 16; | 3812 | width = drm_rect_width(&intel_pstate->base.src) >> 16; |
| 3798 | height = drm_rect_height(&intel_pstate->base.src) >> 16; | 3813 | height = drm_rect_height(&intel_pstate->base.src) >> 16; |
| 3799 | } | 3814 | } |
| 3800 | 3815 | ||
| 3801 | if (drm_rotation_90_or_270(pstate->rotation)) | ||
| 3802 | swap(width, height); | ||
| 3803 | |||
| 3804 | cpp = fb->format->cpp[0]; | 3816 | cpp = fb->format->cpp[0]; |
| 3805 | plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate); | 3817 | plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate); |
| 3806 | 3818 | ||
| @@ -4335,11 +4347,19 @@ skl_compute_wm(struct drm_atomic_state *state) | |||
| 4335 | struct drm_crtc_state *cstate; | 4347 | struct drm_crtc_state *cstate; |
| 4336 | struct intel_atomic_state *intel_state = to_intel_atomic_state(state); | 4348 | struct intel_atomic_state *intel_state = to_intel_atomic_state(state); |
| 4337 | struct skl_wm_values *results = &intel_state->wm_results; | 4349 | struct skl_wm_values *results = &intel_state->wm_results; |
| 4350 | struct drm_device *dev = state->dev; | ||
| 4338 | struct skl_pipe_wm *pipe_wm; | 4351 | struct skl_pipe_wm *pipe_wm; |
| 4339 | bool changed = false; | 4352 | bool changed = false; |
| 4340 | int ret, i; | 4353 | int ret, i; |
| 4341 | 4354 | ||
| 4342 | /* | 4355 | /* |
| 4356 | * When we distrust bios wm we always need to recompute to set the | ||
| 4357 | * expected DDB allocations for each CRTC. | ||
| 4358 | */ | ||
| 4359 | if (to_i915(dev)->wm.distrust_bios_wm) | ||
| 4360 | changed = true; | ||
| 4361 | |||
| 4362 | /* | ||
| 4343 | * If this transaction isn't actually touching any CRTC's, don't | 4363 | * If this transaction isn't actually touching any CRTC's, don't |
| 4344 | * bother with watermark calculation. Note that if we pass this | 4364 | * bother with watermark calculation. Note that if we pass this |
| 4345 | * test, we're guaranteed to hold at least one CRTC state mutex, | 4365 | * test, we're guaranteed to hold at least one CRTC state mutex, |
| @@ -4349,6 +4369,7 @@ skl_compute_wm(struct drm_atomic_state *state) | |||
| 4349 | */ | 4369 | */ |
| 4350 | for_each_new_crtc_in_state(state, crtc, cstate, i) | 4370 | for_each_new_crtc_in_state(state, crtc, cstate, i) |
| 4351 | changed = true; | 4371 | changed = true; |
| 4372 | |||
| 4352 | if (!changed) | 4373 | if (!changed) |
| 4353 | return 0; | 4374 | return 0; |
| 4354 | 4375 | ||
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index c3780d0d2baf..559f1ab42bfc 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c | |||
| @@ -435,8 +435,9 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp) | |||
| 435 | } | 435 | } |
| 436 | 436 | ||
| 437 | /* PSR2 is restricted to work with panel resolutions upto 3200x2000 */ | 437 | /* PSR2 is restricted to work with panel resolutions upto 3200x2000 */ |
| 438 | if (intel_crtc->config->pipe_src_w > 3200 || | 438 | if (dev_priv->psr.psr2_support && |
| 439 | intel_crtc->config->pipe_src_h > 2000) { | 439 | (intel_crtc->config->pipe_src_w > 3200 || |
| 440 | intel_crtc->config->pipe_src_h > 2000)) { | ||
| 440 | dev_priv->psr.psr2_support = false; | 441 | dev_priv->psr.psr2_support = false; |
| 441 | return false; | 442 | return false; |
| 442 | } | 443 | } |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 66a2b8b83972..513a0f4b469b 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
| @@ -49,7 +49,7 @@ static int __intel_ring_space(int head, int tail, int size) | |||
| 49 | 49 | ||
| 50 | void intel_ring_update_space(struct intel_ring *ring) | 50 | void intel_ring_update_space(struct intel_ring *ring) |
| 51 | { | 51 | { |
| 52 | ring->space = __intel_ring_space(ring->head, ring->tail, ring->size); | 52 | ring->space = __intel_ring_space(ring->head, ring->emit, ring->size); |
| 53 | } | 53 | } |
| 54 | 54 | ||
| 55 | static int | 55 | static int |
| @@ -774,8 +774,8 @@ static void i9xx_submit_request(struct drm_i915_gem_request *request) | |||
| 774 | 774 | ||
| 775 | i915_gem_request_submit(request); | 775 | i915_gem_request_submit(request); |
| 776 | 776 | ||
| 777 | assert_ring_tail_valid(request->ring, request->tail); | 777 | I915_WRITE_TAIL(request->engine, |
| 778 | I915_WRITE_TAIL(request->engine, request->tail); | 778 | intel_ring_set_tail(request->ring, request->tail)); |
| 779 | } | 779 | } |
| 780 | 780 | ||
| 781 | static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs) | 781 | static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs) |
| @@ -1316,11 +1316,23 @@ err: | |||
| 1316 | return PTR_ERR(addr); | 1316 | return PTR_ERR(addr); |
| 1317 | } | 1317 | } |
| 1318 | 1318 | ||
| 1319 | void intel_ring_reset(struct intel_ring *ring, u32 tail) | ||
| 1320 | { | ||
| 1321 | GEM_BUG_ON(!list_empty(&ring->request_list)); | ||
| 1322 | ring->tail = tail; | ||
| 1323 | ring->head = tail; | ||
| 1324 | ring->emit = tail; | ||
| 1325 | intel_ring_update_space(ring); | ||
| 1326 | } | ||
| 1327 | |||
| 1319 | void intel_ring_unpin(struct intel_ring *ring) | 1328 | void intel_ring_unpin(struct intel_ring *ring) |
| 1320 | { | 1329 | { |
| 1321 | GEM_BUG_ON(!ring->vma); | 1330 | GEM_BUG_ON(!ring->vma); |
| 1322 | GEM_BUG_ON(!ring->vaddr); | 1331 | GEM_BUG_ON(!ring->vaddr); |
| 1323 | 1332 | ||
| 1333 | /* Discard any unused bytes beyond that submitted to hw. */ | ||
| 1334 | intel_ring_reset(ring, ring->tail); | ||
| 1335 | |||
| 1324 | if (i915_vma_is_map_and_fenceable(ring->vma)) | 1336 | if (i915_vma_is_map_and_fenceable(ring->vma)) |
| 1325 | i915_vma_unpin_iomap(ring->vma); | 1337 | i915_vma_unpin_iomap(ring->vma); |
| 1326 | else | 1338 | else |
| @@ -1562,8 +1574,9 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv) | |||
| 1562 | struct intel_engine_cs *engine; | 1574 | struct intel_engine_cs *engine; |
| 1563 | enum intel_engine_id id; | 1575 | enum intel_engine_id id; |
| 1564 | 1576 | ||
| 1577 | /* Restart from the beginning of the rings for convenience */ | ||
| 1565 | for_each_engine(engine, dev_priv, id) | 1578 | for_each_engine(engine, dev_priv, id) |
| 1566 | engine->buffer->head = engine->buffer->tail; | 1579 | intel_ring_reset(engine->buffer, 0); |
| 1567 | } | 1580 | } |
| 1568 | 1581 | ||
| 1569 | static int ring_request_alloc(struct drm_i915_gem_request *request) | 1582 | static int ring_request_alloc(struct drm_i915_gem_request *request) |
| @@ -1616,7 +1629,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes) | |||
| 1616 | unsigned space; | 1629 | unsigned space; |
| 1617 | 1630 | ||
| 1618 | /* Would completion of this request free enough space? */ | 1631 | /* Would completion of this request free enough space? */ |
| 1619 | space = __intel_ring_space(target->postfix, ring->tail, | 1632 | space = __intel_ring_space(target->postfix, ring->emit, |
| 1620 | ring->size); | 1633 | ring->size); |
| 1621 | if (space >= bytes) | 1634 | if (space >= bytes) |
| 1622 | break; | 1635 | break; |
| @@ -1641,8 +1654,8 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes) | |||
| 1641 | u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords) | 1654 | u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords) |
| 1642 | { | 1655 | { |
| 1643 | struct intel_ring *ring = req->ring; | 1656 | struct intel_ring *ring = req->ring; |
| 1644 | int remain_actual = ring->size - ring->tail; | 1657 | int remain_actual = ring->size - ring->emit; |
| 1645 | int remain_usable = ring->effective_size - ring->tail; | 1658 | int remain_usable = ring->effective_size - ring->emit; |
| 1646 | int bytes = num_dwords * sizeof(u32); | 1659 | int bytes = num_dwords * sizeof(u32); |
| 1647 | int total_bytes, wait_bytes; | 1660 | int total_bytes, wait_bytes; |
| 1648 | bool need_wrap = false; | 1661 | bool need_wrap = false; |
| @@ -1678,17 +1691,17 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords) | |||
| 1678 | 1691 | ||
| 1679 | if (unlikely(need_wrap)) { | 1692 | if (unlikely(need_wrap)) { |
| 1680 | GEM_BUG_ON(remain_actual > ring->space); | 1693 | GEM_BUG_ON(remain_actual > ring->space); |
| 1681 | GEM_BUG_ON(ring->tail + remain_actual > ring->size); | 1694 | GEM_BUG_ON(ring->emit + remain_actual > ring->size); |
| 1682 | 1695 | ||
| 1683 | /* Fill the tail with MI_NOOP */ | 1696 | /* Fill the tail with MI_NOOP */ |
| 1684 | memset(ring->vaddr + ring->tail, 0, remain_actual); | 1697 | memset(ring->vaddr + ring->emit, 0, remain_actual); |
| 1685 | ring->tail = 0; | 1698 | ring->emit = 0; |
| 1686 | ring->space -= remain_actual; | 1699 | ring->space -= remain_actual; |
| 1687 | } | 1700 | } |
| 1688 | 1701 | ||
| 1689 | GEM_BUG_ON(ring->tail > ring->size - bytes); | 1702 | GEM_BUG_ON(ring->emit > ring->size - bytes); |
| 1690 | cs = ring->vaddr + ring->tail; | 1703 | cs = ring->vaddr + ring->emit; |
| 1691 | ring->tail += bytes; | 1704 | ring->emit += bytes; |
| 1692 | ring->space -= bytes; | 1705 | ring->space -= bytes; |
| 1693 | GEM_BUG_ON(ring->space < 0); | 1706 | GEM_BUG_ON(ring->space < 0); |
| 1694 | 1707 | ||
| @@ -1699,7 +1712,7 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords) | |||
| 1699 | int intel_ring_cacheline_align(struct drm_i915_gem_request *req) | 1712 | int intel_ring_cacheline_align(struct drm_i915_gem_request *req) |
| 1700 | { | 1713 | { |
| 1701 | int num_dwords = | 1714 | int num_dwords = |
| 1702 | (req->ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t); | 1715 | (req->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(uint32_t); |
| 1703 | u32 *cs; | 1716 | u32 *cs; |
| 1704 | 1717 | ||
| 1705 | if (num_dwords == 0) | 1718 | if (num_dwords == 0) |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index a82a0807f64d..f7144fe09613 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
| @@ -145,6 +145,7 @@ struct intel_ring { | |||
| 145 | 145 | ||
| 146 | u32 head; | 146 | u32 head; |
| 147 | u32 tail; | 147 | u32 tail; |
| 148 | u32 emit; | ||
| 148 | 149 | ||
| 149 | int space; | 150 | int space; |
| 150 | int size; | 151 | int size; |
| @@ -488,6 +489,8 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value) | |||
| 488 | struct intel_ring * | 489 | struct intel_ring * |
| 489 | intel_engine_create_ring(struct intel_engine_cs *engine, int size); | 490 | intel_engine_create_ring(struct intel_engine_cs *engine, int size); |
| 490 | int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias); | 491 | int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias); |
| 492 | void intel_ring_reset(struct intel_ring *ring, u32 tail); | ||
| 493 | void intel_ring_update_space(struct intel_ring *ring); | ||
| 491 | void intel_ring_unpin(struct intel_ring *ring); | 494 | void intel_ring_unpin(struct intel_ring *ring); |
| 492 | void intel_ring_free(struct intel_ring *ring); | 495 | void intel_ring_free(struct intel_ring *ring); |
| 493 | 496 | ||
| @@ -511,7 +514,7 @@ intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs) | |||
| 511 | * reserved for the command packet (i.e. the value passed to | 514 | * reserved for the command packet (i.e. the value passed to |
| 512 | * intel_ring_begin()). | 515 | * intel_ring_begin()). |
| 513 | */ | 516 | */ |
| 514 | GEM_BUG_ON((req->ring->vaddr + req->ring->tail) != cs); | 517 | GEM_BUG_ON((req->ring->vaddr + req->ring->emit) != cs); |
| 515 | } | 518 | } |
| 516 | 519 | ||
| 517 | static inline u32 | 520 | static inline u32 |
| @@ -540,7 +543,19 @@ assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail) | |||
| 540 | GEM_BUG_ON(tail >= ring->size); | 543 | GEM_BUG_ON(tail >= ring->size); |
| 541 | } | 544 | } |
| 542 | 545 | ||
| 543 | void intel_ring_update_space(struct intel_ring *ring); | 546 | static inline unsigned int |
| 547 | intel_ring_set_tail(struct intel_ring *ring, unsigned int tail) | ||
| 548 | { | ||
| 549 | /* Whilst writes to the tail are strictly ordered, there is no | ||
| 550 | * serialisation between readers and the writers. The tail may be | ||
| 551 | * read by i915_gem_request_retire() just as it is being updated | ||
| 552 | * by execlists, as although the breadcrumb is complete, the context | ||
| 553 | * switch hasn't been seen. | ||
| 554 | */ | ||
| 555 | assert_ring_tail_valid(ring, tail); | ||
| 556 | ring->tail = tail; | ||
| 557 | return tail; | ||
| 558 | } | ||
| 544 | 559 | ||
| 545 | void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno); | 560 | void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno); |
| 546 | 561 | ||
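Across intel_lrc.c, intel_ringbuffer.c and intel_ringbuffer.h the single tail cursor is split in two: ring->emit tracks where the CPU is writing commands, while ring->tail only moves at submission time via intel_ring_set_tail(), and intel_ring_reset() rewinds head, tail and emit together. A simplified sketch of how the two cursors are used, condensed from the hunks above rather than a drop-in replacement:

	/* CPU-side command emission advances ring->emit ... */
	u32 *cs = ring->vaddr + ring->emit;
	ring->emit += num_dwords * sizeof(u32);

	/* ... while the hardware tail only moves when the request is submitted */
	I915_WRITE_TAIL(request->engine,
			intel_ring_set_tail(request->ring, request->tail));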
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 8c87c717c7cd..e6517edcd16b 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c | |||
| @@ -83,10 +83,13 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, | |||
| 83 | */ | 83 | */ |
| 84 | void intel_pipe_update_start(struct intel_crtc *crtc) | 84 | void intel_pipe_update_start(struct intel_crtc *crtc) |
| 85 | { | 85 | { |
| 86 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | ||
| 86 | const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; | 87 | const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; |
| 87 | long timeout = msecs_to_jiffies_timeout(1); | 88 | long timeout = msecs_to_jiffies_timeout(1); |
| 88 | int scanline, min, max, vblank_start; | 89 | int scanline, min, max, vblank_start; |
| 89 | wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base); | 90 | wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base); |
| 91 | bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && | ||
| 92 | intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI); | ||
| 90 | DEFINE_WAIT(wait); | 93 | DEFINE_WAIT(wait); |
| 91 | 94 | ||
| 92 | vblank_start = adjusted_mode->crtc_vblank_start; | 95 | vblank_start = adjusted_mode->crtc_vblank_start; |
| @@ -139,6 +142,24 @@ void intel_pipe_update_start(struct intel_crtc *crtc) | |||
| 139 | 142 | ||
| 140 | drm_crtc_vblank_put(&crtc->base); | 143 | drm_crtc_vblank_put(&crtc->base); |
| 141 | 144 | ||
| 145 | /* | ||
| 146 | * On VLV/CHV DSI the scanline counter would appear to | ||
| 147 | * increment approx. 1/3 of a scanline before start of vblank. | ||
| 148 | * The registers still get latched at start of vblank however. | ||
| 149 | * This means we must not write any registers on the first | ||
| 150 | * line of vblank (since not the whole line is actually in | ||
| 151 | * vblank). And unfortunately we can't use the interrupt to | ||
| 152 | * wait here since it will fire too soon. We could use the | ||
| 153 | * frame start interrupt instead since it will fire after the | ||
| 154 | * critical scanline, but that would require more changes | ||
| 155 | * in the interrupt code. So for now we'll just do the nasty | ||
| 156 | * thing and poll for the bad scanline to pass us by. | ||
| 157 | * | ||
| 158 | * FIXME figure out if BXT+ DSI suffers from this as well | ||
| 159 | */ | ||
| 160 | while (need_vlv_dsi_wa && scanline == vblank_start) | ||
| 161 | scanline = intel_get_crtc_scanline(crtc); | ||
| 162 | |||
| 142 | crtc->debug.scanline_start = scanline; | 163 | crtc->debug.scanline_start = scanline; |
| 143 | crtc->debug.start_vbl_time = ktime_get(); | 164 | crtc->debug.start_vbl_time = ktime_get(); |
| 144 | crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc); | 165 | crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc); |
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h index 4b7f73aeddac..f84115261ae7 100644 --- a/drivers/gpu/drm/i915/intel_uc.h +++ b/drivers/gpu/drm/i915/intel_uc.h | |||
| @@ -59,8 +59,6 @@ struct drm_i915_gem_request; | |||
| 59 | * available in the work queue (note, the queue is shared, | 59 | * available in the work queue (note, the queue is shared, |
| 60 | * not per-engine). It is OK for this to be nonzero, but | 60 | * not per-engine). It is OK for this to be nonzero, but |
| 61 | * it should not be huge! | 61 | * it should not be huge! |
| 62 | * q_fail: failed to enqueue a work item. This should never happen, | ||
| 63 | * because we check for space beforehand. | ||
| 64 | * b_fail: failed to ring the doorbell. This should never happen, unless | 62 | * b_fail: failed to ring the doorbell. This should never happen, unless |
| 65 | * somehow the hardware misbehaves, or maybe if the GuC firmware | 63 | * somehow the hardware misbehaves, or maybe if the GuC firmware |
| 66 | * crashes? We probably need to reset the GPU to recover. | 64 | * crashes? We probably need to reset the GPU to recover. |
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c index 1afb8b06e3e1..12b85b3278cd 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c | |||
| @@ -320,7 +320,7 @@ static unsigned long max_dwords(struct drm_i915_gem_object *obj) | |||
| 320 | static int igt_ctx_exec(void *arg) | 320 | static int igt_ctx_exec(void *arg) |
| 321 | { | 321 | { |
| 322 | struct drm_i915_private *i915 = arg; | 322 | struct drm_i915_private *i915 = arg; |
| 323 | struct drm_i915_gem_object *obj; | 323 | struct drm_i915_gem_object *obj = NULL; |
| 324 | struct drm_file *file; | 324 | struct drm_file *file; |
| 325 | IGT_TIMEOUT(end_time); | 325 | IGT_TIMEOUT(end_time); |
| 326 | LIST_HEAD(objects); | 326 | LIST_HEAD(objects); |
| @@ -359,7 +359,7 @@ static int igt_ctx_exec(void *arg) | |||
| 359 | } | 359 | } |
| 360 | 360 | ||
| 361 | for_each_engine(engine, i915, id) { | 361 | for_each_engine(engine, i915, id) { |
| 362 | if (dw == 0) { | 362 | if (!obj) { |
| 363 | obj = create_test_object(ctx, file, &objects); | 363 | obj = create_test_object(ctx, file, &objects); |
| 364 | if (IS_ERR(obj)) { | 364 | if (IS_ERR(obj)) { |
| 365 | err = PTR_ERR(obj); | 365 | err = PTR_ERR(obj); |
| @@ -376,8 +376,10 @@ static int igt_ctx_exec(void *arg) | |||
| 376 | goto out_unlock; | 376 | goto out_unlock; |
| 377 | } | 377 | } |
| 378 | 378 | ||
| 379 | if (++dw == max_dwords(obj)) | 379 | if (++dw == max_dwords(obj)) { |
| 380 | obj = NULL; | ||
| 380 | dw = 0; | 381 | dw = 0; |
| 382 | } | ||
| 381 | ndwords++; | 383 | ndwords++; |
| 382 | } | 384 | } |
| 383 | ncontexts++; | 385 | ncontexts++; |
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index 8fb801fab039..8b05ecb8fdef 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c | |||
| @@ -673,7 +673,7 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) | |||
| 673 | ret = drm_of_find_panel_or_bridge(child, | 673 | ret = drm_of_find_panel_or_bridge(child, |
| 674 | imx_ldb->lvds_mux ? 4 : 2, 0, | 674 | imx_ldb->lvds_mux ? 4 : 2, 0, |
| 675 | &channel->panel, &channel->bridge); | 675 | &channel->panel, &channel->bridge); |
| 676 | if (ret) | 676 | if (ret && ret != -ENODEV) |
| 677 | return ret; | 677 | return ret; |
| 678 | 678 | ||
| 679 | /* panel ddc only if there is no bridge */ | 679 | /* panel ddc only if there is no bridge */ |
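The imx-ldb change treats -ENODEV from drm_of_find_panel_or_bridge() as non-fatal, since that is what the helper returns when the child port simply has no panel or bridge described in the device tree; real errors such as -EPROBE_DEFER still abort the bind. Sketch of the check, with the port index reduced to a placeholder:

	ret = drm_of_find_panel_or_bridge(child, port, 0,
					  &channel->panel, &channel->bridge);
	if (ret && ret != -ENODEV)	/* missing panel/bridge is not an error here */
		return ret;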
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 808b995a990f..b5cc6e12334c 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <drm/drm_of.h> | 19 | #include <drm/drm_of.h> |
| 20 | #include <linux/clk.h> | 20 | #include <linux/clk.h> |
| 21 | #include <linux/component.h> | 21 | #include <linux/component.h> |
| 22 | #include <linux/iopoll.h> | ||
| 22 | #include <linux/irq.h> | 23 | #include <linux/irq.h> |
| 23 | #include <linux/of.h> | 24 | #include <linux/of.h> |
| 24 | #include <linux/of_platform.h> | 25 | #include <linux/of_platform.h> |
| @@ -900,16 +901,12 @@ static int mtk_dsi_host_detach(struct mipi_dsi_host *host, | |||
| 900 | 901 | ||
| 901 | static void mtk_dsi_wait_for_idle(struct mtk_dsi *dsi) | 902 | static void mtk_dsi_wait_for_idle(struct mtk_dsi *dsi) |
| 902 | { | 903 | { |
| 903 | u32 timeout_ms = 500000; /* total 1s ~ 2s timeout */ | 904 | int ret; |
| 904 | 905 | u32 val; | |
| 905 | while (timeout_ms--) { | ||
| 906 | if (!(readl(dsi->regs + DSI_INTSTA) & DSI_BUSY)) | ||
| 907 | break; | ||
| 908 | |||
| 909 | usleep_range(2, 4); | ||
| 910 | } | ||
| 911 | 906 | ||
| 912 | if (timeout_ms == 0) { | 907 | ret = readl_poll_timeout(dsi->regs + DSI_INTSTA, val, !(val & DSI_BUSY), |
| 908 | 4, 2000000); | ||
| 909 | if (ret) { | ||
| 913 | DRM_WARN("polling dsi wait not busy timeout!\n"); | 910 | DRM_WARN("polling dsi wait not busy timeout!\n"); |
| 914 | 911 | ||
| 915 | mtk_dsi_enable(dsi); | 912 | mtk_dsi_enable(dsi); |
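mtk_dsi_wait_for_idle() now relies on the generic readl_poll_timeout() helper from <linux/iopoll.h> instead of a hand-rolled loop: the macro keeps re-reading the register into val until the condition is true or the timeout (in microseconds) expires, returning 0 on success or -ETIMEDOUT. A minimal usage sketch mirroring the call in the hunk:

	#include <linux/iopoll.h>

	u32 val;
	int ret;

	/* poll DSI_INTSTA roughly every 4us until DSI_BUSY clears, up to 2s */
	ret = readl_poll_timeout(dsi->regs + DSI_INTSTA, val,
				 !(val & DSI_BUSY), 4, 2000000);
	if (ret)
		DRM_WARN("polling dsi wait not busy timeout!\n");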
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index 41a1c03b0347..0a4ffd724146 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c | |||
| @@ -1062,7 +1062,7 @@ static int mtk_hdmi_setup_vendor_specific_infoframe(struct mtk_hdmi *hdmi, | |||
| 1062 | } | 1062 | } |
| 1063 | 1063 | ||
| 1064 | err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer)); | 1064 | err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer)); |
| 1065 | if (err) { | 1065 | if (err < 0) { |
| 1066 | dev_err(hdmi->dev, "Failed to pack vendor infoframe: %zd\n", | 1066 | dev_err(hdmi->dev, "Failed to pack vendor infoframe: %zd\n", |
| 1067 | err); | 1067 | err); |
| 1068 | return err; | 1068 | return err; |
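The mtk_hdmi fix matters because hdmi_vendor_infoframe_pack() returns the number of bytes packed on success (a positive ssize_t), not zero, so the old "if (err)" treated every successful pack as a failure. Sketch of the corrected check:

	ssize_t err;

	err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer));
	if (err < 0) {	/* only negative values are errors */
		dev_err(hdmi->dev, "Failed to pack vendor infoframe: %zd\n", err);
		return err;
	}
	/* err now holds the packed length in bytes */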
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 75382f5f0fce..10b227d83e9a 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c | |||
| @@ -152,7 +152,7 @@ static struct regmap_config meson_regmap_config = { | |||
| 152 | .max_register = 0x1000, | 152 | .max_register = 0x1000, |
| 153 | }; | 153 | }; |
| 154 | 154 | ||
| 155 | static int meson_drv_bind(struct device *dev) | 155 | static int meson_drv_bind_master(struct device *dev, bool has_components) |
| 156 | { | 156 | { |
| 157 | struct platform_device *pdev = to_platform_device(dev); | 157 | struct platform_device *pdev = to_platform_device(dev); |
| 158 | struct meson_drm *priv; | 158 | struct meson_drm *priv; |
| @@ -233,10 +233,12 @@ static int meson_drv_bind(struct device *dev) | |||
| 233 | if (ret) | 233 | if (ret) |
| 234 | goto free_drm; | 234 | goto free_drm; |
| 235 | 235 | ||
| 236 | ret = component_bind_all(drm->dev, drm); | 236 | if (has_components) { |
| 237 | if (ret) { | 237 | ret = component_bind_all(drm->dev, drm); |
| 238 | dev_err(drm->dev, "Couldn't bind all components\n"); | 238 | if (ret) { |
| 239 | goto free_drm; | 239 | dev_err(drm->dev, "Couldn't bind all components\n"); |
| 240 | goto free_drm; | ||
| 241 | } | ||
| 240 | } | 242 | } |
| 241 | 243 | ||
| 242 | ret = meson_plane_create(priv); | 244 | ret = meson_plane_create(priv); |
| @@ -276,6 +278,11 @@ free_drm: | |||
| 276 | return ret; | 278 | return ret; |
| 277 | } | 279 | } |
| 278 | 280 | ||
| 281 | static int meson_drv_bind(struct device *dev) | ||
| 282 | { | ||
| 283 | return meson_drv_bind_master(dev, true); | ||
| 284 | } | ||
| 285 | |||
| 279 | static void meson_drv_unbind(struct device *dev) | 286 | static void meson_drv_unbind(struct device *dev) |
| 280 | { | 287 | { |
| 281 | struct drm_device *drm = dev_get_drvdata(dev); | 288 | struct drm_device *drm = dev_get_drvdata(dev); |
| @@ -357,6 +364,9 @@ static int meson_drv_probe(struct platform_device *pdev) | |||
| 357 | count += meson_probe_remote(pdev, &match, np, remote); | 364 | count += meson_probe_remote(pdev, &match, np, remote); |
| 358 | } | 365 | } |
| 359 | 366 | ||
| 367 | if (count && !match) | ||
| 368 | return meson_drv_bind_master(&pdev->dev, false); | ||
| 369 | |||
| 360 | /* If some endpoints were found, initialize the nodes */ | 370 | /* If some endpoints were found, initialize the nodes */ |
| 361 | if (count) { | 371 | if (count) { |
| 362 | dev_info(&pdev->dev, "Queued %d outputs on vpu\n", count); | 372 | dev_info(&pdev->dev, "Queued %d outputs on vpu\n", count); |
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index adb411a078e8..f4b53588e071 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c | |||
| @@ -1173,7 +1173,10 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, | |||
| 1173 | 1173 | ||
| 1174 | 1174 | ||
| 1175 | if (IS_G200_SE(mdev)) { | 1175 | if (IS_G200_SE(mdev)) { |
| 1176 | if (mdev->unique_rev_id >= 0x02) { | 1176 | if (mdev->unique_rev_id >= 0x04) { |
| 1177 | WREG8(MGAREG_CRTCEXT_INDEX, 0x06); | ||
| 1178 | WREG8(MGAREG_CRTCEXT_DATA, 0); | ||
| 1179 | } else if (mdev->unique_rev_id >= 0x02) { | ||
| 1177 | u8 hi_pri_lvl; | 1180 | u8 hi_pri_lvl; |
| 1178 | u32 bpp; | 1181 | u32 bpp; |
| 1179 | u32 mb; | 1182 | u32 mb; |
| @@ -1639,6 +1642,10 @@ static int mga_vga_mode_valid(struct drm_connector *connector, | |||
| 1639 | if (mga_vga_calculate_mode_bandwidth(mode, bpp) | 1642 | if (mga_vga_calculate_mode_bandwidth(mode, bpp) |
| 1640 | > (30100 * 1024)) | 1643 | > (30100 * 1024)) |
| 1641 | return MODE_BANDWIDTH; | 1644 | return MODE_BANDWIDTH; |
| 1645 | } else { | ||
| 1646 | if (mga_vga_calculate_mode_bandwidth(mode, bpp) | ||
| 1647 | > (55000 * 1024)) | ||
| 1648 | return MODE_BANDWIDTH; | ||
| 1642 | } | 1649 | } |
| 1643 | } else if (mdev->type == G200_WB) { | 1650 | } else if (mdev->type == G200_WB) { |
| 1644 | if (mode->hdisplay > 1280) | 1651 | if (mode->hdisplay > 1280) |
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index 5b8e23d051f2..0a31cd6d01ce 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig | |||
| @@ -13,6 +13,7 @@ config DRM_MSM | |||
| 13 | select QCOM_SCM | 13 | select QCOM_SCM |
| 14 | select SND_SOC_HDMI_CODEC if SND_SOC | 14 | select SND_SOC_HDMI_CODEC if SND_SOC |
| 15 | select SYNC_FILE | 15 | select SYNC_FILE |
| 16 | select PM_OPP | ||
| 16 | default y | 17 | default y |
| 17 | help | 18 | help |
| 18 | DRM/KMS driver for MSM/snapdragon. | 19 | DRM/KMS driver for MSM/snapdragon. |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c index f8f48d014978..9c34d7824988 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c | |||
| @@ -116,7 +116,7 @@ static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq, | |||
| 116 | return 0; | 116 | return 0; |
| 117 | } | 117 | } |
| 118 | 118 | ||
| 119 | static struct irq_domain_ops mdss_hw_irqdomain_ops = { | 119 | static const struct irq_domain_ops mdss_hw_irqdomain_ops = { |
| 120 | .map = mdss_hw_irqdomain_map, | 120 | .map = mdss_hw_irqdomain_map, |
| 121 | .xlate = irq_domain_xlate_onecell, | 121 | .xlate = irq_domain_xlate_onecell, |
| 122 | }; | 122 | }; |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index a38c5fe6cc19..7d3741215387 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | |||
| @@ -225,9 +225,10 @@ mdp5_plane_duplicate_state(struct drm_plane *plane) | |||
| 225 | 225 | ||
| 226 | mdp5_state = kmemdup(to_mdp5_plane_state(plane->state), | 226 | mdp5_state = kmemdup(to_mdp5_plane_state(plane->state), |
| 227 | sizeof(*mdp5_state), GFP_KERNEL); | 227 | sizeof(*mdp5_state), GFP_KERNEL); |
| 228 | if (!mdp5_state) | ||
| 229 | return NULL; | ||
| 228 | 230 | ||
| 229 | if (mdp5_state && mdp5_state->base.fb) | 231 | __drm_atomic_helper_plane_duplicate_state(plane, &mdp5_state->base); |
| 230 | drm_framebuffer_reference(mdp5_state->base.fb); | ||
| 231 | 232 | ||
| 232 | return &mdp5_state->base; | 233 | return &mdp5_state->base; |
| 233 | } | 234 | } |
| @@ -444,6 +445,10 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state, | |||
| 444 | mdp5_pipe_release(state->state, old_hwpipe); | 445 | mdp5_pipe_release(state->state, old_hwpipe); |
| 445 | mdp5_pipe_release(state->state, old_right_hwpipe); | 446 | mdp5_pipe_release(state->state, old_right_hwpipe); |
| 446 | } | 447 | } |
| 448 | } else { | ||
| 449 | mdp5_pipe_release(state->state, mdp5_state->hwpipe); | ||
| 450 | mdp5_pipe_release(state->state, mdp5_state->r_hwpipe); | ||
| 451 | mdp5_state->hwpipe = mdp5_state->r_hwpipe = NULL; | ||
| 447 | } | 452 | } |
| 448 | 453 | ||
| 449 | return 0; | 454 | return 0; |
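The mdp5 plane fix replaces the hand-rolled copy of the base plane state (which only took an fb reference, and only when the allocation succeeded) with __drm_atomic_helper_plane_duplicate_state(), the helper intended for drivers that subclass drm_plane_state. A minimal sketch of that duplicate_state pattern, using a hypothetical my_plane_state wrapper:

	static struct drm_plane_state *
	example_plane_duplicate_state(struct drm_plane *plane)
	{
		struct my_plane_state *state;

		state = kmemdup(to_my_plane_state(plane->state),
				sizeof(*state), GFP_KERNEL);
		if (!state)
			return NULL;

		/* copies the base fields and takes the fb/blob references */
		__drm_atomic_helper_plane_duplicate_state(plane, &state->base);

		return &state->base;
	}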
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 87b5695d4034..9d498eb81906 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c | |||
| @@ -830,6 +830,7 @@ static struct drm_driver msm_driver = { | |||
| 830 | .prime_fd_to_handle = drm_gem_prime_fd_to_handle, | 830 | .prime_fd_to_handle = drm_gem_prime_fd_to_handle, |
| 831 | .gem_prime_export = drm_gem_prime_export, | 831 | .gem_prime_export = drm_gem_prime_export, |
| 832 | .gem_prime_import = drm_gem_prime_import, | 832 | .gem_prime_import = drm_gem_prime_import, |
| 833 | .gem_prime_res_obj = msm_gem_prime_res_obj, | ||
| 833 | .gem_prime_pin = msm_gem_prime_pin, | 834 | .gem_prime_pin = msm_gem_prime_pin, |
| 834 | .gem_prime_unpin = msm_gem_prime_unpin, | 835 | .gem_prime_unpin = msm_gem_prime_unpin, |
| 835 | .gem_prime_get_sg_table = msm_gem_prime_get_sg_table, | 836 | .gem_prime_get_sg_table = msm_gem_prime_get_sg_table, |
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 28b6f9ba5066..1b26ca626528 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h | |||
| @@ -224,6 +224,7 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj); | |||
| 224 | void *msm_gem_prime_vmap(struct drm_gem_object *obj); | 224 | void *msm_gem_prime_vmap(struct drm_gem_object *obj); |
| 225 | void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); | 225 | void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); |
| 226 | int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); | 226 | int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); |
| 227 | struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj); | ||
| 227 | struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, | 228 | struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, |
| 228 | struct dma_buf_attachment *attach, struct sg_table *sg); | 229 | struct dma_buf_attachment *attach, struct sg_table *sg); |
| 229 | int msm_gem_prime_pin(struct drm_gem_object *obj); | 230 | int msm_gem_prime_pin(struct drm_gem_object *obj); |
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c index 3f299c537b77..a2f89bac9c16 100644 --- a/drivers/gpu/drm/msm/msm_fence.c +++ b/drivers/gpu/drm/msm/msm_fence.c | |||
| @@ -99,8 +99,8 @@ void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence) | |||
| 99 | } | 99 | } |
| 100 | 100 | ||
| 101 | struct msm_fence { | 101 | struct msm_fence { |
| 102 | struct msm_fence_context *fctx; | ||
| 103 | struct dma_fence base; | 102 | struct dma_fence base; |
| 103 | struct msm_fence_context *fctx; | ||
| 104 | }; | 104 | }; |
| 105 | 105 | ||
| 106 | static inline struct msm_fence *to_msm_fence(struct dma_fence *fence) | 106 | static inline struct msm_fence *to_msm_fence(struct dma_fence *fence) |
| @@ -130,19 +130,13 @@ static bool msm_fence_signaled(struct dma_fence *fence) | |||
| 130 | return fence_completed(f->fctx, f->base.seqno); | 130 | return fence_completed(f->fctx, f->base.seqno); |
| 131 | } | 131 | } |
| 132 | 132 | ||
| 133 | static void msm_fence_release(struct dma_fence *fence) | ||
| 134 | { | ||
| 135 | struct msm_fence *f = to_msm_fence(fence); | ||
| 136 | kfree_rcu(f, base.rcu); | ||
| 137 | } | ||
| 138 | |||
| 139 | static const struct dma_fence_ops msm_fence_ops = { | 133 | static const struct dma_fence_ops msm_fence_ops = { |
| 140 | .get_driver_name = msm_fence_get_driver_name, | 134 | .get_driver_name = msm_fence_get_driver_name, |
| 141 | .get_timeline_name = msm_fence_get_timeline_name, | 135 | .get_timeline_name = msm_fence_get_timeline_name, |
| 142 | .enable_signaling = msm_fence_enable_signaling, | 136 | .enable_signaling = msm_fence_enable_signaling, |
| 143 | .signaled = msm_fence_signaled, | 137 | .signaled = msm_fence_signaled, |
| 144 | .wait = dma_fence_default_wait, | 138 | .wait = dma_fence_default_wait, |
| 145 | .release = msm_fence_release, | 139 | .release = dma_fence_free, |
| 146 | }; | 140 | }; |
| 147 | 141 | ||
| 148 | struct dma_fence * | 142 | struct dma_fence * |
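On the msm_fence reordering above: dma_fence_free() simply does kfree_rcu() on the dma_fence pointer it is handed, so it only frees the right allocation when the dma_fence is the first member of the wrapping structure, i.e. when the fence pointer and the allocated pointer are the same address; with 'base' first, the driver-private release hook becomes redundant. A hedged sketch of that layout with an invented foo_fence wrapper (not the msm code itself):

#include <linux/dma-fence.h>
#include <linux/slab.h>

/* Invented wrapper type for illustration; not the msm struct. */
struct foo_fence {
	struct dma_fence base;	/* must stay first for dma_fence_free() */
	void *driver_data;
};

static const char *foo_fence_get_driver_name(struct dma_fence *fence)
{
	return "foo";
}

static const char *foo_fence_get_timeline_name(struct dma_fence *fence)
{
	return "foo-timeline";
}

static bool foo_fence_enable_signaling(struct dma_fence *fence)
{
	return true;	/* signaling is driven elsewhere in this sketch */
}

static const struct dma_fence_ops foo_fence_ops = {
	.get_driver_name   = foo_fence_get_driver_name,
	.get_timeline_name = foo_fence_get_timeline_name,
	.enable_signaling  = foo_fence_enable_signaling,
	.wait              = dma_fence_default_wait,
	/*
	 * dma_fence_free() is kfree_rcu() on the dma_fence pointer; because
	 * 'base' sits at offset 0, that pointer is also the start of the
	 * foo_fence allocation, so the whole wrapper is freed.
	 */
	.release           = dma_fence_free,
};

static struct dma_fence *foo_fence_create(spinlock_t *lock, u64 context,
					  unsigned int seqno)
{
	struct foo_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;

	dma_fence_init(&f->base, &foo_fence_ops, lock, context, seqno);
	return &f->base;
}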
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 68e509b3b9e4..50289a23baf8 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c | |||
| @@ -758,6 +758,8 @@ static int msm_gem_new_impl(struct drm_device *dev, | |||
| 758 | struct msm_gem_object *msm_obj; | 758 | struct msm_gem_object *msm_obj; |
| 759 | bool use_vram = false; | 759 | bool use_vram = false; |
| 760 | 760 | ||
| 761 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | ||
| 762 | |||
| 761 | switch (flags & MSM_BO_CACHE_MASK) { | 763 | switch (flags & MSM_BO_CACHE_MASK) { |
| 762 | case MSM_BO_UNCACHED: | 764 | case MSM_BO_UNCACHED: |
| 763 | case MSM_BO_CACHED: | 765 | case MSM_BO_CACHED: |
| @@ -853,7 +855,11 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev, | |||
| 853 | 855 | ||
| 854 | size = PAGE_ALIGN(dmabuf->size); | 856 | size = PAGE_ALIGN(dmabuf->size); |
| 855 | 857 | ||
| 858 | /* Take mutex so we can modify the inactive list in msm_gem_new_impl */ | ||
| 859 | mutex_lock(&dev->struct_mutex); | ||
| 856 | ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj); | 860 | ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj); |
| 861 | mutex_unlock(&dev->struct_mutex); | ||
| 862 | |||
| 857 | if (ret) | 863 | if (ret) |
| 858 | goto fail; | 864 | goto fail; |
| 859 | 865 | ||
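The WARN_ON(!mutex_is_locked(...)) added to msm_gem_new_impl() documents its locking contract at runtime; where lockdep is available, lockdep_assert_held() expresses the same contract (and additionally checks that the current task is the holder) while compiling away on production configs. A tiny illustrative sketch with placeholder types, not msm's real structures:

#include <linux/mutex.h>
#include <linux/lockdep.h>
#include <linux/list.h>
#include <linux/bug.h>

/* Placeholder types for illustration only. */
struct foo_device {
	struct mutex struct_mutex;
	struct list_head inactive_list;
};

struct foo_object {
	struct list_head mm_list;
};

static void foo_add_to_inactive(struct foo_device *dev, struct foo_object *obj)
{
	/* Runtime check, always compiled in (only checks someone holds it): */
	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Lockdep-only check, zero cost without CONFIG_LOCKDEP: */
	lockdep_assert_held(&dev->struct_mutex);

	list_add_tail(&obj->mm_list, &dev->inactive_list);
}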
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c index 60bb290700ce..13403c6da6c7 100644 --- a/drivers/gpu/drm/msm/msm_gem_prime.c +++ b/drivers/gpu/drm/msm/msm_gem_prime.c | |||
| @@ -70,3 +70,10 @@ void msm_gem_prime_unpin(struct drm_gem_object *obj) | |||
| 70 | if (!obj->import_attach) | 70 | if (!obj->import_attach) |
| 71 | msm_gem_put_pages(obj); | 71 | msm_gem_put_pages(obj); |
| 72 | } | 72 | } |
| 73 | |||
| 74 | struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj) | ||
| 75 | { | ||
| 76 | struct msm_gem_object *msm_obj = to_msm_bo(obj); | ||
| 77 | |||
| 78 | return msm_obj->resv; | ||
| 79 | } | ||
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 1c545ebe6a5a..7832e6421d25 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c | |||
| @@ -410,12 +410,11 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, | |||
| 410 | if (!in_fence) | 410 | if (!in_fence) |
| 411 | return -EINVAL; | 411 | return -EINVAL; |
| 412 | 412 | ||
| 413 | /* TODO if we get an array-fence due to userspace merging multiple | 413 | /* |
| 414 | * fences, we need a way to determine if all the backing fences | 414 | * Wait if the fence is from a foreign context, or if the fence |
| 415 | * are from our own context.. | 415 | * array contains any fence from a foreign context. |
| 416 | */ | 416 | */ |
| 417 | 417 | if (!dma_fence_match_context(in_fence, gpu->fctx->context)) { | |
| 418 | if (in_fence->context != gpu->fctx->context) { | ||
| 419 | ret = dma_fence_wait(in_fence, true); | 418 | ret = dma_fence_wait(in_fence, true); |
| 420 | if (ret) | 419 | if (ret) |
| 421 | return ret; | 420 | return ret; |
| @@ -496,8 +495,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, | |||
| 496 | goto out; | 495 | goto out; |
| 497 | } | 496 | } |
| 498 | 497 | ||
| 499 | if ((submit_cmd.size + submit_cmd.submit_offset) >= | 498 | if (!submit_cmd.size || |
| 500 | msm_obj->base.size) { | 499 | ((submit_cmd.size + submit_cmd.submit_offset) > |
| 500 | msm_obj->base.size)) { | ||
| 501 | DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size); | 501 | DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size); |
| 502 | ret = -EINVAL; | 502 | ret = -EINVAL; |
| 503 | goto out; | 503 | goto out; |
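On the cmdstream bounds check above: rejecting size == 0 and tightening >= to > is the visible change, but since submit_cmd.size and submit_cmd.submit_offset are both 32-bit values coming from userspace, their sum can in principle still wrap before the comparison. A hedged, overflow-proof formulation of the same validation follows; it is a sketch, not a claim about what msm additionally needs.

#include <linux/types.h>

/*
 * Illustrative helper, not part of the msm UAPI validation: true when
 * [offset, offset + size) lies entirely inside an object of obj_size
 * bytes, without relying on offset + size not wrapping.
 */
static bool cmdstream_range_ok(u32 offset, u32 size, size_t obj_size)
{
	if (!size)
		return false;
	if (offset > obj_size)
		return false;
	return size <= obj_size - offset;
}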
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 97b9c38c6b3f..0fdc88d79ca8 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c | |||
| @@ -549,9 +549,9 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu) | |||
| 549 | gpu->grp_clks[i] = get_clock(dev, name); | 549 | gpu->grp_clks[i] = get_clock(dev, name); |
| 550 | 550 | ||
| 551 | /* Remember the key clocks that we need to control later */ | 551 | /* Remember the key clocks that we need to control later */ |
| 552 | if (!strcmp(name, "core")) | 552 | if (!strcmp(name, "core") || !strcmp(name, "core_clk")) |
| 553 | gpu->core_clk = gpu->grp_clks[i]; | 553 | gpu->core_clk = gpu->grp_clks[i]; |
| 554 | else if (!strcmp(name, "rbbmtimer")) | 554 | else if (!strcmp(name, "rbbmtimer") || !strcmp(name, "rbbmtimer_clk")) |
| 555 | gpu->rbbmtimer_clk = gpu->grp_clks[i]; | 555 | gpu->rbbmtimer_clk = gpu->grp_clks[i]; |
| 556 | 556 | ||
| 557 | ++i; | 557 | ++i; |
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c index 1144e0c9e894..0abe77675b76 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c | |||
| @@ -35,6 +35,13 @@ | |||
| 35 | #include "mxsfb_drv.h" | 35 | #include "mxsfb_drv.h" |
| 36 | #include "mxsfb_regs.h" | 36 | #include "mxsfb_regs.h" |
| 37 | 37 | ||
| 38 | #define MXS_SET_ADDR 0x4 | ||
| 39 | #define MXS_CLR_ADDR 0x8 | ||
| 40 | #define MODULE_CLKGATE BIT(30) | ||
| 41 | #define MODULE_SFTRST BIT(31) | ||
| 42 | /* 1 second timeout should be plenty of time for block reset */ | ||
| 43 | #define RESET_TIMEOUT 1000000 | ||
| 44 | |||
| 38 | static u32 set_hsync_pulse_width(struct mxsfb_drm_private *mxsfb, u32 val) | 45 | static u32 set_hsync_pulse_width(struct mxsfb_drm_private *mxsfb, u32 val) |
| 39 | { | 46 | { |
| 40 | return (val & mxsfb->devdata->hs_wdth_mask) << | 47 | return (val & mxsfb->devdata->hs_wdth_mask) << |
| @@ -159,6 +166,36 @@ static void mxsfb_disable_controller(struct mxsfb_drm_private *mxsfb) | |||
| 159 | clk_disable_unprepare(mxsfb->clk_disp_axi); | 166 | clk_disable_unprepare(mxsfb->clk_disp_axi); |
| 160 | } | 167 | } |
| 161 | 168 | ||
| 169 | /* | ||
| 170 | * Clear the bit and poll until it reads back as cleared. This is | ||
| 171 | * usually called with a reset address and a mask of either SFTRST | ||
| 172 | * (bit 31) or CLKGATE (bit 30). | ||
| 173 | */ | ||
| 174 | static int clear_poll_bit(void __iomem *addr, u32 mask) | ||
| 175 | { | ||
| 176 | u32 reg; | ||
| 177 | |||
| 178 | writel(mask, addr + MXS_CLR_ADDR); | ||
| 179 | return readl_poll_timeout(addr, reg, !(reg & mask), 0, RESET_TIMEOUT); | ||
| 180 | } | ||
| 181 | |||
| 182 | static int mxsfb_reset_block(void __iomem *reset_addr) | ||
| 183 | { | ||
| 184 | int ret; | ||
| 185 | |||
| 186 | ret = clear_poll_bit(reset_addr, MODULE_SFTRST); | ||
| 187 | if (ret) | ||
| 188 | return ret; | ||
| 189 | |||
| 190 | writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR); | ||
| 191 | |||
| 192 | ret = clear_poll_bit(reset_addr, MODULE_SFTRST); | ||
| 193 | if (ret) | ||
| 194 | return ret; | ||
| 195 | |||
| 196 | return clear_poll_bit(reset_addr, MODULE_CLKGATE); | ||
| 197 | } | ||
| 198 | |||
| 162 | static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb) | 199 | static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb) |
| 163 | { | 200 | { |
| 164 | struct drm_display_mode *m = &mxsfb->pipe.crtc.state->adjusted_mode; | 201 | struct drm_display_mode *m = &mxsfb->pipe.crtc.state->adjusted_mode; |
| @@ -173,6 +210,11 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb) | |||
| 173 | */ | 210 | */ |
| 174 | mxsfb_enable_axi_clk(mxsfb); | 211 | mxsfb_enable_axi_clk(mxsfb); |
| 175 | 212 | ||
| 213 | /* Mandatory eLCDIF reset as per the Reference Manual */ | ||
| 214 | err = mxsfb_reset_block(mxsfb->base); | ||
| 215 | if (err) | ||
| 216 | return; | ||
| 217 | |||
| 176 | /* Clear the FIFOs */ | 218 | /* Clear the FIFOs */ |
| 177 | writel(CTRL1_FIFO_CLEAR, mxsfb->base + LCDC_CTRL1 + REG_SET); | 219 | writel(CTRL1_FIFO_CLEAR, mxsfb->base + LCDC_CTRL1 + REG_SET); |
| 178 | 220 | ||
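For reference on the helper that clear_poll_bit() is built on: readl_poll_timeout(addr, val, cond, sleep_us, timeout_us) from <linux/iopoll.h> keeps re-reading *addr into val until cond evaluates true, sleeping sleep_us between reads (0 means a tight poll) and returning -ETIMEDOUT once timeout_us microseconds have elapsed, 0 otherwise. A minimal usage sketch with made-up register names:

#include <linux/iopoll.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/types.h>

#define FOO_CTRL	0x00		/* hypothetical register offset */
#define FOO_CTRL_BUSY	BIT(0)		/* hypothetical busy flag */

static int foo_wait_not_busy(void __iomem *base)
{
	u32 reg;

	/* Busy-poll FOO_CTRL until BUSY drops, give up after 1 ms. */
	return readl_poll_timeout(base + FOO_CTRL, reg,
				  !(reg & FOO_CTRL_BUSY), 0, 1000);
}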
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h index 6a567fe347b3..820a4805916f 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | 4 | ||
| 5 | struct nvkm_alarm { | 5 | struct nvkm_alarm { |
| 6 | struct list_head head; | 6 | struct list_head head; |
| 7 | struct list_head exec; | ||
| 7 | u64 timestamp; | 8 | u64 timestamp; |
| 8 | void (*func)(struct nvkm_alarm *); | 9 | void (*func)(struct nvkm_alarm *); |
| 9 | }; | 10 | }; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 36268e1802b5..15a13d09d431 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c | |||
| @@ -80,7 +80,7 @@ int nouveau_modeset = -1; | |||
| 80 | module_param_named(modeset, nouveau_modeset, int, 0400); | 80 | module_param_named(modeset, nouveau_modeset, int, 0400); |
| 81 | 81 | ||
| 82 | MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)"); | 82 | MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)"); |
| 83 | int nouveau_runtime_pm = -1; | 83 | static int nouveau_runtime_pm = -1; |
| 84 | module_param_named(runpm, nouveau_runtime_pm, int, 0400); | 84 | module_param_named(runpm, nouveau_runtime_pm, int, 0400); |
| 85 | 85 | ||
| 86 | static struct drm_driver driver_stub; | 86 | static struct drm_driver driver_stub; |
| @@ -495,7 +495,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) | |||
| 495 | nouveau_fbcon_init(dev); | 495 | nouveau_fbcon_init(dev); |
| 496 | nouveau_led_init(dev); | 496 | nouveau_led_init(dev); |
| 497 | 497 | ||
| 498 | if (nouveau_runtime_pm != 0) { | 498 | if (nouveau_pmops_runtime()) { |
| 499 | pm_runtime_use_autosuspend(dev->dev); | 499 | pm_runtime_use_autosuspend(dev->dev); |
| 500 | pm_runtime_set_autosuspend_delay(dev->dev, 5000); | 500 | pm_runtime_set_autosuspend_delay(dev->dev, 5000); |
| 501 | pm_runtime_set_active(dev->dev); | 501 | pm_runtime_set_active(dev->dev); |
| @@ -527,7 +527,7 @@ nouveau_drm_unload(struct drm_device *dev) | |||
| 527 | { | 527 | { |
| 528 | struct nouveau_drm *drm = nouveau_drm(dev); | 528 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 529 | 529 | ||
| 530 | if (nouveau_runtime_pm != 0) { | 530 | if (nouveau_pmops_runtime()) { |
| 531 | pm_runtime_get_sync(dev->dev); | 531 | pm_runtime_get_sync(dev->dev); |
| 532 | pm_runtime_forbid(dev->dev); | 532 | pm_runtime_forbid(dev->dev); |
| 533 | } | 533 | } |
| @@ -726,6 +726,14 @@ nouveau_pmops_thaw(struct device *dev) | |||
| 726 | return nouveau_do_resume(drm_dev, false); | 726 | return nouveau_do_resume(drm_dev, false); |
| 727 | } | 727 | } |
| 728 | 728 | ||
| 729 | bool | ||
| 730 | nouveau_pmops_runtime(void) | ||
| 731 | { | ||
| 732 | if (nouveau_runtime_pm == -1) | ||
| 733 | return nouveau_is_optimus() || nouveau_is_v1_dsm(); | ||
| 734 | return nouveau_runtime_pm == 1; | ||
| 735 | } | ||
| 736 | |||
| 729 | static int | 737 | static int |
| 730 | nouveau_pmops_runtime_suspend(struct device *dev) | 738 | nouveau_pmops_runtime_suspend(struct device *dev) |
| 731 | { | 739 | { |
| @@ -733,14 +741,7 @@ nouveau_pmops_runtime_suspend(struct device *dev) | |||
| 733 | struct drm_device *drm_dev = pci_get_drvdata(pdev); | 741 | struct drm_device *drm_dev = pci_get_drvdata(pdev); |
| 734 | int ret; | 742 | int ret; |
| 735 | 743 | ||
| 736 | if (nouveau_runtime_pm == 0) { | 744 | if (!nouveau_pmops_runtime()) { |
| 737 | pm_runtime_forbid(dev); | ||
| 738 | return -EBUSY; | ||
| 739 | } | ||
| 740 | |||
| 741 | /* are we optimus enabled? */ | ||
| 742 | if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) { | ||
| 743 | DRM_DEBUG_DRIVER("failing to power off - not optimus\n"); | ||
| 744 | pm_runtime_forbid(dev); | 745 | pm_runtime_forbid(dev); |
| 745 | return -EBUSY; | 746 | return -EBUSY; |
| 746 | } | 747 | } |
| @@ -765,8 +766,10 @@ nouveau_pmops_runtime_resume(struct device *dev) | |||
| 765 | struct nvif_device *device = &nouveau_drm(drm_dev)->client.device; | 766 | struct nvif_device *device = &nouveau_drm(drm_dev)->client.device; |
| 766 | int ret; | 767 | int ret; |
| 767 | 768 | ||
| 768 | if (nouveau_runtime_pm == 0) | 769 | if (!nouveau_pmops_runtime()) { |
| 769 | return -EINVAL; | 770 | pm_runtime_forbid(dev); |
| 771 | return -EBUSY; | ||
| 772 | } | ||
| 770 | 773 | ||
| 771 | pci_set_power_state(pdev, PCI_D0); | 774 | pci_set_power_state(pdev, PCI_D0); |
| 772 | pci_restore_state(pdev); | 775 | pci_restore_state(pdev); |
| @@ -796,14 +799,7 @@ nouveau_pmops_runtime_idle(struct device *dev) | |||
| 796 | struct nouveau_drm *drm = nouveau_drm(drm_dev); | 799 | struct nouveau_drm *drm = nouveau_drm(drm_dev); |
| 797 | struct drm_crtc *crtc; | 800 | struct drm_crtc *crtc; |
| 798 | 801 | ||
| 799 | if (nouveau_runtime_pm == 0) { | 802 | if (!nouveau_pmops_runtime()) { |
| 800 | pm_runtime_forbid(dev); | ||
| 801 | return -EBUSY; | ||
| 802 | } | ||
| 803 | |||
| 804 | /* are we optimus enabled? */ | ||
| 805 | if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) { | ||
| 806 | DRM_DEBUG_DRIVER("failing to power off - not optimus\n"); | ||
| 807 | pm_runtime_forbid(dev); | 803 | pm_runtime_forbid(dev); |
| 808 | return -EBUSY; | 804 | return -EBUSY; |
| 809 | } | 805 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index eadec2f49ad3..a11b6aaed325 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h | |||
| @@ -108,8 +108,6 @@ nouveau_cli(struct drm_file *fpriv) | |||
| 108 | #include <nvif/object.h> | 108 | #include <nvif/object.h> |
| 109 | #include <nvif/device.h> | 109 | #include <nvif/device.h> |
| 110 | 110 | ||
| 111 | extern int nouveau_runtime_pm; | ||
| 112 | |||
| 113 | struct nouveau_drm { | 111 | struct nouveau_drm { |
| 114 | struct nouveau_cli client; | 112 | struct nouveau_cli client; |
| 115 | struct drm_device *dev; | 113 | struct drm_device *dev; |
| @@ -195,6 +193,7 @@ nouveau_drm(struct drm_device *dev) | |||
| 195 | 193 | ||
| 196 | int nouveau_pmops_suspend(struct device *); | 194 | int nouveau_pmops_suspend(struct device *); |
| 197 | int nouveau_pmops_resume(struct device *); | 195 | int nouveau_pmops_resume(struct device *); |
| 196 | bool nouveau_pmops_runtime(void); | ||
| 198 | 197 | ||
| 199 | #include <nvkm/core/tegra.h> | 198 | #include <nvkm/core/tegra.h> |
| 200 | 199 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c index a4aacbc0cec8..02fe0efb9e16 100644 --- a/drivers/gpu/drm/nouveau/nouveau_vga.c +++ b/drivers/gpu/drm/nouveau/nouveau_vga.c | |||
| @@ -87,7 +87,7 @@ void | |||
| 87 | nouveau_vga_init(struct nouveau_drm *drm) | 87 | nouveau_vga_init(struct nouveau_drm *drm) |
| 88 | { | 88 | { |
| 89 | struct drm_device *dev = drm->dev; | 89 | struct drm_device *dev = drm->dev; |
| 90 | bool runtime = false; | 90 | bool runtime = nouveau_pmops_runtime(); |
| 91 | 91 | ||
| 92 | /* only relevant for PCI devices */ | 92 | /* only relevant for PCI devices */ |
| 93 | if (!dev->pdev) | 93 | if (!dev->pdev) |
| @@ -99,10 +99,6 @@ nouveau_vga_init(struct nouveau_drm *drm) | |||
| 99 | if (pci_is_thunderbolt_attached(dev->pdev)) | 99 | if (pci_is_thunderbolt_attached(dev->pdev)) |
| 100 | return; | 100 | return; |
| 101 | 101 | ||
| 102 | if (nouveau_runtime_pm == 1) | ||
| 103 | runtime = true; | ||
| 104 | if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm())) | ||
| 105 | runtime = true; | ||
| 106 | vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops, runtime); | 102 | vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops, runtime); |
| 107 | 103 | ||
| 108 | if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus()) | 104 | if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus()) |
| @@ -113,18 +109,13 @@ void | |||
| 113 | nouveau_vga_fini(struct nouveau_drm *drm) | 109 | nouveau_vga_fini(struct nouveau_drm *drm) |
| 114 | { | 110 | { |
| 115 | struct drm_device *dev = drm->dev; | 111 | struct drm_device *dev = drm->dev; |
| 116 | bool runtime = false; | 112 | bool runtime = nouveau_pmops_runtime(); |
| 117 | 113 | ||
| 118 | vga_client_register(dev->pdev, NULL, NULL, NULL); | 114 | vga_client_register(dev->pdev, NULL, NULL, NULL); |
| 119 | 115 | ||
| 120 | if (pci_is_thunderbolt_attached(dev->pdev)) | 116 | if (pci_is_thunderbolt_attached(dev->pdev)) |
| 121 | return; | 117 | return; |
| 122 | 118 | ||
| 123 | if (nouveau_runtime_pm == 1) | ||
| 124 | runtime = true; | ||
| 125 | if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm())) | ||
| 126 | runtime = true; | ||
| 127 | |||
| 128 | vga_switcheroo_unregister_client(dev->pdev); | 119 | vga_switcheroo_unregister_client(dev->pdev); |
| 129 | if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus()) | 120 | if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus()) |
| 130 | vga_switcheroo_fini_domain_pm_ops(drm->dev->dev); | 121 | vga_switcheroo_fini_domain_pm_ops(drm->dev->dev); |
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index a7663249b3ba..06e564a9ccb2 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c | |||
| @@ -2107,7 +2107,8 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state) | |||
| 2107 | asyc->set.dither = true; | 2107 | asyc->set.dither = true; |
| 2108 | } | 2108 | } |
| 2109 | } else { | 2109 | } else { |
| 2110 | asyc->set.mask = ~0; | 2110 | if (asyc) |
| 2111 | asyc->set.mask = ~0; | ||
| 2111 | asyh->set.mask = ~0; | 2112 | asyh->set.mask = ~0; |
| 2112 | } | 2113 | } |
| 2113 | 2114 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c index f2a86eae0a0d..2437f7d41ca2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c | |||
| @@ -50,7 +50,8 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr) | |||
| 50 | /* Move to completed list. We'll drop the lock before | 50 | /* Move to completed list. We'll drop the lock before |
| 51 | * executing the callback so it can reschedule itself. | 51 | * executing the callback so it can reschedule itself. |
| 52 | */ | 52 | */ |
| 53 | list_move_tail(&alarm->head, &exec); | 53 | list_del_init(&alarm->head); |
| 54 | list_add(&alarm->exec, &exec); | ||
| 54 | } | 55 | } |
| 55 | 56 | ||
| 56 | /* Shut down interrupt if no more pending alarms. */ | 57 | /* Shut down interrupt if no more pending alarms. */ |
| @@ -59,8 +60,8 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr) | |||
| 59 | spin_unlock_irqrestore(&tmr->lock, flags); | 60 | spin_unlock_irqrestore(&tmr->lock, flags); |
| 60 | 61 | ||
| 61 | /* Execute completed callbacks. */ | 62 | /* Execute completed callbacks. */ |
| 62 | list_for_each_entry_safe(alarm, atemp, &exec, head) { | 63 | list_for_each_entry_safe(alarm, atemp, &exec, exec) { |
| 63 | list_del_init(&alarm->head); | 64 | list_del(&alarm->exec); |
| 64 | alarm->func(alarm); | 65 | alarm->func(alarm); |
| 65 | } | 66 | } |
| 66 | } | 67 | } |
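Why nvkm_alarm grows a second list_head in the hunks above: the trigger path parks expired alarms on a local list and drops tmr->lock before running their callbacks, and a callback is allowed to re-arm its alarm, which links alarm->head back into the pending list. When the local list reused alarm->head, that re-arm happened on the very list_head the trigger loop was still walking. Giving the temporary execution list its own member decouples the two lifetimes. A condensed sketch of the pattern, with the expiry bookkeeping left out:

#include <linux/list.h>
#include <linux/spinlock.h>

/* Illustrative stand-in for struct nvkm_alarm. */
struct foo_alarm {
	struct list_head head;	/* linkage on the timer's pending list */
	struct list_head exec;	/* linkage on a local run list */
	void (*func)(struct foo_alarm *);
};

static void foo_alarm_trigger(spinlock_t *lock, struct list_head *pending)
{
	struct foo_alarm *alarm, *atemp;
	unsigned long flags;
	LIST_HEAD(exec);

	spin_lock_irqsave(lock, flags);
	list_for_each_entry_safe(alarm, atemp, pending, head) {
		list_del_init(&alarm->head);	/* off the pending list */
		list_add(&alarm->exec, &exec);	/* onto the local list  */
	}
	spin_unlock_irqrestore(lock, flags);

	/*
	 * A callback may re-add alarm->head to the pending list; that no
	 * longer disturbs the 'exec' linkage this loop is iterating.
	 */
	list_for_each_entry_safe(alarm, atemp, &exec, exec) {
		list_del(&alarm->exec);
		alarm->func(alarm);
	}
}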
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 008c145b7f29..ca44233ceacc 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c | |||
| @@ -9267,8 +9267,11 @@ static void dce8_program_watermarks(struct radeon_device *rdev, | |||
| 9267 | u32 tmp, wm_mask; | 9267 | u32 tmp, wm_mask; |
| 9268 | 9268 | ||
| 9269 | if (radeon_crtc->base.enabled && num_heads && mode) { | 9269 | if (radeon_crtc->base.enabled && num_heads && mode) { |
| 9270 | active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; | 9270 | active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, |
| 9271 | line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); | 9271 | (u32)mode->clock); |
| 9272 | line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, | ||
| 9273 | (u32)mode->clock); | ||
| 9274 | line_time = min(line_time, (u32)65535); | ||
| 9272 | 9275 | ||
| 9273 | /* watermark for high clocks */ | 9276 | /* watermark for high clocks */ |
| 9274 | if ((rdev->pm.pm_method == PM_METHOD_DPM) && | 9277 | if ((rdev->pm.pm_method == PM_METHOD_DPM) && |
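The arithmetic behind this hunk (and the matching evergreen.c and si.c hunks further down): 1000000 * crtc_htotal is evaluated in 32 bits on 32-bit kernels, and a 4K mode with crtc_htotal around 4400 gives 4.4e9, already past the 32-bit ceiling of roughly 4.29e9, so the old expression silently wrapped before dividing by the pixel clock. Doing the multiply in 64 bits and dividing with div_u64() keeps the intermediate exact. A standalone sketch of the safe form, with an invented helper name:

#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/types.h>

/*
 * Illustrative helper; the radeon code open-codes this per display block.
 * Line time in ns for a horizontal total in pixels and a pixel clock in kHz.
 */
static u32 foo_line_time_ns(u32 crtc_htotal, u32 clock_khz)
{
	/* 64-bit multiply first: 4400 * 1000000 would wrap a u32. */
	u32 line_time = (u32)div_u64((u64)crtc_htotal * 1000000, clock_khz);

	/* Clamp to 16 bits, as the watermark code above does. */
	return min(line_time, (u32)65535);
}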
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 0bf103536404..534637203e70 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
| @@ -2266,8 +2266,11 @@ static void evergreen_program_watermarks(struct radeon_device *rdev, | |||
| 2266 | fixed20_12 a, b, c; | 2266 | fixed20_12 a, b, c; |
| 2267 | 2267 | ||
| 2268 | if (radeon_crtc->base.enabled && num_heads && mode) { | 2268 | if (radeon_crtc->base.enabled && num_heads && mode) { |
| 2269 | active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; | 2269 | active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, |
| 2270 | line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); | 2270 | (u32)mode->clock); |
| 2271 | line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, | ||
| 2272 | (u32)mode->clock); | ||
| 2273 | line_time = min(line_time, (u32)65535); | ||
| 2271 | priority_a_cnt = 0; | 2274 | priority_a_cnt = 0; |
| 2272 | priority_b_cnt = 0; | 2275 | priority_b_cnt = 0; |
| 2273 | dram_channels = evergreen_get_number_of_dram_channels(rdev); | 2276 | dram_channels = evergreen_get_number_of_dram_channels(rdev); |
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 432480ff9d22..3178ba0c537c 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
| @@ -3393,6 +3393,13 @@ void radeon_combios_asic_init(struct drm_device *dev) | |||
| 3393 | rdev->pdev->subsystem_vendor == 0x103c && | 3393 | rdev->pdev->subsystem_vendor == 0x103c && |
| 3394 | rdev->pdev->subsystem_device == 0x280a) | 3394 | rdev->pdev->subsystem_device == 0x280a) |
| 3395 | return; | 3395 | return; |
| 3396 | /* quirk for rs4xx Toshiba Satellite L20-183 laptop to make it resume | ||
| 3397 | * - it hangs on resume inside the dynclk 1 table. | ||
| 3398 | */ | ||
| 3399 | if (rdev->family == CHIP_RS400 && | ||
| 3400 | rdev->pdev->subsystem_vendor == 0x1179 && | ||
| 3401 | rdev->pdev->subsystem_device == 0xff31) | ||
| 3402 | return; | ||
| 3396 | 3403 | ||
| 3397 | /* DYN CLK 1 */ | 3404 | /* DYN CLK 1 */ |
| 3398 | table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); | 3405 | table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 6ecf42783d4b..0a6444d72000 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
| @@ -136,6 +136,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = { | |||
| 136 | * https://bugzilla.kernel.org/show_bug.cgi?id=51381 | 136 | * https://bugzilla.kernel.org/show_bug.cgi?id=51381 |
| 137 | */ | 137 | */ |
| 138 | { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX }, | 138 | { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX }, |
| 139 | /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU | ||
| 140 | * https://bugs.freedesktop.org/show_bug.cgi?id=101491 | ||
| 141 | */ | ||
| 142 | { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX }, | ||
| 139 | /* macbook pro 8.2 */ | 143 | /* macbook pro 8.2 */ |
| 140 | { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP }, | 144 | { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP }, |
| 141 | { 0, 0, 0, 0, 0 }, | 145 | { 0, 0, 0, 0, 0 }, |
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 7431eb4a11b7..d34d1cf33895 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c | |||
| @@ -621,7 +621,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, | |||
| 621 | } | 621 | } |
| 622 | 622 | ||
| 623 | /* TODO: is this still necessary on NI+ ? */ | 623 | /* TODO: is this still necessary on NI+ ? */ |
| 624 | if ((cmd == 0 || cmd == 1 || cmd == 0x3) && | 624 | if ((cmd == 0 || cmd == 0x3) && |
| 625 | (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) { | 625 | (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) { |
| 626 | DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n", | 626 | DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n", |
| 627 | start, end); | 627 | start, end); |
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 76d1888528e6..5303f25d5280 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
| @@ -2284,8 +2284,11 @@ static void dce6_program_watermarks(struct radeon_device *rdev, | |||
| 2284 | fixed20_12 a, b, c; | 2284 | fixed20_12 a, b, c; |
| 2285 | 2285 | ||
| 2286 | if (radeon_crtc->base.enabled && num_heads && mode) { | 2286 | if (radeon_crtc->base.enabled && num_heads && mode) { |
| 2287 | active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; | 2287 | active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, |
| 2288 | line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); | 2288 | (u32)mode->clock); |
| 2289 | line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, | ||
| 2290 | (u32)mode->clock); | ||
| 2291 | line_time = min(line_time, (u32)65535); | ||
| 2289 | priority_a_cnt = 0; | 2292 | priority_a_cnt = 0; |
| 2290 | priority_b_cnt = 0; | 2293 | priority_b_cnt = 0; |
| 2291 | 2294 | ||
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c index d8fa7a9c9240..ce5f2d1f9994 100644 --- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c +++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c | |||
| @@ -245,8 +245,6 @@ rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder, | |||
| 245 | struct drm_connector_state *conn_state) | 245 | struct drm_connector_state *conn_state) |
| 246 | { | 246 | { |
| 247 | struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state); | 247 | struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state); |
| 248 | struct rockchip_dp_device *dp = to_dp(encoder); | ||
| 249 | int ret; | ||
| 250 | 248 | ||
| 251 | /* | 249 | /* |
| 252 | * The hardware IC designed that VOP must output the RGB10 video | 250 | * The hardware IC designed that VOP must output the RGB10 video |
| @@ -258,16 +256,6 @@ rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder, | |||
| 258 | 256 | ||
| 259 | s->output_mode = ROCKCHIP_OUT_MODE_AAAA; | 257 | s->output_mode = ROCKCHIP_OUT_MODE_AAAA; |
| 260 | s->output_type = DRM_MODE_CONNECTOR_eDP; | 258 | s->output_type = DRM_MODE_CONNECTOR_eDP; |
| 261 | if (dp->data->chip_type == RK3399_EDP) { | ||
| 262 | /* | ||
| 263 | * For RK3399, VOP Lit must code the out mode to RGB888, | ||
| 264 | * VOP Big must code the out mode to RGB10. | ||
| 265 | */ | ||
| 266 | ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, | ||
| 267 | encoder); | ||
| 268 | if (ret > 0) | ||
| 269 | s->output_mode = ROCKCHIP_OUT_MODE_P888; | ||
| 270 | } | ||
| 271 | 259 | ||
| 272 | return 0; | 260 | return 0; |
| 273 | } | 261 | } |
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c index a2169dd3d26b..14fa1f8351e8 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.c +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c | |||
| @@ -615,7 +615,6 @@ static void cdn_dp_encoder_enable(struct drm_encoder *encoder) | |||
| 615 | { | 615 | { |
| 616 | struct cdn_dp_device *dp = encoder_to_dp(encoder); | 616 | struct cdn_dp_device *dp = encoder_to_dp(encoder); |
| 617 | int ret, val; | 617 | int ret, val; |
| 618 | struct rockchip_crtc_state *state; | ||
| 619 | 618 | ||
| 620 | ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder); | 619 | ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder); |
| 621 | if (ret < 0) { | 620 | if (ret < 0) { |
| @@ -625,14 +624,10 @@ static void cdn_dp_encoder_enable(struct drm_encoder *encoder) | |||
| 625 | 624 | ||
| 626 | DRM_DEV_DEBUG_KMS(dp->dev, "vop %s output to cdn-dp\n", | 625 | DRM_DEV_DEBUG_KMS(dp->dev, "vop %s output to cdn-dp\n", |
| 627 | (ret) ? "LIT" : "BIG"); | 626 | (ret) ? "LIT" : "BIG"); |
| 628 | state = to_rockchip_crtc_state(encoder->crtc->state); | 627 | if (ret) |
| 629 | if (ret) { | ||
| 630 | val = DP_SEL_VOP_LIT | (DP_SEL_VOP_LIT << 16); | 628 | val = DP_SEL_VOP_LIT | (DP_SEL_VOP_LIT << 16); |
| 631 | state->output_mode = ROCKCHIP_OUT_MODE_P888; | 629 | else |
| 632 | } else { | ||
| 633 | val = DP_SEL_VOP_LIT << 16; | 630 | val = DP_SEL_VOP_LIT << 16; |
| 634 | state->output_mode = ROCKCHIP_OUT_MODE_AAAA; | ||
| 635 | } | ||
| 636 | 631 | ||
| 637 | ret = cdn_dp_grf_write(dp, GRF_SOC_CON9, val); | 632 | ret = cdn_dp_grf_write(dp, GRF_SOC_CON9, val); |
| 638 | if (ret) | 633 | if (ret) |
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index 3f7a82d1e095..45589d6ce65e 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c | |||
| @@ -875,6 +875,7 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc, | |||
| 875 | static void vop_crtc_enable(struct drm_crtc *crtc) | 875 | static void vop_crtc_enable(struct drm_crtc *crtc) |
| 876 | { | 876 | { |
| 877 | struct vop *vop = to_vop(crtc); | 877 | struct vop *vop = to_vop(crtc); |
| 878 | const struct vop_data *vop_data = vop->data; | ||
| 878 | struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state); | 879 | struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state); |
| 879 | struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode; | 880 | struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode; |
| 880 | u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start; | 881 | u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start; |
| @@ -967,6 +968,13 @@ static void vop_crtc_enable(struct drm_crtc *crtc) | |||
| 967 | DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n", | 968 | DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n", |
| 968 | s->output_type); | 969 | s->output_type); |
| 969 | } | 970 | } |
| 971 | |||
| 972 | /* | ||
| 973 | * If the VOP does not support RGB10 output, force RGB10 down to RGB888. | ||
| 974 | */ | ||
| 975 | if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA && | ||
| 976 | !(vop_data->feature & VOP_FEATURE_OUTPUT_RGB10)) | ||
| 977 | s->output_mode = ROCKCHIP_OUT_MODE_P888; | ||
| 970 | VOP_CTRL_SET(vop, out_mode, s->output_mode); | 978 | VOP_CTRL_SET(vop, out_mode, s->output_mode); |
| 971 | 979 | ||
| 972 | VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len); | 980 | VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len); |
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h index 5a4faa85dbd2..9979fd0c2282 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h | |||
| @@ -142,6 +142,9 @@ struct vop_data { | |||
| 142 | const struct vop_intr *intr; | 142 | const struct vop_intr *intr; |
| 143 | const struct vop_win_data *win; | 143 | const struct vop_win_data *win; |
| 144 | unsigned int win_size; | 144 | unsigned int win_size; |
| 145 | |||
| 146 | #define VOP_FEATURE_OUTPUT_RGB10 BIT(0) | ||
| 147 | u64 feature; | ||
| 145 | }; | 148 | }; |
| 146 | 149 | ||
| 147 | /* interrupt define */ | 150 | /* interrupt define */ |
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c index 0da44442aab0..bafd698a28b1 100644 --- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c +++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c | |||
| @@ -275,6 +275,7 @@ static const struct vop_intr rk3288_vop_intr = { | |||
| 275 | static const struct vop_data rk3288_vop = { | 275 | static const struct vop_data rk3288_vop = { |
| 276 | .init_table = rk3288_init_reg_table, | 276 | .init_table = rk3288_init_reg_table, |
| 277 | .table_size = ARRAY_SIZE(rk3288_init_reg_table), | 277 | .table_size = ARRAY_SIZE(rk3288_init_reg_table), |
| 278 | .feature = VOP_FEATURE_OUTPUT_RGB10, | ||
| 278 | .intr = &rk3288_vop_intr, | 279 | .intr = &rk3288_vop_intr, |
| 279 | .ctrl = &rk3288_ctrl_data, | 280 | .ctrl = &rk3288_ctrl_data, |
| 280 | .win = rk3288_vop_win_data, | 281 | .win = rk3288_vop_win_data, |
| @@ -343,6 +344,7 @@ static const struct vop_reg_data rk3399_init_reg_table[] = { | |||
| 343 | static const struct vop_data rk3399_vop_big = { | 344 | static const struct vop_data rk3399_vop_big = { |
| 344 | .init_table = rk3399_init_reg_table, | 345 | .init_table = rk3399_init_reg_table, |
| 345 | .table_size = ARRAY_SIZE(rk3399_init_reg_table), | 346 | .table_size = ARRAY_SIZE(rk3399_init_reg_table), |
| 347 | .feature = VOP_FEATURE_OUTPUT_RGB10, | ||
| 346 | .intr = &rk3399_vop_intr, | 348 | .intr = &rk3399_vop_intr, |
| 347 | .ctrl = &rk3399_ctrl_data, | 349 | .ctrl = &rk3399_ctrl_data, |
| 348 | /* | 350 | /* |
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 9a1e34e48f64..81f86a67c10d 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c | |||
| @@ -451,18 +451,6 @@ fail: | |||
| 451 | 451 | ||
| 452 | 452 | ||
| 453 | #ifdef CONFIG_DRM_TEGRA_STAGING | 453 | #ifdef CONFIG_DRM_TEGRA_STAGING |
| 454 | static struct tegra_drm_context * | ||
| 455 | tegra_drm_file_get_context(struct tegra_drm_file *file, u32 id) | ||
| 456 | { | ||
| 457 | struct tegra_drm_context *context; | ||
| 458 | |||
| 459 | mutex_lock(&file->lock); | ||
| 460 | context = idr_find(&file->contexts, id); | ||
| 461 | mutex_unlock(&file->lock); | ||
| 462 | |||
| 463 | return context; | ||
| 464 | } | ||
| 465 | |||
| 466 | static int tegra_gem_create(struct drm_device *drm, void *data, | 454 | static int tegra_gem_create(struct drm_device *drm, void *data, |
| 467 | struct drm_file *file) | 455 | struct drm_file *file) |
| 468 | { | 456 | { |
| @@ -551,7 +539,7 @@ static int tegra_client_open(struct tegra_drm_file *fpriv, | |||
| 551 | if (err < 0) | 539 | if (err < 0) |
| 552 | return err; | 540 | return err; |
| 553 | 541 | ||
| 554 | err = idr_alloc(&fpriv->contexts, context, 0, 0, GFP_KERNEL); | 542 | err = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL); |
| 555 | if (err < 0) { | 543 | if (err < 0) { |
| 556 | client->ops->close_channel(context); | 544 | client->ops->close_channel(context); |
| 557 | return err; | 545 | return err; |
| @@ -606,7 +594,7 @@ static int tegra_close_channel(struct drm_device *drm, void *data, | |||
| 606 | 594 | ||
| 607 | mutex_lock(&fpriv->lock); | 595 | mutex_lock(&fpriv->lock); |
| 608 | 596 | ||
| 609 | context = tegra_drm_file_get_context(fpriv, args->context); | 597 | context = idr_find(&fpriv->contexts, args->context); |
| 610 | if (!context) { | 598 | if (!context) { |
| 611 | err = -EINVAL; | 599 | err = -EINVAL; |
| 612 | goto unlock; | 600 | goto unlock; |
| @@ -631,7 +619,7 @@ static int tegra_get_syncpt(struct drm_device *drm, void *data, | |||
| 631 | 619 | ||
| 632 | mutex_lock(&fpriv->lock); | 620 | mutex_lock(&fpriv->lock); |
| 633 | 621 | ||
| 634 | context = tegra_drm_file_get_context(fpriv, args->context); | 622 | context = idr_find(&fpriv->contexts, args->context); |
| 635 | if (!context) { | 623 | if (!context) { |
| 636 | err = -ENODEV; | 624 | err = -ENODEV; |
| 637 | goto unlock; | 625 | goto unlock; |
| @@ -660,7 +648,7 @@ static int tegra_submit(struct drm_device *drm, void *data, | |||
| 660 | 648 | ||
| 661 | mutex_lock(&fpriv->lock); | 649 | mutex_lock(&fpriv->lock); |
| 662 | 650 | ||
| 663 | context = tegra_drm_file_get_context(fpriv, args->context); | 651 | context = idr_find(&fpriv->contexts, args->context); |
| 664 | if (!context) { | 652 | if (!context) { |
| 665 | err = -ENODEV; | 653 | err = -ENODEV; |
| 666 | goto unlock; | 654 | goto unlock; |
| @@ -685,7 +673,7 @@ static int tegra_get_syncpt_base(struct drm_device *drm, void *data, | |||
| 685 | 673 | ||
| 686 | mutex_lock(&fpriv->lock); | 674 | mutex_lock(&fpriv->lock); |
| 687 | 675 | ||
| 688 | context = tegra_drm_file_get_context(fpriv, args->context); | 676 | context = idr_find(&fpriv->contexts, args->context); |
| 689 | if (!context) { | 677 | if (!context) { |
| 690 | err = -ENODEV; | 678 | err = -ENODEV; |
| 691 | goto unlock; | 679 | goto unlock; |
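Two related things happen in the tegra hunks above: the lookups now call idr_find() directly while fpriv->lock stays held for the whole ioctl (the removed helper dropped the lock before returning the pointer, leaving a window for a concurrent close to free the context), and idr_alloc() now starts at 1 so ID 0 is never handed out. In idr_alloc(idr, ptr, start, end, gfp), an end of 0 means no upper bound. A hedged sketch of the allocate/lookup pairing with invented names:

#include <linux/idr.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/slab.h>

/* Placeholder types; not the tegra structures. */
struct foo_file {
	struct mutex lock;
	struct idr contexts;
};

static int foo_context_register(struct foo_file *file, void *context)
{
	int id;

	mutex_lock(&file->lock);
	/* start = 1 keeps ID 0 free to mean "no context"; end = 0 is unbounded */
	id = idr_alloc(&file->contexts, context, 1, 0, GFP_KERNEL);
	mutex_unlock(&file->lock);

	return id;	/* negative errno on failure */
}

static void *foo_context_lookup(struct foo_file *file, u32 id)
{
	/*
	 * The caller holds file->lock and keeps holding it while using the
	 * returned pointer, so a concurrent removal cannot free it.
	 */
	lockdep_assert_held(&file->lock);

	return idr_find(&file->contexts, id);
}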
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c index 13db8a2851ed..1f013d45c9e9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c | |||
| @@ -321,6 +321,7 @@ void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man) | |||
| 321 | list_for_each_entry_safe(entry, next, &man->list, head) | 321 | list_for_each_entry_safe(entry, next, &man->list, head) |
| 322 | vmw_cmdbuf_res_free(man, entry); | 322 | vmw_cmdbuf_res_free(man, entry); |
| 323 | 323 | ||
| 324 | drm_ht_remove(&man->resources); | ||
| 324 | kfree(man); | 325 | kfree(man); |
| 325 | } | 326 | } |
| 326 | 327 | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 130d51c5ec6a..4b948fba9eec 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
| @@ -41,9 +41,9 @@ | |||
| 41 | #include <drm/ttm/ttm_module.h> | 41 | #include <drm/ttm/ttm_module.h> |
| 42 | #include "vmwgfx_fence.h" | 42 | #include "vmwgfx_fence.h" |
| 43 | 43 | ||
| 44 | #define VMWGFX_DRIVER_DATE "20170221" | 44 | #define VMWGFX_DRIVER_DATE "20170607" |
| 45 | #define VMWGFX_DRIVER_MAJOR 2 | 45 | #define VMWGFX_DRIVER_MAJOR 2 |
| 46 | #define VMWGFX_DRIVER_MINOR 12 | 46 | #define VMWGFX_DRIVER_MINOR 13 |
| 47 | #define VMWGFX_DRIVER_PATCHLEVEL 0 | 47 | #define VMWGFX_DRIVER_PATCHLEVEL 0 |
| 48 | #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 | 48 | #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 |
| 49 | #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) | 49 | #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index b6a0806b06bf..a1c68e6a689e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | |||
| @@ -368,6 +368,8 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv, | |||
| 368 | return fifo_state->static_buffer; | 368 | return fifo_state->static_buffer; |
| 369 | else { | 369 | else { |
| 370 | fifo_state->dynamic_buffer = vmalloc(bytes); | 370 | fifo_state->dynamic_buffer = vmalloc(bytes); |
| 371 | if (!fifo_state->dynamic_buffer) | ||
| 372 | goto out_err; | ||
| 371 | return fifo_state->dynamic_buffer; | 373 | return fifo_state->dynamic_buffer; |
| 372 | } | 374 | } |
| 373 | } | 375 | } |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index ef9f3a2a4030..1d2db5d912b0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
| @@ -274,108 +274,6 @@ void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv) | |||
| 274 | } | 274 | } |
| 275 | 275 | ||
| 276 | 276 | ||
| 277 | |||
| 278 | /** | ||
| 279 | * vmw_du_cursor_plane_update() - Update cursor image and location | ||
| 280 | * | ||
| 281 | * @plane: plane object to update | ||
| 282 | * @crtc: owning CRTC of @plane | ||
| 283 | * @fb: framebuffer to flip onto plane | ||
| 284 | * @crtc_x: x offset of plane on crtc | ||
| 285 | * @crtc_y: y offset of plane on crtc | ||
| 286 | * @crtc_w: width of plane rectangle on crtc | ||
| 287 | * @crtc_h: height of plane rectangle on crtc | ||
| 288 | * @src_x: Not used | ||
| 289 | * @src_y: Not used | ||
| 290 | * @src_w: Not used | ||
| 291 | * @src_h: Not used | ||
| 292 | * | ||
| 293 | * | ||
| 294 | * RETURNS: | ||
| 295 | * Zero on success, error code on failure | ||
| 296 | */ | ||
| 297 | int vmw_du_cursor_plane_update(struct drm_plane *plane, | ||
| 298 | struct drm_crtc *crtc, | ||
| 299 | struct drm_framebuffer *fb, | ||
| 300 | int crtc_x, int crtc_y, | ||
| 301 | unsigned int crtc_w, | ||
| 302 | unsigned int crtc_h, | ||
| 303 | uint32_t src_x, uint32_t src_y, | ||
| 304 | uint32_t src_w, uint32_t src_h) | ||
| 305 | { | ||
| 306 | struct vmw_private *dev_priv = vmw_priv(crtc->dev); | ||
| 307 | struct vmw_display_unit *du = vmw_crtc_to_du(crtc); | ||
| 308 | struct vmw_surface *surface = NULL; | ||
| 309 | struct vmw_dma_buffer *dmabuf = NULL; | ||
| 310 | s32 hotspot_x, hotspot_y; | ||
| 311 | int ret; | ||
| 312 | |||
| 313 | hotspot_x = du->hotspot_x + fb->hot_x; | ||
| 314 | hotspot_y = du->hotspot_y + fb->hot_y; | ||
| 315 | |||
| 316 | /* A lot of the code assumes this */ | ||
| 317 | if (crtc_w != 64 || crtc_h != 64) { | ||
| 318 | ret = -EINVAL; | ||
| 319 | goto out; | ||
| 320 | } | ||
| 321 | |||
| 322 | if (vmw_framebuffer_to_vfb(fb)->dmabuf) | ||
| 323 | dmabuf = vmw_framebuffer_to_vfbd(fb)->buffer; | ||
| 324 | else | ||
| 325 | surface = vmw_framebuffer_to_vfbs(fb)->surface; | ||
| 326 | |||
| 327 | if (surface && !surface->snooper.image) { | ||
| 328 | DRM_ERROR("surface not suitable for cursor\n"); | ||
| 329 | ret = -EINVAL; | ||
| 330 | goto out; | ||
| 331 | } | ||
| 332 | |||
| 333 | /* setup new image */ | ||
| 334 | ret = 0; | ||
| 335 | if (surface) { | ||
| 336 | /* vmw_user_surface_lookup takes one reference */ | ||
| 337 | du->cursor_surface = surface; | ||
| 338 | |||
| 339 | du->cursor_age = du->cursor_surface->snooper.age; | ||
| 340 | |||
| 341 | ret = vmw_cursor_update_image(dev_priv, surface->snooper.image, | ||
| 342 | 64, 64, hotspot_x, hotspot_y); | ||
| 343 | } else if (dmabuf) { | ||
| 344 | /* vmw_user_surface_lookup takes one reference */ | ||
| 345 | du->cursor_dmabuf = dmabuf; | ||
| 346 | |||
| 347 | ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, crtc_w, crtc_h, | ||
| 348 | hotspot_x, hotspot_y); | ||
| 349 | } else { | ||
| 350 | vmw_cursor_update_position(dev_priv, false, 0, 0); | ||
| 351 | goto out; | ||
| 352 | } | ||
| 353 | |||
| 354 | if (!ret) { | ||
| 355 | du->cursor_x = crtc_x + du->set_gui_x; | ||
| 356 | du->cursor_y = crtc_y + du->set_gui_y; | ||
| 357 | |||
| 358 | vmw_cursor_update_position(dev_priv, true, | ||
| 359 | du->cursor_x + hotspot_x, | ||
| 360 | du->cursor_y + hotspot_y); | ||
| 361 | } | ||
| 362 | |||
| 363 | out: | ||
| 364 | return ret; | ||
| 365 | } | ||
| 366 | |||
| 367 | |||
| 368 | int vmw_du_cursor_plane_disable(struct drm_plane *plane) | ||
| 369 | { | ||
| 370 | if (plane->fb) { | ||
| 371 | drm_framebuffer_unreference(plane->fb); | ||
| 372 | plane->fb = NULL; | ||
| 373 | } | ||
| 374 | |||
| 375 | return -EINVAL; | ||
| 376 | } | ||
| 377 | |||
| 378 | |||
| 379 | void vmw_du_cursor_plane_destroy(struct drm_plane *plane) | 277 | void vmw_du_cursor_plane_destroy(struct drm_plane *plane) |
| 380 | { | 278 | { |
| 381 | vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0); | 279 | vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0); |
| @@ -473,18 +371,6 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane, | |||
| 473 | 371 | ||
| 474 | 372 | ||
| 475 | void | 373 | void |
| 476 | vmw_du_cursor_plane_atomic_disable(struct drm_plane *plane, | ||
| 477 | struct drm_plane_state *old_state) | ||
| 478 | { | ||
| 479 | struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc; | ||
| 480 | struct vmw_private *dev_priv = vmw_priv(crtc->dev); | ||
| 481 | |||
| 482 | drm_atomic_set_fb_for_plane(plane->state, NULL); | ||
| 483 | vmw_cursor_update_position(dev_priv, false, 0, 0); | ||
| 484 | } | ||
| 485 | |||
| 486 | |||
| 487 | void | ||
| 488 | vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, | 374 | vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, |
| 489 | struct drm_plane_state *old_state) | 375 | struct drm_plane_state *old_state) |
| 490 | { | 376 | { |
| @@ -1498,6 +1384,7 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv, | |||
| 1498 | */ | 1384 | */ |
| 1499 | if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) && | 1385 | if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) && |
| 1500 | dmabuf && only_2d && | 1386 | dmabuf && only_2d && |
| 1387 | mode_cmd->width > 64 && /* Don't create a proxy for cursor */ | ||
| 1501 | dev_priv->active_display_unit == vmw_du_screen_target) { | 1388 | dev_priv->active_display_unit == vmw_du_screen_target) { |
| 1502 | ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd, | 1389 | ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd, |
| 1503 | dmabuf, &surface); | 1390 | dmabuf, &surface); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index 13f2f1d2818a..5f8d678ae675 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | |||
| @@ -256,10 +256,6 @@ int vmw_du_crtc_gamma_set(struct drm_crtc *crtc, | |||
| 256 | u16 *r, u16 *g, u16 *b, | 256 | u16 *r, u16 *g, u16 *b, |
| 257 | uint32_t size, | 257 | uint32_t size, |
| 258 | struct drm_modeset_acquire_ctx *ctx); | 258 | struct drm_modeset_acquire_ctx *ctx); |
| 259 | int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv, | ||
| 260 | uint32_t handle, uint32_t width, uint32_t height, | ||
| 261 | int32_t hot_x, int32_t hot_y); | ||
| 262 | int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); | ||
| 263 | int vmw_du_connector_set_property(struct drm_connector *connector, | 259 | int vmw_du_connector_set_property(struct drm_connector *connector, |
| 264 | struct drm_property *property, | 260 | struct drm_property *property, |
| 265 | uint64_t val); | 261 | uint64_t val); |
| @@ -339,15 +335,6 @@ void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv, | |||
| 339 | /* Universal Plane Helpers */ | 335 | /* Universal Plane Helpers */ |
| 340 | void vmw_du_primary_plane_destroy(struct drm_plane *plane); | 336 | void vmw_du_primary_plane_destroy(struct drm_plane *plane); |
| 341 | void vmw_du_cursor_plane_destroy(struct drm_plane *plane); | 337 | void vmw_du_cursor_plane_destroy(struct drm_plane *plane); |
| 342 | int vmw_du_cursor_plane_disable(struct drm_plane *plane); | ||
| 343 | int vmw_du_cursor_plane_update(struct drm_plane *plane, | ||
| 344 | struct drm_crtc *crtc, | ||
| 345 | struct drm_framebuffer *fb, | ||
| 346 | int crtc_x, int crtc_y, | ||
| 347 | unsigned int crtc_w, | ||
| 348 | unsigned int crtc_h, | ||
| 349 | uint32_t src_x, uint32_t src_y, | ||
| 350 | uint32_t src_w, uint32_t src_h); | ||
| 351 | 338 | ||
| 352 | /* Atomic Helpers */ | 339 | /* Atomic Helpers */ |
| 353 | int vmw_du_primary_plane_atomic_check(struct drm_plane *plane, | 340 | int vmw_du_primary_plane_atomic_check(struct drm_plane *plane, |
| @@ -356,8 +343,6 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane, | |||
| 356 | struct drm_plane_state *state); | 343 | struct drm_plane_state *state); |
| 357 | void vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, | 344 | void vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, |
| 358 | struct drm_plane_state *old_state); | 345 | struct drm_plane_state *old_state); |
| 359 | void vmw_du_cursor_plane_atomic_disable(struct drm_plane *plane, | ||
| 360 | struct drm_plane_state *old_state); | ||
| 361 | int vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane, | 346 | int vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane, |
| 362 | struct drm_plane_state *new_state); | 347 | struct drm_plane_state *new_state); |
| 363 | void vmw_du_plane_cleanup_fb(struct drm_plane *plane, | 348 | void vmw_du_plane_cleanup_fb(struct drm_plane *plane, |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index bad31bdf09b6..50be1f034f9e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | |||
| @@ -56,6 +56,8 @@ enum stdu_content_type { | |||
| 56 | * @right: Right side of bounding box. | 56 | * @right: Right side of bounding box. |
| 57 | * @top: Top side of bounding box. | 57 | * @top: Top side of bounding box. |
| 58 | * @bottom: Bottom side of bounding box. | 58 | * @bottom: Bottom side of bounding box. |
| 59 | * @fb_left: Left side of the framebuffer/content bounding box | ||
| 60 | * @fb_top: Top of the framebuffer/content bounding box | ||
| 59 | * @buf: DMA buffer when DMA-ing between buffer and screen targets. | 61 | * @buf: DMA buffer when DMA-ing between buffer and screen targets. |
| 60 | * @sid: Surface ID when copying between surface and screen targets. | 62 | * @sid: Surface ID when copying between surface and screen targets. |
| 61 | */ | 63 | */ |
| @@ -63,6 +65,7 @@ struct vmw_stdu_dirty { | |||
| 63 | struct vmw_kms_dirty base; | 65 | struct vmw_kms_dirty base; |
| 64 | SVGA3dTransferType transfer; | 66 | SVGA3dTransferType transfer; |
| 65 | s32 left, right, top, bottom; | 67 | s32 left, right, top, bottom; |
| 68 | s32 fb_left, fb_top; | ||
| 66 | u32 pitch; | 69 | u32 pitch; |
| 67 | union { | 70 | union { |
| 68 | struct vmw_dma_buffer *buf; | 71 | struct vmw_dma_buffer *buf; |
| @@ -647,7 +650,7 @@ static void vmw_stdu_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty) | |||
| 647 | * | 650 | * |
| 648 | * @dirty: The closure structure. | 651 | * @dirty: The closure structure. |
| 649 | * | 652 | * |
| 650 | * This function calculates the bounding box for all the incoming clips | 653 | * This function calculates the bounding box for all the incoming clips. |
| 651 | */ | 654 | */ |
| 652 | static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty) | 655 | static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty) |
| 653 | { | 656 | { |
| @@ -656,11 +659,19 @@ static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty) | |||
| 656 | 659 | ||
| 657 | dirty->num_hits = 1; | 660 | dirty->num_hits = 1; |
| 658 | 661 | ||
| 659 | /* Calculate bounding box */ | 662 | /* Calculate destination bounding box */ |
| 660 | ddirty->left = min_t(s32, ddirty->left, dirty->unit_x1); | 663 | ddirty->left = min_t(s32, ddirty->left, dirty->unit_x1); |
| 661 | ddirty->top = min_t(s32, ddirty->top, dirty->unit_y1); | 664 | ddirty->top = min_t(s32, ddirty->top, dirty->unit_y1); |
| 662 | ddirty->right = max_t(s32, ddirty->right, dirty->unit_x2); | 665 | ddirty->right = max_t(s32, ddirty->right, dirty->unit_x2); |
| 663 | ddirty->bottom = max_t(s32, ddirty->bottom, dirty->unit_y2); | 666 | ddirty->bottom = max_t(s32, ddirty->bottom, dirty->unit_y2); |
| 667 | |||
| 668 | /* | ||
| 669 | * Calculate content bounding box. We only need the top-left | ||
| 670 | * coordinate because width and height will be the same as the | ||
| 671 | * destination bounding box above. | ||
| 672 | */ | ||
| 673 | ddirty->fb_left = min_t(s32, ddirty->fb_left, dirty->fb_x); | ||
| 674 | ddirty->fb_top = min_t(s32, ddirty->fb_top, dirty->fb_y); | ||
| 664 | } | 675 | } |
| 665 | 676 | ||
| 666 | 677 | ||
| @@ -697,11 +708,11 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty) | |||
| 697 | /* Assume we are blitting from Host (display_srf) to Guest (dmabuf) */ | 708 | /* Assume we are blitting from Host (display_srf) to Guest (dmabuf) */ |
| 698 | src_pitch = stdu->display_srf->base_size.width * stdu->cpp; | 709 | src_pitch = stdu->display_srf->base_size.width * stdu->cpp; |
| 699 | src = ttm_kmap_obj_virtual(&stdu->host_map, ¬_used); | 710 | src = ttm_kmap_obj_virtual(&stdu->host_map, ¬_used); |
| 700 | src += dirty->unit_y1 * src_pitch + dirty->unit_x1 * stdu->cpp; | 711 | src += ddirty->top * src_pitch + ddirty->left * stdu->cpp; |
| 701 | 712 | ||
| 702 | dst_pitch = ddirty->pitch; | 713 | dst_pitch = ddirty->pitch; |
| 703 | dst = ttm_kmap_obj_virtual(&stdu->guest_map, ¬_used); | 714 | dst = ttm_kmap_obj_virtual(&stdu->guest_map, ¬_used); |
| 704 | dst += dirty->fb_y * dst_pitch + dirty->fb_x * stdu->cpp; | 715 | dst += ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp; |
| 705 | 716 | ||
| 706 | 717 | ||
| 707 | /* Figure out the real direction */ | 718 | /* Figure out the real direction */ |
| @@ -760,7 +771,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty) | |||
| 760 | } | 771 | } |
| 761 | 772 | ||
| 762 | out_cleanup: | 773 | out_cleanup: |
| 763 | ddirty->left = ddirty->top = S32_MAX; | 774 | ddirty->left = ddirty->top = ddirty->fb_left = ddirty->fb_top = S32_MAX; |
| 764 | ddirty->right = ddirty->bottom = S32_MIN; | 775 | ddirty->right = ddirty->bottom = S32_MIN; |
| 765 | } | 776 | } |
| 766 | 777 | ||
| @@ -812,6 +823,7 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv, | |||
| 812 | SVGA3D_READ_HOST_VRAM; | 823 | SVGA3D_READ_HOST_VRAM; |
| 813 | ddirty.left = ddirty.top = S32_MAX; | 824 | ddirty.left = ddirty.top = S32_MAX; |
| 814 | ddirty.right = ddirty.bottom = S32_MIN; | 825 | ddirty.right = ddirty.bottom = S32_MIN; |
| 826 | ddirty.fb_left = ddirty.fb_top = S32_MAX; | ||
| 815 | ddirty.pitch = vfb->base.pitches[0]; | 827 | ddirty.pitch = vfb->base.pitches[0]; |
| 816 | ddirty.buf = buf; | 828 | ddirty.buf = buf; |
| 817 | ddirty.base.fifo_commit = vmw_stdu_dmabuf_fifo_commit; | 829 | ddirty.base.fifo_commit = vmw_stdu_dmabuf_fifo_commit; |
| @@ -1355,6 +1367,11 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane, | |||
| 1355 | DRM_ERROR("Failed to bind surface to STDU.\n"); | 1367 | DRM_ERROR("Failed to bind surface to STDU.\n"); |
| 1356 | else | 1368 | else |
| 1357 | crtc->primary->fb = plane->state->fb; | 1369 | crtc->primary->fb = plane->state->fb; |
| 1370 | |||
| 1371 | ret = vmw_stdu_update_st(dev_priv, stdu); | ||
| 1372 | |||
| 1373 | if (ret) | ||
| 1374 | DRM_ERROR("Failed to update STDU.\n"); | ||
| 1358 | } | 1375 | } |
| 1359 | 1376 | ||
| 1360 | 1377 | ||
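On the fb_left/fb_top fields introduced above: the clip callback now accumulates two boxes, one in screen-target coordinates and, for the framebuffer side, just the top-left corner, since after the union both boxes share the same width and height. The accumulation is the usual min/max fold seeded with S32_MAX/S32_MIN so the first clip always takes effect. A generic sketch of that fold, with invented field names:

#include <linux/kernel.h>
#include <linux/types.h>

/* Illustrative only; not the vmwgfx dirty-tracking structure. */
struct foo_bbox {
	s32 left, top, right, bottom;
};

static void foo_bbox_reset(struct foo_bbox *box)
{
	box->left = box->top = S32_MAX;		/* any real clip is smaller */
	box->right = box->bottom = S32_MIN;	/* any real clip is larger  */
}

static void foo_bbox_add_clip(struct foo_bbox *box, s32 x1, s32 y1, s32 x2, s32 y2)
{
	box->left   = min(box->left, x1);
	box->top    = min(box->top, y1);
	box->right  = max(box->right, x2);
	box->bottom = max(box->bottom, y2);
}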
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 7681341fe32b..6b70bd259953 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | |||
| @@ -1274,11 +1274,14 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, | |||
| 1274 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | 1274 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; |
| 1275 | int ret; | 1275 | int ret; |
| 1276 | uint32_t size; | 1276 | uint32_t size; |
| 1277 | uint32_t backup_handle; | 1277 | uint32_t backup_handle = 0; |
| 1278 | 1278 | ||
| 1279 | if (req->multisample_count != 0) | 1279 | if (req->multisample_count != 0) |
| 1280 | return -EINVAL; | 1280 | return -EINVAL; |
| 1281 | 1281 | ||
| 1282 | if (req->mip_levels > DRM_VMW_MAX_MIP_LEVELS) | ||
| 1283 | return -EINVAL; | ||
| 1284 | |||
| 1282 | if (unlikely(vmw_user_surface_size == 0)) | 1285 | if (unlikely(vmw_user_surface_size == 0)) |
| 1283 | vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) + | 1286 | vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) + |
| 1284 | 128; | 1287 | 128; |
| @@ -1314,12 +1317,16 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, | |||
| 1314 | ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle, | 1317 | ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle, |
| 1315 | &res->backup, | 1318 | &res->backup, |
| 1316 | &user_srf->backup_base); | 1319 | &user_srf->backup_base); |
| 1317 | if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE < | 1320 | if (ret == 0) { |
| 1318 | res->backup_size) { | 1321 | if (res->backup->base.num_pages * PAGE_SIZE < |
| 1319 | DRM_ERROR("Surface backup buffer is too small.\n"); | 1322 | res->backup_size) { |
| 1320 | vmw_dmabuf_unreference(&res->backup); | 1323 | DRM_ERROR("Surface backup buffer is too small.\n"); |
| 1321 | ret = -EINVAL; | 1324 | vmw_dmabuf_unreference(&res->backup); |
| 1322 | goto out_unlock; | 1325 | ret = -EINVAL; |
| 1326 | goto out_unlock; | ||
| 1327 | } else { | ||
| 1328 | backup_handle = req->buffer_handle; | ||
| 1329 | } | ||
| 1323 | } | 1330 | } |
| 1324 | } else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer) | 1331 | } else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer) |
| 1325 | ret = vmw_user_dmabuf_alloc(dev_priv, tfile, | 1332 | ret = vmw_user_dmabuf_alloc(dev_priv, tfile, |
| @@ -1491,7 +1498,7 @@ int vmw_surface_gb_priv_define(struct drm_device *dev, | |||
| 1491 | dev_priv->stdu_max_height); | 1498 | dev_priv->stdu_max_height); |
| 1492 | 1499 | ||
| 1493 | if (size.width > max_width || size.height > max_height) { | 1500 | if (size.width > max_width || size.height > max_height) { |
| 1494 | DRM_ERROR("%ux%u\n, exeeds max surface size %ux%u", | 1501 | DRM_ERROR("%ux%u\n, exceeds max surface size %ux%u", |
| 1495 | size.width, size.height, | 1502 | size.width, size.height, |
| 1496 | max_width, max_height); | 1503 | max_width, max_height); |
| 1497 | return -EINVAL; | 1504 | return -EINVAL; |
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index f05ebb14fa63..ac65f52850a6 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c | |||
| @@ -172,7 +172,7 @@ static int host1x_probe(struct platform_device *pdev) | |||
| 172 | 172 | ||
| 173 | host->rst = devm_reset_control_get(&pdev->dev, "host1x"); | 173 | host->rst = devm_reset_control_get(&pdev->dev, "host1x"); |
| 174 | if (IS_ERR(host->rst)) { | 174 | if (IS_ERR(host->rst)) { |
| 175 | err = PTR_ERR(host->clk); | 175 | err = PTR_ERR(host->rst); |
| 176 | dev_err(&pdev->dev, "failed to get reset: %d\n", err); | 176 | dev_err(&pdev->dev, "failed to get reset: %d\n", err); |
| 177 | return err; | 177 | return err; |
| 178 | } | 178 | } |
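A hedged sketch of the corrected pattern in host1x_probe() above: the error code must be derived from the pointer that was actually checked (host->rst), not from host->clk. The helper below is invented for illustration; devm_reset_control_get(), IS_ERR() and PTR_ERR() are the standard kernel interfaces the hunk already uses.

	/* Sketch only: request the "host1x" reset line and report failures. */
	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>
	#include <linux/reset.h>

	static int host1x_get_reset(struct platform_device *pdev,
				    struct reset_control **rst)
	{
		*rst = devm_reset_control_get(&pdev->dev, "host1x");
		if (IS_ERR(*rst)) {
			int err = PTR_ERR(*rst);	/* was PTR_ERR(host->clk) */

			dev_err(&pdev->dev, "failed to get reset: %d\n", err);
			return err;
		}

		return 0;
	}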
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index 16d556816b5f..2fb5f432a54c 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c | |||
| @@ -725,15 +725,16 @@ void ipu_set_ic_src_mux(struct ipu_soc *ipu, int csi_id, bool vdi) | |||
| 725 | spin_lock_irqsave(&ipu->lock, flags); | 725 | spin_lock_irqsave(&ipu->lock, flags); |
| 726 | 726 | ||
| 727 | val = ipu_cm_read(ipu, IPU_CONF); | 727 | val = ipu_cm_read(ipu, IPU_CONF); |
| 728 | if (vdi) { | 728 | if (vdi) |
| 729 | val |= IPU_CONF_IC_INPUT; | 729 | val |= IPU_CONF_IC_INPUT; |
| 730 | } else { | 730 | else |
| 731 | val &= ~IPU_CONF_IC_INPUT; | 731 | val &= ~IPU_CONF_IC_INPUT; |
| 732 | if (csi_id == 1) | 732 | |
| 733 | val |= IPU_CONF_CSI_SEL; | 733 | if (csi_id == 1) |
| 734 | else | 734 | val |= IPU_CONF_CSI_SEL; |
| 735 | val &= ~IPU_CONF_CSI_SEL; | 735 | else |
| 736 | } | 736 | val &= ~IPU_CONF_CSI_SEL; |
| 737 | |||
| 737 | ipu_cm_write(ipu, val, IPU_CONF); | 738 | ipu_cm_write(ipu, val, IPU_CONF); |
| 738 | 739 | ||
| 739 | spin_unlock_irqrestore(&ipu->lock, flags); | 740 | spin_unlock_irqrestore(&ipu->lock, flags); |
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c index c55563379e2e..c35f74c83065 100644 --- a/drivers/gpu/ipu-v3/ipu-pre.c +++ b/drivers/gpu/ipu-v3/ipu-pre.c | |||
| @@ -131,8 +131,6 @@ int ipu_pre_get(struct ipu_pre *pre) | |||
| 131 | if (pre->in_use) | 131 | if (pre->in_use) |
| 132 | return -EBUSY; | 132 | return -EBUSY; |
| 133 | 133 | ||
| 134 | clk_prepare_enable(pre->clk_axi); | ||
| 135 | |||
| 136 | /* first get the engine out of reset and remove clock gating */ | 134 | /* first get the engine out of reset and remove clock gating */ |
| 137 | writel(0, pre->regs + IPU_PRE_CTRL); | 135 | writel(0, pre->regs + IPU_PRE_CTRL); |
| 138 | 136 | ||
| @@ -149,12 +147,7 @@ int ipu_pre_get(struct ipu_pre *pre) | |||
| 149 | 147 | ||
| 150 | void ipu_pre_put(struct ipu_pre *pre) | 148 | void ipu_pre_put(struct ipu_pre *pre) |
| 151 | { | 149 | { |
| 152 | u32 val; | 150 | writel(IPU_PRE_CTRL_SFTRST, pre->regs + IPU_PRE_CTRL); |
| 153 | |||
| 154 | val = IPU_PRE_CTRL_SFTRST | IPU_PRE_CTRL_CLKGATE; | ||
| 155 | writel(val, pre->regs + IPU_PRE_CTRL); | ||
| 156 | |||
| 157 | clk_disable_unprepare(pre->clk_axi); | ||
| 158 | 151 | ||
| 159 | pre->in_use = false; | 152 | pre->in_use = false; |
| 160 | } | 153 | } |
| @@ -249,6 +242,8 @@ static int ipu_pre_probe(struct platform_device *pdev) | |||
| 249 | if (!pre->buffer_virt) | 242 | if (!pre->buffer_virt) |
| 250 | return -ENOMEM; | 243 | return -ENOMEM; |
| 251 | 244 | ||
| 245 | clk_prepare_enable(pre->clk_axi); | ||
| 246 | |||
| 252 | pre->dev = dev; | 247 | pre->dev = dev; |
| 253 | platform_set_drvdata(pdev, pre); | 248 | platform_set_drvdata(pdev, pre); |
| 254 | mutex_lock(&ipu_pre_list_mutex); | 249 | mutex_lock(&ipu_pre_list_mutex); |
| @@ -268,6 +263,8 @@ static int ipu_pre_remove(struct platform_device *pdev) | |||
| 268 | available_pres--; | 263 | available_pres--; |
| 269 | mutex_unlock(&ipu_pre_list_mutex); | 264 | mutex_unlock(&ipu_pre_list_mutex); |
| 270 | 265 | ||
| 266 | clk_disable_unprepare(pre->clk_axi); | ||
| 267 | |||
| 271 | if (pre->buffer_virt) | 268 | if (pre->buffer_virt) |
| 272 | gen_pool_free(pre->iram, (unsigned long)pre->buffer_virt, | 269 | gen_pool_free(pre->iram, (unsigned long)pre->buffer_virt, |
| 273 | IPU_PRE_MAX_WIDTH * IPU_PRE_NUM_SCANLINES * 4); | 270 | IPU_PRE_MAX_WIDTH * IPU_PRE_NUM_SCANLINES * 4); |
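For reference, a sketch of the clock-lifetime change above: the PRE's AXI clock is now enabled once at probe and released at remove, rather than being toggled in ipu_pre_get()/ipu_pre_put(). The structure and function names below are illustrative stand-ins; clk_prepare_enable() and clk_disable_unprepare() are the standard clk API calls the hunks rely on.

	/* Sketch only: hold the AXI clock for the whole lifetime of the device. */
	#include <linux/clk.h>

	struct pre_example {			/* stand-in for struct ipu_pre */
		struct clk *clk_axi;
	};

	static int pre_example_probe(struct pre_example *pre)
	{
		/* enable once; the get/put paths no longer touch this clock */
		return clk_prepare_enable(pre->clk_axi);
	}

	static void pre_example_remove(struct pre_example *pre)
	{
		clk_disable_unprepare(pre->clk_axi);
	}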
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index fe40e5e499dd..687705c50794 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig | |||
| @@ -275,10 +275,12 @@ config HID_EMS_FF | |||
| 275 | - Trio Linker Plus II | 275 | - Trio Linker Plus II |
| 276 | 276 | ||
| 277 | config HID_ELECOM | 277 | config HID_ELECOM |
| 278 | tristate "ELECOM BM084 bluetooth mouse" | 278 | tristate "ELECOM HID devices" |
| 279 | depends on HID | 279 | depends on HID |
| 280 | ---help--- | 280 | ---help--- |
| 281 | Support for the ELECOM BM084 (bluetooth mouse). | 281 | Support for ELECOM devices: |
| 282 | - BM084 Bluetooth Mouse | ||
| 283 | - DEFT Trackball (Wired and wireless) | ||
| 282 | 284 | ||
| 283 | config HID_ELO | 285 | config HID_ELO |
| 284 | tristate "ELO USB 4000/4500 touchscreen" | 286 | tristate "ELO USB 4000/4500 touchscreen" |
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c index 16df6cc90235..a6268f2f7408 100644 --- a/drivers/hid/hid-asus.c +++ b/drivers/hid/hid-asus.c | |||
| @@ -69,6 +69,7 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad"); | |||
| 69 | #define QUIRK_IS_MULTITOUCH BIT(3) | 69 | #define QUIRK_IS_MULTITOUCH BIT(3) |
| 70 | #define QUIRK_NO_CONSUMER_USAGES BIT(4) | 70 | #define QUIRK_NO_CONSUMER_USAGES BIT(4) |
| 71 | #define QUIRK_USE_KBD_BACKLIGHT BIT(5) | 71 | #define QUIRK_USE_KBD_BACKLIGHT BIT(5) |
| 72 | #define QUIRK_T100_KEYBOARD BIT(6) | ||
| 72 | 73 | ||
| 73 | #define I2C_KEYBOARD_QUIRKS (QUIRK_FIX_NOTEBOOK_REPORT | \ | 74 | #define I2C_KEYBOARD_QUIRKS (QUIRK_FIX_NOTEBOOK_REPORT | \ |
| 74 | QUIRK_NO_INIT_REPORTS | \ | 75 | QUIRK_NO_INIT_REPORTS | \ |
| @@ -536,6 +537,8 @@ static void asus_remove(struct hid_device *hdev) | |||
| 536 | drvdata->kbd_backlight->removed = true; | 537 | drvdata->kbd_backlight->removed = true; |
| 537 | cancel_work_sync(&drvdata->kbd_backlight->work); | 538 | cancel_work_sync(&drvdata->kbd_backlight->work); |
| 538 | } | 539 | } |
| 540 | |||
| 541 | hid_hw_stop(hdev); | ||
| 539 | } | 542 | } |
| 540 | 543 | ||
| 541 | static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc, | 544 | static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc, |
| @@ -548,6 +551,12 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc, | |||
| 548 | hid_info(hdev, "Fixing up Asus notebook report descriptor\n"); | 551 | hid_info(hdev, "Fixing up Asus notebook report descriptor\n"); |
| 549 | rdesc[55] = 0xdd; | 552 | rdesc[55] = 0xdd; |
| 550 | } | 553 | } |
| 554 | if (drvdata->quirks & QUIRK_T100_KEYBOARD && | ||
| 555 | *rsize == 76 && rdesc[73] == 0x81 && rdesc[74] == 0x01) { | ||
| 556 | hid_info(hdev, "Fixing up Asus T100 keyb report descriptor\n"); | ||
| 557 | rdesc[74] &= ~HID_MAIN_ITEM_CONSTANT; | ||
| 558 | } | ||
| 559 | |||
| 551 | return rdesc; | 560 | return rdesc; |
| 552 | } | 561 | } |
| 553 | 562 | ||
| @@ -560,6 +569,9 @@ static const struct hid_device_id asus_devices[] = { | |||
| 560 | USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) }, | 569 | USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) }, |
| 561 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, | 570 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, |
| 562 | USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2), QUIRK_USE_KBD_BACKLIGHT }, | 571 | USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2), QUIRK_USE_KBD_BACKLIGHT }, |
| 572 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, | ||
| 573 | USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD), | ||
| 574 | QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES }, | ||
| 563 | { } | 575 | { } |
| 564 | }; | 576 | }; |
| 565 | MODULE_DEVICE_TABLE(hid, asus_devices); | 577 | MODULE_DEVICE_TABLE(hid, asus_devices); |
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 37084b645785..6e040692f1d8 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c | |||
| @@ -826,11 +826,35 @@ static int hid_scan_report(struct hid_device *hid) | |||
| 826 | * hid-rmi should take care of them, | 826 | * hid-rmi should take care of them, |
| 827 | * not hid-generic | 827 | * not hid-generic |
| 828 | */ | 828 | */ |
| 829 | if (IS_ENABLED(CONFIG_HID_RMI)) | 829 | hid->group = HID_GROUP_RMI; |
| 830 | hid->group = HID_GROUP_RMI; | ||
| 831 | break; | 830 | break; |
| 832 | } | 831 | } |
| 833 | 832 | ||
| 833 | /* fall back to generic driver in case specific driver doesn't exist */ | ||
| 834 | switch (hid->group) { | ||
| 835 | case HID_GROUP_MULTITOUCH_WIN_8: | ||
| 836 | /* fall-through */ | ||
| 837 | case HID_GROUP_MULTITOUCH: | ||
| 838 | if (!IS_ENABLED(CONFIG_HID_MULTITOUCH)) | ||
| 839 | hid->group = HID_GROUP_GENERIC; | ||
| 840 | break; | ||
| 841 | case HID_GROUP_SENSOR_HUB: | ||
| 842 | if (!IS_ENABLED(CONFIG_HID_SENSOR_HUB)) | ||
| 843 | hid->group = HID_GROUP_GENERIC; | ||
| 844 | break; | ||
| 845 | case HID_GROUP_RMI: | ||
| 846 | if (!IS_ENABLED(CONFIG_HID_RMI)) | ||
| 847 | hid->group = HID_GROUP_GENERIC; | ||
| 848 | break; | ||
| 849 | case HID_GROUP_WACOM: | ||
| 850 | if (!IS_ENABLED(CONFIG_HID_WACOM)) | ||
| 851 | hid->group = HID_GROUP_GENERIC; | ||
| 852 | break; | ||
| 853 | case HID_GROUP_LOGITECH_DJ_DEVICE: | ||
| 854 | if (!IS_ENABLED(CONFIG_HID_LOGITECH_DJ)) | ||
| 855 | hid->group = HID_GROUP_GENERIC; | ||
| 856 | break; | ||
| 857 | } | ||
| 834 | vfree(parser); | 858 | vfree(parser); |
| 835 | return 0; | 859 | return 0; |
| 836 | } | 860 | } |
| @@ -1763,15 +1787,23 @@ EXPORT_SYMBOL_GPL(hid_disconnect); | |||
| 1763 | * used as a driver. See hid_scan_report(). | 1787 | * used as a driver. See hid_scan_report(). |
| 1764 | */ | 1788 | */ |
| 1765 | static const struct hid_device_id hid_have_special_driver[] = { | 1789 | static const struct hid_device_id hid_have_special_driver[] = { |
| 1790 | #if IS_ENABLED(CONFIG_HID_A4TECH) | ||
| 1766 | { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) }, | 1791 | { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) }, |
| 1767 | { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) }, | 1792 | { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) }, |
| 1768 | { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) }, | 1793 | { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) }, |
| 1794 | #endif | ||
| 1795 | #if IS_ENABLED(CONFIG_HID_ACCUTOUCH) | ||
| 1796 | { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) }, | ||
| 1797 | #endif | ||
| 1798 | #if IS_ENABLED(CONFIG_HID_ACRUX) | ||
| 1769 | { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802) }, | 1799 | { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802) }, |
| 1770 | { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0xf705) }, | 1800 | { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0xf705) }, |
| 1801 | #endif | ||
| 1802 | #if IS_ENABLED(CONFIG_HID_ALPS) | ||
| 1771 | { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) }, | 1803 | { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) }, |
| 1804 | #endif | ||
| 1805 | #if IS_ENABLED(CONFIG_HID_APPLE) | ||
| 1772 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) }, | 1806 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) }, |
| 1773 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) }, | ||
| 1774 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) }, | ||
| 1775 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) }, | 1807 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) }, |
| 1776 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) }, | 1808 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) }, |
| 1777 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) }, | 1809 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) }, |
| @@ -1792,11 +1824,6 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
| 1792 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) }, | 1824 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) }, |
| 1793 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO) }, | 1825 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO) }, |
| 1794 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS) }, | 1826 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS) }, |
| 1795 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) }, | ||
| 1796 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL2) }, | ||
| 1797 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL3) }, | ||
| 1798 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) }, | ||
| 1799 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL5) }, | ||
| 1800 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI) }, | 1827 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI) }, |
| 1801 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO) }, | 1828 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO) }, |
| 1802 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS) }, | 1829 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS) }, |
| @@ -1851,59 +1878,100 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
| 1851 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) }, | 1878 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) }, |
| 1852 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, | 1879 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, |
| 1853 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, | 1880 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, |
| 1881 | #endif | ||
| 1882 | #if IS_ENABLED(CONFIG_HID_APPLEIR) | ||
| 1883 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) }, | ||
| 1884 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL2) }, | ||
| 1885 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL3) }, | ||
| 1886 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) }, | ||
| 1887 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL5) }, | ||
| 1888 | #endif | ||
| 1889 | #if IS_ENABLED(CONFIG_HID_ASUS) | ||
| 1854 | { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD) }, | 1890 | { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD) }, |
| 1855 | { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD) }, | 1891 | { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD) }, |
| 1856 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) }, | 1892 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) }, |
| 1857 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2) }, | 1893 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2) }, |
| 1894 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD) }, | ||
| 1895 | #endif | ||
| 1896 | #if IS_ENABLED(CONFIG_HID_AUREAL) | ||
| 1858 | { HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) }, | 1897 | { HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) }, |
| 1898 | #endif | ||
| 1899 | #if IS_ENABLED(CONFIG_HID_BELKIN) | ||
| 1859 | { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) }, | 1900 | { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) }, |
| 1901 | { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) }, | ||
| 1902 | #endif | ||
| 1903 | #if IS_ENABLED(CONFIG_HID_BETOP_FF) | ||
| 1860 | { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) }, | 1904 | { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) }, |
| 1861 | { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185PC, 0x5506) }, | 1905 | { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185PC, 0x5506) }, |
| 1862 | { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2PC, 0x1850) }, | 1906 | { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2PC, 0x1850) }, |
| 1863 | { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2BFM, 0x5500) }, | 1907 | { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2BFM, 0x5500) }, |
| 1864 | { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) }, | 1908 | #endif |
| 1865 | { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) }, | 1909 | #if IS_ENABLED(CONFIG_HID_CHERRY) |
| 1866 | { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) }, | 1910 | { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) }, |
| 1867 | { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) }, | 1911 | { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) }, |
| 1912 | #endif | ||
| 1913 | #if IS_ENABLED(CONFIG_HID_CHICONY) | ||
| 1868 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) }, | 1914 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) }, |
| 1869 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) }, | ||
| 1870 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) }, | 1915 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) }, |
| 1871 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) }, | 1916 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) }, |
| 1872 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) }, | 1917 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) }, |
| 1918 | { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) }, | ||
| 1919 | #endif | ||
| 1920 | #if IS_ENABLED(CONFIG_HID_CMEDIA) | ||
| 1921 | { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM6533) }, | ||
| 1922 | #endif | ||
| 1923 | #if IS_ENABLED(CONFIG_HID_CORSAIR) | ||
| 1873 | { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90) }, | 1924 | { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90) }, |
| 1874 | { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) }, | 1925 | { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) }, |
| 1875 | { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) }, | 1926 | #endif |
| 1927 | #if IS_ENABLED(CONFIG_HID_CP2112) | ||
| 1876 | { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) }, | 1928 | { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) }, |
| 1929 | #endif | ||
| 1930 | #if IS_ENABLED(CONFIG_HID_CYPRESS) | ||
| 1877 | { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) }, | 1931 | { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) }, |
| 1878 | { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) }, | 1932 | { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) }, |
| 1879 | { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) }, | 1933 | { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) }, |
| 1880 | { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4) }, | 1934 | { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4) }, |
| 1881 | { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) }, | 1935 | { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) }, |
| 1882 | { HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, USB_DEVICE_ID_DELCOM_VISUAL_IND) }, | 1936 | #endif |
| 1937 | #if IS_ENABLED(CONFIG_HID_DRAGONRISE) | ||
| 1883 | { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) }, | 1938 | { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) }, |
| 1884 | { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) }, | 1939 | { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) }, |
| 1885 | #if IS_ENABLED(CONFIG_HID_MAYFLASH) | ||
| 1886 | { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3) }, | ||
| 1887 | { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR) }, | ||
| 1888 | { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1) }, | ||
| 1889 | { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2) }, | ||
| 1890 | #endif | 1940 | #endif |
| 1891 | { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) }, | 1941 | #if IS_ENABLED(CONFIG_HID_ELECOM) |
| 1892 | { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) }, | ||
| 1893 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, | 1942 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, |
| 1943 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) }, | ||
| 1944 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) }, | ||
| 1945 | #endif | ||
| 1946 | #if IS_ENABLED(CONFIG_HID_ELO) | ||
| 1894 | { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) }, | 1947 | { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) }, |
| 1895 | { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0030) }, | 1948 | { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0030) }, |
| 1896 | { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) }, | 1949 | #endif |
| 1950 | #if IS_ENABLED(CONFIG_HID_EMS_FF) | ||
| 1897 | { HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) }, | 1951 | { HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) }, |
| 1952 | #endif | ||
| 1953 | #if IS_ENABLED(CONFIG_HID_EZKEY) | ||
| 1898 | { HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) }, | 1954 | { HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) }, |
| 1899 | { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) }, | 1955 | #endif |
| 1900 | { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) }, | 1956 | #if IS_ENABLED(CONFIG_HID_GEMBIRD) |
| 1901 | { HID_USB_DEVICE(USB_VENDOR_ID_GEMBIRD, USB_DEVICE_ID_GEMBIRD_JPD_DUALFORCE2) }, | 1957 | { HID_USB_DEVICE(USB_VENDOR_ID_GEMBIRD, USB_DEVICE_ID_GEMBIRD_JPD_DUALFORCE2) }, |
| 1902 | { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003) }, | 1958 | #endif |
| 1959 | #if IS_ENABLED(CONFIG_HID_GFRM) | ||
| 1960 | { HID_BLUETOOTH_DEVICE(0x58, 0x2000) }, | ||
| 1961 | { HID_BLUETOOTH_DEVICE(0x471, 0x2210) }, | ||
| 1962 | #endif | ||
| 1963 | #if IS_ENABLED(CONFIG_HID_GREENASIA) | ||
| 1903 | { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) }, | 1964 | { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) }, |
| 1965 | #endif | ||
| 1966 | #if IS_ENABLED(CONFIG_HID_GT683R) | ||
| 1967 | { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) }, | ||
| 1968 | #endif | ||
| 1969 | #if IS_ENABLED(CONFIG_HID_GYRATION) | ||
| 1904 | { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) }, | 1970 | { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) }, |
| 1905 | { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) }, | 1971 | { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) }, |
| 1906 | { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) }, | 1972 | { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) }, |
| 1973 | #endif | ||
| 1974 | #if IS_ENABLED(CONFIG_HID_HOLTEK) | ||
| 1907 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) }, | 1975 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) }, |
| 1908 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) }, | 1976 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) }, |
| 1909 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) }, | 1977 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) }, |
| @@ -1912,12 +1980,17 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
| 1912 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) }, | 1980 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) }, |
| 1913 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) }, | 1981 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) }, |
| 1914 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) }, | 1982 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) }, |
| 1915 | { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) }, | 1983 | #endif |
| 1916 | { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) }, | 1984 | #if IS_ENABLED(CONFIG_HID_ICADE) |
| 1917 | { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) }, | ||
| 1918 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) }, | 1985 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) }, |
| 1986 | #endif | ||
| 1987 | #if IS_ENABLED(CONFIG_HID_KENSINGTON) | ||
| 1919 | { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) }, | 1988 | { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) }, |
| 1989 | #endif | ||
| 1990 | #if IS_ENABLED(CONFIG_HID_KEYTOUCH) | ||
| 1920 | { HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) }, | 1991 | { HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) }, |
| 1992 | #endif | ||
| 1993 | #if IS_ENABLED(CONFIG_HID_KYE) | ||
| 1921 | { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) }, | 1994 | { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) }, |
| 1922 | { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_MANTICORE) }, | 1995 | { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_MANTICORE) }, |
| 1923 | { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GX_IMPERATOR) }, | 1996 | { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GX_IMPERATOR) }, |
| @@ -1927,21 +2000,29 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
| 1927 | { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2) }, | 2000 | { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2) }, |
| 1928 | { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) }, | 2001 | { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) }, |
| 1929 | { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912) }, | 2002 | { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912) }, |
| 1930 | { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) }, | 2003 | #endif |
| 2004 | #if IS_ENABLED(CONFIG_HID_LCPOWER) | ||
| 1931 | { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) }, | 2005 | { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) }, |
| 2006 | #endif | ||
| 2007 | #if IS_ENABLED(CONFIG_HID_LED) | ||
| 2008 | { HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, USB_DEVICE_ID_DELCOM_VISUAL_IND) }, | ||
| 2009 | { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) }, | ||
| 2010 | { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) }, | ||
| 2011 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_LUXAFOR) }, | ||
| 2012 | { HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) }, | ||
| 2013 | { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) }, | ||
| 2014 | #endif | ||
| 1932 | #if IS_ENABLED(CONFIG_HID_LENOVO) | 2015 | #if IS_ENABLED(CONFIG_HID_LENOVO) |
| 1933 | { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) }, | 2016 | { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) }, |
| 1934 | { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) }, | 2017 | { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) }, |
| 1935 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) }, | 2018 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) }, |
| 1936 | { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) }, | 2019 | { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) }, |
| 1937 | #endif | 2020 | #endif |
| 1938 | { HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MELFAS_MT) }, | 2021 | #if IS_ENABLED(CONFIG_HID_LOGITECH) |
| 1939 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) }, | 2022 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) }, |
| 1940 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) }, | 2023 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) }, |
| 1941 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) }, | 2024 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) }, |
| 1942 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER) }, | 2025 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER) }, |
| 1943 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) }, | ||
| 1944 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_T651) }, | ||
| 1945 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP) }, | 2026 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP) }, |
| 1946 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE) }, | 2027 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE) }, |
| 1947 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI) }, | 2028 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI) }, |
| @@ -1954,7 +2035,6 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
| 1954 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) }, | 2035 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) }, |
| 1955 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2) }, | 2036 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2) }, |
| 1956 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G29_WHEEL) }, | 2037 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G29_WHEEL) }, |
| 1957 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL) }, | ||
| 1958 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_F3D) }, | 2038 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_F3D) }, |
| 1959 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG ) }, | 2039 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG ) }, |
| 1960 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO) }, | 2040 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO) }, |
| @@ -1966,17 +2046,30 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
| 1966 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL) }, | 2046 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL) }, |
| 1967 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) }, | 2047 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) }, |
| 1968 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G27_WHEEL) }, | 2048 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G27_WHEEL) }, |
| 1969 | #if IS_ENABLED(CONFIG_HID_LOGITECH_DJ) | ||
| 1970 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER) }, | ||
| 1971 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2) }, | ||
| 1972 | #endif | ||
| 1973 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL) }, | 2049 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL) }, |
| 1974 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2) }, | 2050 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2) }, |
| 1975 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) }, | 2051 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) }, |
| 1976 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) }, | 2052 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) }, |
| 1977 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) }, | 2053 | #endif |
| 1978 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) }, | 2054 | #if IS_ENABLED(CONFIG_HID_LOGITECH_HIDPP) |
| 1979 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_LUXAFOR) }, | 2055 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_T651) }, |
| 2056 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL) }, | ||
| 2057 | #endif | ||
| 2058 | #if IS_ENABLED(CONFIG_HID_LOGITECH_DJ) | ||
| 2059 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER) }, | ||
| 2060 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2) }, | ||
| 2061 | #endif | ||
| 2062 | #if IS_ENABLED(CONFIG_HID_MAGICMOUSE) | ||
| 2063 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) }, | ||
| 2064 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) }, | ||
| 2065 | #endif | ||
| 2066 | #if IS_ENABLED(CONFIG_HID_MAYFLASH) | ||
| 2067 | { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3) }, | ||
| 2068 | { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR) }, | ||
| 2069 | { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1) }, | ||
| 2070 | { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2) }, | ||
| 2071 | #endif | ||
| 2072 | #if IS_ENABLED(CONFIG_HID_MICROSOFT) | ||
| 1980 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500) }, | 2073 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500) }, |
| 1981 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_KEYBOARD) }, | 2074 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_KEYBOARD) }, |
| 1982 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) }, | 2075 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) }, |
| @@ -1992,9 +2085,22 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
| 1992 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) }, | 2085 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) }, |
| 1993 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) }, | 2086 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) }, |
| 1994 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) }, | 2087 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) }, |
| 2088 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) }, | ||
| 2089 | #endif | ||
| 2090 | #if IS_ENABLED(CONFIG_HID_MONTEREY) | ||
| 1995 | { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) }, | 2091 | { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) }, |
| 1996 | { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) }, | 2092 | #endif |
| 2093 | #if IS_ENABLED(CONFIG_HID_MULTITOUCH) | ||
| 2094 | { HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MELFAS_MT) }, | ||
| 2095 | #endif | ||
| 2096 | #if IS_ENABLED(CONFIG_HID_WIIMOTE) | ||
| 2097 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) }, | ||
| 2098 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) }, | ||
| 2099 | #endif | ||
| 2100 | #if IS_ENABLED(CONFIG_HID_NTI) | ||
| 1997 | { HID_USB_DEVICE(USB_VENDOR_ID_NTI, USB_DEVICE_ID_USB_SUN) }, | 2101 | { HID_USB_DEVICE(USB_VENDOR_ID_NTI, USB_DEVICE_ID_USB_SUN) }, |
| 2102 | #endif | ||
| 2103 | #if IS_ENABLED(CONFIG_HID_NTRIG) | ||
| 1998 | { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) }, | 2104 | { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) }, |
| 1999 | { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) }, | 2105 | { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) }, |
| 2000 | { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_2) }, | 2106 | { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_2) }, |
| @@ -2014,13 +2120,41 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
| 2014 | { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16) }, | 2120 | { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16) }, |
| 2015 | { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17) }, | 2121 | { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17) }, |
| 2016 | { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18) }, | 2122 | { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18) }, |
| 2123 | #endif | ||
| 2124 | #if IS_ENABLED(CONFIG_HID_ORTEK) | ||
| 2017 | { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) }, | 2125 | { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) }, |
| 2018 | { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, | 2126 | { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, |
| 2127 | { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) }, | ||
| 2128 | #endif | ||
| 2129 | #if IS_ENABLED(CONFIG_HID_PANTHERLORD) | ||
| 2130 | { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) }, | ||
| 2131 | { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) }, | ||
| 2132 | { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003) }, | ||
| 2133 | { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) }, | ||
| 2134 | #endif | ||
| 2135 | #if IS_ENABLED(CONFIG_HID_PENMOUNT) | ||
| 2019 | { HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_6000) }, | 2136 | { HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_6000) }, |
| 2137 | #endif | ||
| 2138 | #if IS_ENABLED(CONFIG_HID_PETALYNX) | ||
| 2020 | { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) }, | 2139 | { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) }, |
| 2140 | #endif | ||
| 2141 | #if IS_ENABLED(CONFIG_HID_PICOLCD) | ||
| 2142 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) }, | ||
| 2143 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) }, | ||
| 2144 | #endif | ||
| 2145 | #if IS_ENABLED(CONFIG_HID_PLANTRONICS) | ||
| 2021 | { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) }, | 2146 | { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) }, |
| 2147 | #endif | ||
| 2148 | #if IS_ENABLED(CONFIG_HID_PRIMAX) | ||
| 2022 | { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) }, | 2149 | { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) }, |
| 2023 | { HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) }, | 2150 | #endif |
| 2151 | #if IS_ENABLED(CONFIG_HID_PRODIKEYS) | ||
| 2152 | { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) }, | ||
| 2153 | #endif | ||
| 2154 | #if IS_ENABLED(CONFIG_HID_RMI) | ||
| 2155 | { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) }, | ||
| 2156 | { HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14) }, | ||
| 2157 | #endif | ||
| 2024 | #if IS_ENABLED(CONFIG_HID_ROCCAT) | 2158 | #if IS_ENABLED(CONFIG_HID_ROCCAT) |
| 2025 | { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) }, | 2159 | { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) }, |
| 2026 | { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) }, | 2160 | { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) }, |
| @@ -2048,9 +2182,21 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
| 2048 | { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT5) }, | 2182 | { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT5) }, |
| 2049 | { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) }, | 2183 | { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) }, |
| 2050 | #endif | 2184 | #endif |
| 2185 | #if IS_ENABLED(CONFIG_HID_SAMSUNG) | ||
| 2051 | { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) }, | 2186 | { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) }, |
| 2052 | { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) }, | 2187 | { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) }, |
| 2053 | { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) }, | 2188 | #endif |
| 2189 | #if IS_ENABLED(CONFIG_HID_SMARTJOYPLUS) | ||
| 2190 | { HID_USB_DEVICE(USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII) }, | ||
| 2191 | { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) }, | ||
| 2192 | { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) }, | ||
| 2193 | { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) }, | ||
| 2194 | { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO) }, | ||
| 2195 | { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) }, | ||
| 2196 | { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) }, | ||
| 2197 | #endif | ||
| 2198 | #if IS_ENABLED(CONFIG_HID_SONY) | ||
| 2199 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) }, | ||
| 2054 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_PS3_BDREMOTE) }, | 2200 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_PS3_BDREMOTE) }, |
| 2055 | { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER) }, | 2201 | { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER) }, |
| 2056 | { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER) }, | 2202 | { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER) }, |
| @@ -2069,9 +2215,17 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
| 2069 | { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) }, | 2215 | { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) }, |
| 2070 | { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) }, | 2216 | { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) }, |
| 2071 | { HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) }, | 2217 | { HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) }, |
| 2218 | #endif | ||
| 2219 | #if IS_ENABLED(CONFIG_HID_SPEEDLINK) | ||
| 2220 | { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) }, | ||
| 2221 | #endif | ||
| 2222 | #if IS_ENABLED(CONFIG_HID_STEELSERIES) | ||
| 2072 | { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) }, | 2223 | { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) }, |
| 2224 | #endif | ||
| 2225 | #if IS_ENABLED(CONFIG_HID_SUNPLUS) | ||
| 2073 | { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) }, | 2226 | { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) }, |
| 2074 | { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) }, | 2227 | #endif |
| 2228 | #if IS_ENABLED(CONFIG_HID_THRUSTMASTER) | ||
| 2075 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) }, | 2229 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) }, |
| 2076 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) }, | 2230 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) }, |
| 2077 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323) }, | 2231 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323) }, |
| @@ -2080,12 +2234,25 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
| 2080 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653) }, | 2234 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653) }, |
| 2081 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) }, | 2235 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) }, |
| 2082 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) }, | 2236 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) }, |
| 2237 | #endif | ||
| 2238 | #if IS_ENABLED(CONFIG_HID_TIVO) | ||
| 2083 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) }, | 2239 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) }, |
| 2084 | { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) }, | 2240 | { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) }, |
| 2085 | { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) }, | 2241 | { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) }, |
| 2242 | #endif | ||
| 2243 | #if IS_ENABLED(CONFIG_HID_TOPSEED) | ||
| 2244 | { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) }, | ||
| 2245 | { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) }, | ||
| 2246 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) }, | ||
| 2086 | { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) }, | 2247 | { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) }, |
| 2087 | { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) }, | 2248 | { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) }, |
| 2249 | #endif | ||
| 2250 | #if IS_ENABLED(CONFIG_HID_TWINHAN) | ||
| 2088 | { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) }, | 2251 | { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) }, |
| 2252 | #endif | ||
| 2253 | #if IS_ENABLED(CONFIG_HID_UCLOGIC) | ||
| 2254 | { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) }, | ||
| 2255 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_HUION_TABLET) }, | ||
| 2089 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209) }, | 2256 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209) }, |
| 2090 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U) }, | 2257 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U) }, |
| 2091 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U) }, | 2258 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U) }, |
| @@ -2093,20 +2260,17 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
| 2093 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) }, | 2260 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) }, |
| 2094 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850) }, | 2261 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850) }, |
| 2095 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) }, | 2262 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) }, |
| 2096 | { HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) }, | ||
| 2097 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_YIYNOVA_TABLET) }, | 2263 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_YIYNOVA_TABLET) }, |
| 2098 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_81) }, | 2264 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_81) }, |
| 2099 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) }, | 2265 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) }, |
| 2100 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) }, | 2266 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) }, |
| 2101 | { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) }, | ||
| 2102 | { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) }, | 2267 | { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) }, |
| 2103 | { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) }, | 2268 | { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) }, |
| 2104 | { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) }, | 2269 | #endif |
| 2105 | { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) }, | 2270 | #if IS_ENABLED(CONFIG_HID_UDRAW_PS3) |
| 2106 | { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO) }, | 2271 | { HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) }, |
| 2107 | { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) }, | 2272 | #endif |
| 2108 | { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) }, | 2273 | #if IS_ENABLED(CONFIG_HID_WALTOP) |
| 2109 | { HID_USB_DEVICE(USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII) }, | ||
| 2110 | { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) }, | 2274 | { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) }, |
| 2111 | { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) }, | 2275 | { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) }, |
| 2112 | { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_Q_PAD) }, | 2276 | { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_Q_PAD) }, |
| @@ -2114,19 +2278,18 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
| 2114 | { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH) }, | 2278 | { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH) }, |
| 2115 | { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) }, | 2279 | { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) }, |
| 2116 | { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) }, | 2280 | { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) }, |
| 2117 | { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) }, | 2281 | #endif |
| 2282 | #if IS_ENABLED(CONFIG_HID_XINMO) | ||
| 2118 | { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) }, | 2283 | { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) }, |
| 2119 | { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) }, | 2284 | { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) }, |
| 2285 | #endif | ||
| 2286 | #if IS_ENABLED(CONFIG_HID_ZEROPLUS) | ||
| 2120 | { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) }, | 2287 | { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) }, |
| 2121 | { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) }, | 2288 | { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) }, |
| 2289 | #endif | ||
| 2290 | #if IS_ENABLED(CONFIG_HID_ZYDACRON) | ||
| 2122 | { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) }, | 2291 | { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) }, |
| 2123 | 2292 | #endif | |
| 2124 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) }, | ||
| 2125 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) }, | ||
| 2126 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) }, | ||
| 2127 | { HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14) }, | ||
| 2128 | { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM6533) }, | ||
| 2129 | { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) }, | ||
| 2130 | { } | 2293 | { } |
| 2131 | }; | 2294 | }; |
| 2132 | 2295 | ||
diff --git a/drivers/hid/hid-elecom.c b/drivers/hid/hid-elecom.c index 6e3848a8d8dd..e2c7465df69f 100644 --- a/drivers/hid/hid-elecom.c +++ b/drivers/hid/hid-elecom.c | |||
| @@ -1,10 +1,8 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * HID driver for Elecom BM084 (bluetooth mouse). | 2 | * HID driver for ELECOM devices. |
| 3 | * Removes a non-existing horizontal wheel from | ||
| 4 | * the HID descriptor. | ||
| 5 | * (This module is based on "hid-ortek".) | ||
| 6 | * | ||
| 7 | * Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com> | 3 | * Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com> |
| 4 | * Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com> | ||
| 5 | * Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu> | ||
| 8 | */ | 6 | */ |
| 9 | 7 | ||
| 10 | /* | 8 | /* |
| @@ -23,15 +21,61 @@ | |||
| 23 | static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc, | 21 | static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc, |
| 24 | unsigned int *rsize) | 22 | unsigned int *rsize) |
| 25 | { | 23 | { |
| 26 | if (*rsize >= 48 && rdesc[46] == 0x05 && rdesc[47] == 0x0c) { | 24 | switch (hdev->product) { |
| 27 | hid_info(hdev, "Fixing up Elecom BM084 report descriptor\n"); | 25 | case USB_DEVICE_ID_ELECOM_BM084: |
| 28 | rdesc[47] = 0x00; | 26 | /* The BM084 Bluetooth mouse includes a non-existing horizontal |
| 27 | * wheel in the HID descriptor. */ | ||
| 28 | if (*rsize >= 48 && rdesc[46] == 0x05 && rdesc[47] == 0x0c) { | ||
| 29 | hid_info(hdev, "Fixing up Elecom BM084 report descriptor\n"); | ||
| 30 | rdesc[47] = 0x00; | ||
| 31 | } | ||
| 32 | break; | ||
| 33 | case USB_DEVICE_ID_ELECOM_DEFT_WIRED: | ||
| 34 | case USB_DEVICE_ID_ELECOM_DEFT_WIRELESS: | ||
| 35 | /* The DEFT trackball has eight buttons, but its descriptor only | ||
| 36 | * reports five, disabling the three Fn buttons on the top of | ||
| 37 | * the mouse. | ||
| 38 | * | ||
| 39 | * Apply the following diff to the descriptor: | ||
| 40 | * | ||
| 41 | * Collection (Physical), Collection (Physical), | ||
| 42 | * Report ID (1), Report ID (1), | ||
| 43 | * Report Count (5), -> Report Count (8), | ||
| 44 | * Report Size (1), Report Size (1), | ||
| 45 | * Usage Page (Button), Usage Page (Button), | ||
| 46 | * Usage Minimum (01h), Usage Minimum (01h), | ||
| 47 | * Usage Maximum (05h), -> Usage Maximum (08h), | ||
| 48 | * Logical Minimum (0), Logical Minimum (0), | ||
| 49 | * Logical Maximum (1), Logical Maximum (1), | ||
| 50 | * Input (Variable), Input (Variable), | ||
| 51 | * Report Count (1), -> Report Count (0), | ||
| 52 | * Report Size (3), Report Size (3), | ||
| 53 | * Input (Constant), Input (Constant), | ||
| 54 | * Report Size (16), Report Size (16), | ||
| 55 | * Report Count (2), Report Count (2), | ||
| 56 | * Usage Page (Desktop), Usage Page (Desktop), | ||
| 57 | * Usage (X), Usage (X), | ||
| 58 | * Usage (Y), Usage (Y), | ||
| 59 | * Logical Minimum (-32768), Logical Minimum (-32768), | ||
| 60 | * Logical Maximum (32767), Logical Maximum (32767), | ||
| 61 | * Input (Variable, Relative), Input (Variable, Relative), | ||
| 62 | * End Collection, End Collection, | ||
| 63 | */ | ||
| 64 | if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) { | ||
| 65 | hid_info(hdev, "Fixing up Elecom DEFT Fn buttons\n"); | ||
| 66 | rdesc[13] = 8; /* Button/Variable Report Count */ | ||
| 67 | rdesc[21] = 8; /* Button/Variable Usage Maximum */ | ||
| 68 | rdesc[29] = 0; /* Button/Constant Report Count */ | ||
| 69 | } | ||
| 70 | break; | ||
| 29 | } | 71 | } |
| 30 | return rdesc; | 72 | return rdesc; |
| 31 | } | 73 | } |
| 32 | 74 | ||
| 33 | static const struct hid_device_id elecom_devices[] = { | 75 | static const struct hid_device_id elecom_devices[] = { |
| 34 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084)}, | 76 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, |
| 77 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) }, | ||
| 78 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) }, | ||
| 35 | { } | 79 | { } |
| 36 | }; | 80 | }; |
| 37 | MODULE_DEVICE_TABLE(hid, elecom_devices); | 81 | MODULE_DEVICE_TABLE(hid, elecom_devices); |
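The report_fixup above patches three bytes of the DEFT descriptor in place. A standalone sketch of just that byte-patching step, using a dummy 213-byte buffer instead of the real descriptor (offsets 13, 21 and 29 are the ones named in the hunk):

#include <stdio.h>
#include <string.h>

static void deft_fixup(unsigned char *rdesc, unsigned int rsize)
{
	/* only touch a descriptor that looks like the one we expect */
	if (rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) {
		rdesc[13] = 8;	/* Button/Variable Report Count */
		rdesc[21] = 8;	/* Button/Variable Usage Maximum */
		rdesc[29] = 0;	/* Button/Constant Report Count  */
	}
}

int main(void)
{
	unsigned char rdesc[213];

	memset(rdesc, 0, sizeof(rdesc));
	rdesc[13] = 5;		/* Report Count (5)     */
	rdesc[21] = 5;		/* Usage Maximum (05h)  */
	rdesc[29] = 1;		/* padding Report Count */

	deft_fixup(rdesc, sizeof(rdesc));
	printf("count=%d max=%d pad=%d\n", rdesc[13], rdesc[21], rdesc[29]);
	return 0;
}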
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 643390ba749d..4f9a3938189a 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
| @@ -173,6 +173,7 @@ | |||
| 173 | #define USB_VENDOR_ID_ASUSTEK 0x0b05 | 173 | #define USB_VENDOR_ID_ASUSTEK 0x0b05 |
| 174 | #define USB_DEVICE_ID_ASUSTEK_LCM 0x1726 | 174 | #define USB_DEVICE_ID_ASUSTEK_LCM 0x1726 |
| 175 | #define USB_DEVICE_ID_ASUSTEK_LCM2 0x175b | 175 | #define USB_DEVICE_ID_ASUSTEK_LCM2 0x175b |
| 176 | #define USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD 0x17e0 | ||
| 176 | #define USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD 0x8585 | 177 | #define USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD 0x8585 |
| 177 | #define USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD 0x0101 | 178 | #define USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD 0x0101 |
| 178 | #define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1 0x1854 | 179 | #define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1 0x1854 |
| @@ -318,6 +319,9 @@ | |||
| 318 | #define USB_VENDOR_ID_DELCOM 0x0fc5 | 319 | #define USB_VENDOR_ID_DELCOM 0x0fc5 |
| 319 | #define USB_DEVICE_ID_DELCOM_VISUAL_IND 0xb080 | 320 | #define USB_DEVICE_ID_DELCOM_VISUAL_IND 0xb080 |
| 320 | 321 | ||
| 322 | #define USB_VENDOR_ID_DELL 0x413c | ||
| 323 | #define USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE 0x301a | ||
| 324 | |||
| 321 | #define USB_VENDOR_ID_DELORME 0x1163 | 325 | #define USB_VENDOR_ID_DELORME 0x1163 |
| 322 | #define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100 | 326 | #define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100 |
| 323 | #define USB_DEVICE_ID_DELORME_EM_LT20 0x0200 | 327 | #define USB_DEVICE_ID_DELORME_EM_LT20 0x0200 |
| @@ -358,6 +362,8 @@ | |||
| 358 | 362 | ||
| 359 | #define USB_VENDOR_ID_ELECOM 0x056e | 363 | #define USB_VENDOR_ID_ELECOM 0x056e |
| 360 | #define USB_DEVICE_ID_ELECOM_BM084 0x0061 | 364 | #define USB_DEVICE_ID_ELECOM_BM084 0x0061 |
| 365 | #define USB_DEVICE_ID_ELECOM_DEFT_WIRED 0x00fe | ||
| 366 | #define USB_DEVICE_ID_ELECOM_DEFT_WIRELESS 0x00ff | ||
| 361 | 367 | ||
| 362 | #define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34 | 368 | #define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34 |
| 363 | #define USB_DEVICE_ID_DREAM_CHEEKY_WN 0x0004 | 369 | #define USB_DEVICE_ID_DREAM_CHEEKY_WN 0x0004 |
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index 8daa8ce64ebb..fb55fb4c39fc 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c | |||
| @@ -897,6 +897,15 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client, | |||
| 897 | return 0; | 897 | return 0; |
| 898 | } | 898 | } |
| 899 | 899 | ||
| 900 | static void i2c_hid_acpi_fix_up_power(struct device *dev) | ||
| 901 | { | ||
| 902 | acpi_handle handle = ACPI_HANDLE(dev); | ||
| 903 | struct acpi_device *adev; | ||
| 904 | |||
| 905 | if (handle && acpi_bus_get_device(handle, &adev) == 0) | ||
| 906 | acpi_device_fix_up_power(adev); | ||
| 907 | } | ||
| 908 | |||
| 900 | static const struct acpi_device_id i2c_hid_acpi_match[] = { | 909 | static const struct acpi_device_id i2c_hid_acpi_match[] = { |
| 901 | {"ACPI0C50", 0 }, | 910 | {"ACPI0C50", 0 }, |
| 902 | {"PNP0C50", 0 }, | 911 | {"PNP0C50", 0 }, |
| @@ -909,6 +918,8 @@ static inline int i2c_hid_acpi_pdata(struct i2c_client *client, | |||
| 909 | { | 918 | { |
| 910 | return -ENODEV; | 919 | return -ENODEV; |
| 911 | } | 920 | } |
| 921 | |||
| 922 | static inline void i2c_hid_acpi_fix_up_power(struct device *dev) {} | ||
| 912 | #endif | 923 | #endif |
| 913 | 924 | ||
| 914 | #ifdef CONFIG_OF | 925 | #ifdef CONFIG_OF |
| @@ -1030,6 +1041,8 @@ static int i2c_hid_probe(struct i2c_client *client, | |||
| 1030 | if (ret < 0) | 1041 | if (ret < 0) |
| 1031 | goto err_regulator; | 1042 | goto err_regulator; |
| 1032 | 1043 | ||
| 1044 | i2c_hid_acpi_fix_up_power(&client->dev); | ||
| 1045 | |||
| 1033 | pm_runtime_get_noresume(&client->dev); | 1046 | pm_runtime_get_noresume(&client->dev); |
| 1034 | pm_runtime_set_active(&client->dev); | 1047 | pm_runtime_set_active(&client->dev); |
| 1035 | pm_runtime_enable(&client->dev); | 1048 | pm_runtime_enable(&client->dev); |
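The i2c-hid change above adds an ACPI power fix-up that collapses to an empty inline stub when ACPI support is compiled out, so the probe path can call it unconditionally. A small standalone illustration of that stub pattern (CONFIG_DEMO_ACPI is a made-up stand-in):

#include <stdio.h>

#ifdef CONFIG_DEMO_ACPI
static void fix_up_power(const char *dev)
{
	printf("fixing up power for %s\n", dev);
}
#else
static inline void fix_up_power(const char *dev)
{
	(void)dev;			/* feature compiled out: do nothing */
}
#endif

int main(void)
{
	fix_up_power("i2c-hid");	/* no #ifdef needed at the call site */
	printf("probe continues\n");
	return 0;
}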
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 6316498b7812..a88e7c7bea0a 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c | |||
| @@ -85,6 +85,7 @@ static const struct hid_blacklist { | |||
| 85 | { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, | 85 | { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, |
| 86 | { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, | 86 | { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, |
| 87 | { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET }, | 87 | { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET }, |
| 88 | { USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, | ||
| 88 | { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, | 89 | { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, |
| 89 | { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT }, | 90 | { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT }, |
| 90 | { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT }, | 91 | { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT }, |
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 4b225fb19a16..e274c9dc32f3 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c | |||
| @@ -1571,37 +1571,38 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len) | |||
| 1571 | { | 1571 | { |
| 1572 | unsigned char *data = wacom->data; | 1572 | unsigned char *data = wacom->data; |
| 1573 | 1573 | ||
| 1574 | if (wacom->pen_input) | 1574 | if (wacom->pen_input) { |
| 1575 | dev_dbg(wacom->pen_input->dev.parent, | 1575 | dev_dbg(wacom->pen_input->dev.parent, |
| 1576 | "%s: received report #%d\n", __func__, data[0]); | 1576 | "%s: received report #%d\n", __func__, data[0]); |
| 1577 | else if (wacom->touch_input) | 1577 | |
| 1578 | if (len == WACOM_PKGLEN_PENABLED || | ||
| 1579 | data[0] == WACOM_REPORT_PENABLED) | ||
| 1580 | return wacom_tpc_pen(wacom); | ||
| 1581 | } | ||
| 1582 | else if (wacom->touch_input) { | ||
| 1578 | dev_dbg(wacom->touch_input->dev.parent, | 1583 | dev_dbg(wacom->touch_input->dev.parent, |
| 1579 | "%s: received report #%d\n", __func__, data[0]); | 1584 | "%s: received report #%d\n", __func__, data[0]); |
| 1580 | 1585 | ||
| 1581 | switch (len) { | 1586 | switch (len) { |
| 1582 | case WACOM_PKGLEN_TPC1FG: | 1587 | case WACOM_PKGLEN_TPC1FG: |
| 1583 | return wacom_tpc_single_touch(wacom, len); | 1588 | return wacom_tpc_single_touch(wacom, len); |
| 1584 | 1589 | ||
| 1585 | case WACOM_PKGLEN_TPC2FG: | 1590 | case WACOM_PKGLEN_TPC2FG: |
| 1586 | return wacom_tpc_mt_touch(wacom); | 1591 | return wacom_tpc_mt_touch(wacom); |
| 1587 | 1592 | ||
| 1588 | case WACOM_PKGLEN_PENABLED: | 1593 | default: |
| 1589 | return wacom_tpc_pen(wacom); | 1594 | switch (data[0]) { |
| 1595 | case WACOM_REPORT_TPC1FG: | ||
| 1596 | case WACOM_REPORT_TPCHID: | ||
| 1597 | case WACOM_REPORT_TPCST: | ||
| 1598 | case WACOM_REPORT_TPC1FGE: | ||
| 1599 | return wacom_tpc_single_touch(wacom, len); | ||
| 1590 | 1600 | ||
| 1591 | default: | 1601 | case WACOM_REPORT_TPCMT: |
| 1592 | switch (data[0]) { | 1602 | case WACOM_REPORT_TPCMT2: |
| 1593 | case WACOM_REPORT_TPC1FG: | 1603 | return wacom_mt_touch(wacom); |
| 1594 | case WACOM_REPORT_TPCHID: | ||
| 1595 | case WACOM_REPORT_TPCST: | ||
| 1596 | case WACOM_REPORT_TPC1FGE: | ||
| 1597 | return wacom_tpc_single_touch(wacom, len); | ||
| 1598 | |||
| 1599 | case WACOM_REPORT_TPCMT: | ||
| 1600 | case WACOM_REPORT_TPCMT2: | ||
| 1601 | return wacom_mt_touch(wacom); | ||
| 1602 | 1604 | ||
| 1603 | case WACOM_REPORT_PENABLED: | 1605 | } |
| 1604 | return wacom_tpc_pen(wacom); | ||
| 1605 | } | 1606 | } |
| 1606 | } | 1607 | } |
| 1607 | 1608 | ||
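The rework above makes wacom_tpc_irq() recognise pen packets, by length or by report ID, before any touch handling runs. A standalone sketch of that dispatch order; the constants are illustrative stand-ins, not the real WACOM_* values:

#include <stdio.h>

#define PKGLEN_PENABLED	10
#define REPORT_PENABLED	 2
#define PKGLEN_TPC1FG	 5
#define REPORT_TPCMT	13

static const char *dispatch(int has_pen, int has_touch,
			    unsigned int len, unsigned char report_id)
{
	if (has_pen) {
		/* pen first: match either the packet length or the report ID */
		if (len == PKGLEN_PENABLED || report_id == REPORT_PENABLED)
			return "pen";
	} else if (has_touch) {
		if (len == PKGLEN_TPC1FG)
			return "single-touch";
		if (report_id == REPORT_TPCMT)
			return "multi-touch";
	}
	return "ignored";
}

int main(void)
{
	printf("%s\n", dispatch(1, 0, PKGLEN_PENABLED, 0));	/* pen */
	printf("%s\n", dispatch(0, 1, 64, REPORT_TPCMT));	/* multi-touch */
	return 0;
}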
diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c index 26b05106f0d3..93d28c0ec8bf 100644 --- a/drivers/hsi/clients/ssi_protocol.c +++ b/drivers/hsi/clients/ssi_protocol.c | |||
| @@ -1066,7 +1066,7 @@ static void ssip_pn_setup(struct net_device *dev) | |||
| 1066 | dev->addr_len = 1; | 1066 | dev->addr_len = 1; |
| 1067 | dev->tx_queue_len = SSIP_TXQUEUE_LEN; | 1067 | dev->tx_queue_len = SSIP_TXQUEUE_LEN; |
| 1068 | 1068 | ||
| 1069 | dev->destructor = free_netdev; | 1069 | dev->needs_free_netdev = true; |
| 1070 | dev->header_ops = &phonet_header_ops; | 1070 | dev->header_ops = &phonet_header_ops; |
| 1071 | } | 1071 | } |
| 1072 | 1072 | ||
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 22d5eafd6815..5ef2814345ef 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig | |||
| @@ -343,6 +343,7 @@ config SENSORS_ASB100 | |||
| 343 | 343 | ||
| 344 | config SENSORS_ASPEED | 344 | config SENSORS_ASPEED |
| 345 | tristate "ASPEED AST2400/AST2500 PWM and Fan tach driver" | 345 | tristate "ASPEED AST2400/AST2500 PWM and Fan tach driver" |
| 346 | select REGMAP | ||
| 346 | help | 347 | help |
| 347 | This driver provides support for ASPEED AST2400/AST2500 PWM | 348 | This driver provides support for ASPEED AST2400/AST2500 PWM |
| 348 | and Fan Tacho controllers. | 349 | and Fan Tacho controllers. |
diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c index 48403a2115be..9de13d626c68 100644 --- a/drivers/hwmon/aspeed-pwm-tacho.c +++ b/drivers/hwmon/aspeed-pwm-tacho.c | |||
| @@ -7,6 +7,7 @@ | |||
| 7 | */ | 7 | */ |
| 8 | 8 | ||
| 9 | #include <linux/clk.h> | 9 | #include <linux/clk.h> |
| 10 | #include <linux/errno.h> | ||
| 10 | #include <linux/gpio/consumer.h> | 11 | #include <linux/gpio/consumer.h> |
| 11 | #include <linux/delay.h> | 12 | #include <linux/delay.h> |
| 12 | #include <linux/hwmon.h> | 13 | #include <linux/hwmon.h> |
| @@ -494,7 +495,7 @@ static u32 aspeed_get_fan_tach_ch_measure_period(struct aspeed_pwm_tacho_data | |||
| 494 | return clk / (clk_unit * div_h * div_l * tacho_div * tacho_unit); | 495 | return clk / (clk_unit * div_h * div_l * tacho_div * tacho_unit); |
| 495 | } | 496 | } |
| 496 | 497 | ||
| 497 | static u32 aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv, | 498 | static int aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv, |
| 498 | u8 fan_tach_ch) | 499 | u8 fan_tach_ch) |
| 499 | { | 500 | { |
| 500 | u32 raw_data, tach_div, clk_source, sec, val; | 501 | u32 raw_data, tach_div, clk_source, sec, val; |
| @@ -510,6 +511,9 @@ static u32 aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv, | |||
| 510 | msleep(sec); | 511 | msleep(sec); |
| 511 | 512 | ||
| 512 | regmap_read(priv->regmap, ASPEED_PTCR_RESULT, &val); | 513 | regmap_read(priv->regmap, ASPEED_PTCR_RESULT, &val); |
| 514 | if (!(val & RESULT_STATUS_MASK)) | ||
| 515 | return -ETIMEDOUT; | ||
| 516 | |||
| 513 | raw_data = val & RESULT_VALUE_MASK; | 517 | raw_data = val & RESULT_VALUE_MASK; |
| 514 | tach_div = priv->type_fan_tach_clock_division[type]; | 518 | tach_div = priv->type_fan_tach_clock_division[type]; |
| 515 | tach_div = 0x4 << (tach_div * 2); | 519 | tach_div = 0x4 << (tach_div * 2); |
| @@ -561,12 +565,14 @@ static ssize_t show_rpm(struct device *dev, struct device_attribute *attr, | |||
| 561 | { | 565 | { |
| 562 | struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); | 566 | struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); |
| 563 | int index = sensor_attr->index; | 567 | int index = sensor_attr->index; |
| 564 | u32 rpm; | 568 | int rpm; |
| 565 | struct aspeed_pwm_tacho_data *priv = dev_get_drvdata(dev); | 569 | struct aspeed_pwm_tacho_data *priv = dev_get_drvdata(dev); |
| 566 | 570 | ||
| 567 | rpm = aspeed_get_fan_tach_ch_rpm(priv, index); | 571 | rpm = aspeed_get_fan_tach_ch_rpm(priv, index); |
| 572 | if (rpm < 0) | ||
| 573 | return rpm; | ||
| 568 | 574 | ||
| 569 | return sprintf(buf, "%u\n", rpm); | 575 | return sprintf(buf, "%d\n", rpm); |
| 570 | } | 576 | } |
| 571 | 577 | ||
| 572 | static umode_t pwm_is_visible(struct kobject *kobj, | 578 | static umode_t pwm_is_visible(struct kobject *kobj, |
| @@ -591,24 +597,23 @@ static umode_t fan_dev_is_visible(struct kobject *kobj, | |||
| 591 | return a->mode; | 597 | return a->mode; |
| 592 | } | 598 | } |
| 593 | 599 | ||
| 594 | static SENSOR_DEVICE_ATTR(pwm0, 0644, | ||
| 595 | show_pwm, set_pwm, 0); | ||
| 596 | static SENSOR_DEVICE_ATTR(pwm1, 0644, | 600 | static SENSOR_DEVICE_ATTR(pwm1, 0644, |
| 597 | show_pwm, set_pwm, 1); | 601 | show_pwm, set_pwm, 0); |
| 598 | static SENSOR_DEVICE_ATTR(pwm2, 0644, | 602 | static SENSOR_DEVICE_ATTR(pwm2, 0644, |
| 599 | show_pwm, set_pwm, 2); | 603 | show_pwm, set_pwm, 1); |
| 600 | static SENSOR_DEVICE_ATTR(pwm3, 0644, | 604 | static SENSOR_DEVICE_ATTR(pwm3, 0644, |
| 601 | show_pwm, set_pwm, 3); | 605 | show_pwm, set_pwm, 2); |
| 602 | static SENSOR_DEVICE_ATTR(pwm4, 0644, | 606 | static SENSOR_DEVICE_ATTR(pwm4, 0644, |
| 603 | show_pwm, set_pwm, 4); | 607 | show_pwm, set_pwm, 3); |
| 604 | static SENSOR_DEVICE_ATTR(pwm5, 0644, | 608 | static SENSOR_DEVICE_ATTR(pwm5, 0644, |
| 605 | show_pwm, set_pwm, 5); | 609 | show_pwm, set_pwm, 4); |
| 606 | static SENSOR_DEVICE_ATTR(pwm6, 0644, | 610 | static SENSOR_DEVICE_ATTR(pwm6, 0644, |
| 607 | show_pwm, set_pwm, 6); | 611 | show_pwm, set_pwm, 5); |
| 608 | static SENSOR_DEVICE_ATTR(pwm7, 0644, | 612 | static SENSOR_DEVICE_ATTR(pwm7, 0644, |
| 613 | show_pwm, set_pwm, 6); | ||
| 614 | static SENSOR_DEVICE_ATTR(pwm8, 0644, | ||
| 609 | show_pwm, set_pwm, 7); | 615 | show_pwm, set_pwm, 7); |
| 610 | static struct attribute *pwm_dev_attrs[] = { | 616 | static struct attribute *pwm_dev_attrs[] = { |
| 611 | &sensor_dev_attr_pwm0.dev_attr.attr, | ||
| 612 | &sensor_dev_attr_pwm1.dev_attr.attr, | 617 | &sensor_dev_attr_pwm1.dev_attr.attr, |
| 613 | &sensor_dev_attr_pwm2.dev_attr.attr, | 618 | &sensor_dev_attr_pwm2.dev_attr.attr, |
| 614 | &sensor_dev_attr_pwm3.dev_attr.attr, | 619 | &sensor_dev_attr_pwm3.dev_attr.attr, |
| @@ -616,6 +621,7 @@ static struct attribute *pwm_dev_attrs[] = { | |||
| 616 | &sensor_dev_attr_pwm5.dev_attr.attr, | 621 | &sensor_dev_attr_pwm5.dev_attr.attr, |
| 617 | &sensor_dev_attr_pwm6.dev_attr.attr, | 622 | &sensor_dev_attr_pwm6.dev_attr.attr, |
| 618 | &sensor_dev_attr_pwm7.dev_attr.attr, | 623 | &sensor_dev_attr_pwm7.dev_attr.attr, |
| 624 | &sensor_dev_attr_pwm8.dev_attr.attr, | ||
| 619 | NULL, | 625 | NULL, |
| 620 | }; | 626 | }; |
| 621 | 627 | ||
| @@ -624,40 +630,39 @@ static const struct attribute_group pwm_dev_group = { | |||
| 624 | .is_visible = pwm_is_visible, | 630 | .is_visible = pwm_is_visible, |
| 625 | }; | 631 | }; |
| 626 | 632 | ||
| 627 | static SENSOR_DEVICE_ATTR(fan0_input, 0444, | ||
| 628 | show_rpm, NULL, 0); | ||
| 629 | static SENSOR_DEVICE_ATTR(fan1_input, 0444, | 633 | static SENSOR_DEVICE_ATTR(fan1_input, 0444, |
| 630 | show_rpm, NULL, 1); | 634 | show_rpm, NULL, 0); |
| 631 | static SENSOR_DEVICE_ATTR(fan2_input, 0444, | 635 | static SENSOR_DEVICE_ATTR(fan2_input, 0444, |
| 632 | show_rpm, NULL, 2); | 636 | show_rpm, NULL, 1); |
| 633 | static SENSOR_DEVICE_ATTR(fan3_input, 0444, | 637 | static SENSOR_DEVICE_ATTR(fan3_input, 0444, |
| 634 | show_rpm, NULL, 3); | 638 | show_rpm, NULL, 2); |
| 635 | static SENSOR_DEVICE_ATTR(fan4_input, 0444, | 639 | static SENSOR_DEVICE_ATTR(fan4_input, 0444, |
| 636 | show_rpm, NULL, 4); | 640 | show_rpm, NULL, 3); |
| 637 | static SENSOR_DEVICE_ATTR(fan5_input, 0444, | 641 | static SENSOR_DEVICE_ATTR(fan5_input, 0444, |
| 638 | show_rpm, NULL, 5); | 642 | show_rpm, NULL, 4); |
| 639 | static SENSOR_DEVICE_ATTR(fan6_input, 0444, | 643 | static SENSOR_DEVICE_ATTR(fan6_input, 0444, |
| 640 | show_rpm, NULL, 6); | 644 | show_rpm, NULL, 5); |
| 641 | static SENSOR_DEVICE_ATTR(fan7_input, 0444, | 645 | static SENSOR_DEVICE_ATTR(fan7_input, 0444, |
| 642 | show_rpm, NULL, 7); | 646 | show_rpm, NULL, 6); |
| 643 | static SENSOR_DEVICE_ATTR(fan8_input, 0444, | 647 | static SENSOR_DEVICE_ATTR(fan8_input, 0444, |
| 644 | show_rpm, NULL, 8); | 648 | show_rpm, NULL, 7); |
| 645 | static SENSOR_DEVICE_ATTR(fan9_input, 0444, | 649 | static SENSOR_DEVICE_ATTR(fan9_input, 0444, |
| 646 | show_rpm, NULL, 9); | 650 | show_rpm, NULL, 8); |
| 647 | static SENSOR_DEVICE_ATTR(fan10_input, 0444, | 651 | static SENSOR_DEVICE_ATTR(fan10_input, 0444, |
| 648 | show_rpm, NULL, 10); | 652 | show_rpm, NULL, 9); |
| 649 | static SENSOR_DEVICE_ATTR(fan11_input, 0444, | 653 | static SENSOR_DEVICE_ATTR(fan11_input, 0444, |
| 650 | show_rpm, NULL, 11); | 654 | show_rpm, NULL, 10); |
| 651 | static SENSOR_DEVICE_ATTR(fan12_input, 0444, | 655 | static SENSOR_DEVICE_ATTR(fan12_input, 0444, |
| 652 | show_rpm, NULL, 12); | 656 | show_rpm, NULL, 11); |
| 653 | static SENSOR_DEVICE_ATTR(fan13_input, 0444, | 657 | static SENSOR_DEVICE_ATTR(fan13_input, 0444, |
| 654 | show_rpm, NULL, 13); | 658 | show_rpm, NULL, 12); |
| 655 | static SENSOR_DEVICE_ATTR(fan14_input, 0444, | 659 | static SENSOR_DEVICE_ATTR(fan14_input, 0444, |
| 656 | show_rpm, NULL, 14); | 660 | show_rpm, NULL, 13); |
| 657 | static SENSOR_DEVICE_ATTR(fan15_input, 0444, | 661 | static SENSOR_DEVICE_ATTR(fan15_input, 0444, |
| 662 | show_rpm, NULL, 14); | ||
| 663 | static SENSOR_DEVICE_ATTR(fan16_input, 0444, | ||
| 658 | show_rpm, NULL, 15); | 664 | show_rpm, NULL, 15); |
| 659 | static struct attribute *fan_dev_attrs[] = { | 665 | static struct attribute *fan_dev_attrs[] = { |
| 660 | &sensor_dev_attr_fan0_input.dev_attr.attr, | ||
| 661 | &sensor_dev_attr_fan1_input.dev_attr.attr, | 666 | &sensor_dev_attr_fan1_input.dev_attr.attr, |
| 662 | &sensor_dev_attr_fan2_input.dev_attr.attr, | 667 | &sensor_dev_attr_fan2_input.dev_attr.attr, |
| 663 | &sensor_dev_attr_fan3_input.dev_attr.attr, | 668 | &sensor_dev_attr_fan3_input.dev_attr.attr, |
| @@ -673,6 +678,7 @@ static struct attribute *fan_dev_attrs[] = { | |||
| 673 | &sensor_dev_attr_fan13_input.dev_attr.attr, | 678 | &sensor_dev_attr_fan13_input.dev_attr.attr, |
| 674 | &sensor_dev_attr_fan14_input.dev_attr.attr, | 679 | &sensor_dev_attr_fan14_input.dev_attr.attr, |
| 675 | &sensor_dev_attr_fan15_input.dev_attr.attr, | 680 | &sensor_dev_attr_fan15_input.dev_attr.attr, |
| 681 | &sensor_dev_attr_fan16_input.dev_attr.attr, | ||
| 676 | NULL | 682 | NULL |
| 677 | }; | 683 | }; |
| 678 | 684 | ||
| @@ -802,7 +808,6 @@ static int aspeed_pwm_tacho_probe(struct platform_device *pdev) | |||
| 802 | if (ret) | 808 | if (ret) |
| 803 | return ret; | 809 | return ret; |
| 804 | } | 810 | } |
| 805 | of_node_put(np); | ||
| 806 | 811 | ||
| 807 | priv->groups[0] = &pwm_dev_group; | 812 | priv->groups[0] = &pwm_dev_group; |
| 808 | priv->groups[1] = &fan_dev_group; | 813 | priv->groups[1] = &fan_dev_group; |
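The aspeed-pwm-tacho fix above stops trusting the RESULT register unless its status bit is set, returning -ETIMEDOUT instead of a stale reading; the same hunks also renumber the sysfs attributes so pwmN and fanN_input start at 1, as hwmon expects. A standalone sketch of the error path; the masks and the hz_per_count factor are illustrative, not the real ASPEED register layout or clock math:

#include <errno.h>
#include <stdio.h>

#define RESULT_STATUS_MASK	(1u << 20)
#define RESULT_VALUE_MASK	0xfffffu

static int decode_rpm(unsigned int result_reg, unsigned int hz_per_count)
{
	if (!(result_reg & RESULT_STATUS_MASK))
		return -ETIMEDOUT;	/* no completed measurement */
	return (int)((result_reg & RESULT_VALUE_MASK) * hz_per_count);
}

int main(void)
{
	printf("%d\n", decode_rpm(RESULT_STATUS_MASK | 1200, 2));	/* 2400 */
	printf("%d\n", decode_rpm(1200, 2));				/* -ETIMEDOUT */
	return 0;
}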
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index 95ed17183e73..54a47b40546f 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c | |||
| @@ -734,9 +734,9 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx, | |||
| 734 | * the first read operation, otherwise the first read cost | 734 | * the first read operation, otherwise the first read cost |
| 735 | * one extra clock cycle. | 735 | * one extra clock cycle. |
| 736 | */ | 736 | */ |
| 737 | temp = readb(i2c_imx->base + IMX_I2C_I2CR); | 737 | temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); |
| 738 | temp |= I2CR_MTX; | 738 | temp |= I2CR_MTX; |
| 739 | writeb(temp, i2c_imx->base + IMX_I2C_I2CR); | 739 | imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); |
| 740 | } | 740 | } |
| 741 | msgs->buf[msgs->len-1] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); | 741 | msgs->buf[msgs->len-1] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); |
| 742 | 742 | ||
| @@ -857,9 +857,9 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo | |||
| 857 | * the first read operation, otherwise the first read cost | 857 | * the first read operation, otherwise the first read cost |
| 858 | * one extra clock cycle. | 858 | * one extra clock cycle. |
| 859 | */ | 859 | */ |
| 860 | temp = readb(i2c_imx->base + IMX_I2C_I2CR); | 860 | temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); |
| 861 | temp |= I2CR_MTX; | 861 | temp |= I2CR_MTX; |
| 862 | writeb(temp, i2c_imx->base + IMX_I2C_I2CR); | 862 | imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); |
| 863 | } | 863 | } |
| 864 | } else if (i == (msgs->len - 2)) { | 864 | } else if (i == (msgs->len - 2)) { |
| 865 | dev_dbg(&i2c_imx->adapter.dev, | 865 | dev_dbg(&i2c_imx->adapter.dev, |
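The i2c-imx change above routes the remaining raw readb()/writeb() accesses through the driver's imx_i2c_read_reg()/imx_i2c_write_reg() helpers. A standalone sketch of that accessor pattern, mirroring the helpers' argument order; the register shift is a made-up stand-in for whatever the real driver encodes per SoC:

#include <stdint.h>
#include <stdio.h>

struct demo_i2c { uint8_t regs[64]; unsigned int regshift; };

static uint8_t demo_read_reg(struct demo_i2c *d, unsigned int reg)
{
	return d->regs[reg << d->regshift];
}

static void demo_write_reg(uint8_t val, struct demo_i2c *d, unsigned int reg)
{
	d->regs[reg << d->regshift] = val;
}

int main(void)
{
	struct demo_i2c d = { .regshift = 1 };
	uint8_t temp;

	demo_write_reg(0x80, &d, 2);
	temp = demo_read_reg(&d, 2);
	temp |= 0x10;				/* like setting I2CR_MTX */
	demo_write_reg(temp, &d, 2);
	printf("0x%02x\n", (unsigned int)demo_read_reg(&d, 2));	/* 0x90 */
	return 0;
}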
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c index f573448d2132..e98e44e584a4 100644 --- a/drivers/i2c/busses/i2c-ismt.c +++ b/drivers/i2c/busses/i2c-ismt.c | |||
| @@ -584,7 +584,7 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr, | |||
| 584 | 584 | ||
| 585 | /* unmap the data buffer */ | 585 | /* unmap the data buffer */ |
| 586 | if (dma_size != 0) | 586 | if (dma_size != 0) |
| 587 | dma_unmap_single(&adap->dev, dma_addr, dma_size, dma_direction); | 587 | dma_unmap_single(dev, dma_addr, dma_size, dma_direction); |
| 588 | 588 | ||
| 589 | if (unlikely(!time_left)) { | 589 | if (unlikely(!time_left)) { |
| 590 | dev_err(dev, "completion wait timed out\n"); | 590 | dev_err(dev, "completion wait timed out\n"); |
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index 214bf2835d1f..8be3e6cb8fe6 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c | |||
| @@ -319,7 +319,7 @@ static void rcar_i2c_dma_unmap(struct rcar_i2c_priv *priv) | |||
| 319 | rcar_i2c_write(priv, ICFBSCR, TCYC06); | 319 | rcar_i2c_write(priv, ICFBSCR, TCYC06); |
| 320 | 320 | ||
| 321 | dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg), | 321 | dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg), |
| 322 | priv->msg->len, priv->dma_direction); | 322 | sg_dma_len(&priv->sg), priv->dma_direction); |
| 323 | 323 | ||
| 324 | priv->dma_direction = DMA_NONE; | 324 | priv->dma_direction = DMA_NONE; |
| 325 | } | 325 | } |
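Both I2C fixes above restore symmetry between DMA map and unmap: the unmap must use the same device and the length that was actually mapped, not values recomputed later. A tiny standalone illustration of recording the mapping and reusing it on unmap (the rounding is made up, just to show that mapped length can differ from the caller's length):

#include <stdio.h>

struct mapping { unsigned long addr; unsigned int len; };

static struct mapping demo_map(unsigned long addr, unsigned int len)
{
	struct mapping m = { addr, (len + 7u) & ~7u };	/* mapped length may differ */

	printf("map   %#lx len %u\n", m.addr, m.len);
	return m;
}

static void demo_unmap(const struct mapping *m)
{
	/* unmap with the recorded values, like sg_dma_len(&priv->sg) above */
	printf("unmap %#lx len %u\n", m->addr, m->len);
}

int main(void)
{
	struct mapping m = demo_map(0x1000, 13);

	demo_unmap(&m);
	return 0;
}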
diff --git a/drivers/iio/adc/bcm_iproc_adc.c b/drivers/iio/adc/bcm_iproc_adc.c index 21d38c8af21e..7f4f9c4150e3 100644 --- a/drivers/iio/adc/bcm_iproc_adc.c +++ b/drivers/iio/adc/bcm_iproc_adc.c | |||
| @@ -143,7 +143,7 @@ static void iproc_adc_reg_dump(struct iio_dev *indio_dev) | |||
| 143 | iproc_adc_dbg_reg(dev, adc_priv, IPROC_SOFT_BYPASS_DATA); | 143 | iproc_adc_dbg_reg(dev, adc_priv, IPROC_SOFT_BYPASS_DATA); |
| 144 | } | 144 | } |
| 145 | 145 | ||
| 146 | static irqreturn_t iproc_adc_interrupt_handler(int irq, void *data) | 146 | static irqreturn_t iproc_adc_interrupt_thread(int irq, void *data) |
| 147 | { | 147 | { |
| 148 | u32 channel_intr_status; | 148 | u32 channel_intr_status; |
| 149 | u32 intr_status; | 149 | u32 intr_status; |
| @@ -167,7 +167,7 @@ static irqreturn_t iproc_adc_interrupt_handler(int irq, void *data) | |||
| 167 | return IRQ_NONE; | 167 | return IRQ_NONE; |
| 168 | } | 168 | } |
| 169 | 169 | ||
| 170 | static irqreturn_t iproc_adc_interrupt_thread(int irq, void *data) | 170 | static irqreturn_t iproc_adc_interrupt_handler(int irq, void *data) |
| 171 | { | 171 | { |
| 172 | irqreturn_t retval = IRQ_NONE; | 172 | irqreturn_t retval = IRQ_NONE; |
| 173 | struct iproc_adc_priv *adc_priv; | 173 | struct iproc_adc_priv *adc_priv; |
| @@ -181,7 +181,7 @@ static irqreturn_t iproc_adc_interrupt_thread(int irq, void *data) | |||
| 181 | adc_priv = iio_priv(indio_dev); | 181 | adc_priv = iio_priv(indio_dev); |
| 182 | 182 | ||
| 183 | regmap_read(adc_priv->regmap, IPROC_INTERRUPT_STATUS, &intr_status); | 183 | regmap_read(adc_priv->regmap, IPROC_INTERRUPT_STATUS, &intr_status); |
| 184 | dev_dbg(&indio_dev->dev, "iproc_adc_interrupt_thread(),INTRPT_STS:%x\n", | 184 | dev_dbg(&indio_dev->dev, "iproc_adc_interrupt_handler(),INTRPT_STS:%x\n", |
| 185 | intr_status); | 185 | intr_status); |
| 186 | 186 | ||
| 187 | intr_channels = (intr_status & IPROC_ADC_INTR_MASK) >> IPROC_ADC_INTR; | 187 | intr_channels = (intr_status & IPROC_ADC_INTR_MASK) >> IPROC_ADC_INTR; |
| @@ -566,8 +566,8 @@ static int iproc_adc_probe(struct platform_device *pdev) | |||
| 566 | } | 566 | } |
| 567 | 567 | ||
| 568 | ret = devm_request_threaded_irq(&pdev->dev, adc_priv->irqno, | 568 | ret = devm_request_threaded_irq(&pdev->dev, adc_priv->irqno, |
| 569 | iproc_adc_interrupt_thread, | ||
| 570 | iproc_adc_interrupt_handler, | 569 | iproc_adc_interrupt_handler, |
| 570 | iproc_adc_interrupt_thread, | ||
| 571 | IRQF_SHARED, "iproc-adc", indio_dev); | 571 | IRQF_SHARED, "iproc-adc", indio_dev); |
| 572 | if (ret) { | 572 | if (ret) { |
| 573 | dev_err(&pdev->dev, "request_irq error %d\n", ret); | 573 | dev_err(&pdev->dev, "request_irq error %d\n", ret); |
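The bcm_iproc_adc fix above swaps the two callbacks so the quick status check runs as the hard-IRQ handler (third argument of devm_request_threaded_irq()) and the heavier work runs as the threaded handler (fourth argument). A standalone sketch of that split; the names and the status bit are illustrative:

#include <stdio.h>

enum irqreturn { IRQ_NONE, IRQ_HANDLED, IRQ_WAKE_THREAD };

static enum irqreturn quick_check(unsigned int status)
{
	/* hard handler: just decide whether the thread has work to do */
	return (status & 0x1) ? IRQ_WAKE_THREAD : IRQ_NONE;
}

static enum irqreturn slow_work(unsigned int status)
{
	printf("thread handles status 0x%x\n", status);	/* threaded handler */
	return IRQ_HANDLED;
}

static void simulate_irq(enum irqreturn (*handler)(unsigned int),
			 enum irqreturn (*thread_fn)(unsigned int),
			 unsigned int status)
{
	if (handler(status) == IRQ_WAKE_THREAD)
		thread_fn(status);
}

int main(void)
{
	simulate_irq(quick_check, slow_work, 0x1);	/* work deferred to thread */
	simulate_irq(quick_check, slow_work, 0x0);	/* nothing to do */
	return 0;
}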
diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c index ec82106480e1..b0526e4b9530 100644 --- a/drivers/iio/adc/max9611.c +++ b/drivers/iio/adc/max9611.c | |||
| @@ -438,10 +438,10 @@ static ssize_t max9611_shunt_resistor_show(struct device *dev, | |||
| 438 | struct max9611_dev *max9611 = iio_priv(dev_to_iio_dev(dev)); | 438 | struct max9611_dev *max9611 = iio_priv(dev_to_iio_dev(dev)); |
| 439 | unsigned int i, r; | 439 | unsigned int i, r; |
| 440 | 440 | ||
| 441 | i = max9611->shunt_resistor_uohm / 1000; | 441 | i = max9611->shunt_resistor_uohm / 1000000; |
| 442 | r = max9611->shunt_resistor_uohm % 1000; | 442 | r = max9611->shunt_resistor_uohm % 1000000; |
| 443 | 443 | ||
| 444 | return sprintf(buf, "%u.%03u\n", i, r); | 444 | return sprintf(buf, "%u.%06u\n", i, r); |
| 445 | } | 445 | } |
| 446 | 446 | ||
| 447 | static IIO_DEVICE_ATTR(in_power_shunt_resistor, 0444, | 447 | static IIO_DEVICE_ATTR(in_power_shunt_resistor, 0444, |
| @@ -536,8 +536,8 @@ static int max9611_probe(struct i2c_client *client, | |||
| 536 | int ret; | 536 | int ret; |
| 537 | 537 | ||
| 538 | indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*max9611)); | 538 | indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*max9611)); |
| 539 | if (IS_ERR(indio_dev)) | 539 | if (!indio_dev) |
| 540 | return PTR_ERR(indio_dev); | 540 | return -ENOMEM; |
| 541 | 541 | ||
| 542 | i2c_set_clientdata(client, indio_dev); | 542 | i2c_set_clientdata(client, indio_dev); |
| 543 | 543 | ||
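The max9611 change above fixes the shunt-resistor display: the value is stored in micro-ohms, so it has to be split with 1000000 and printed with six fractional digits, not 1000 and three. A worked example with a 5 milliohm shunt:

#include <stdio.h>

int main(void)
{
	unsigned int shunt_uohm = 5000;		/* 5 mOhm shunt, stored in uOhm */

	/* old formula: prints "5.000" ohms -- off by a factor of 1000 */
	printf("wrong: %u.%03u\n", shunt_uohm / 1000, shunt_uohm % 1000);

	/* fixed formula: prints "0.005000" ohms */
	printf("right: %u.%06u\n", shunt_uohm / 1000000, shunt_uohm % 1000000);
	return 0;
}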
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c index dd4190b50df6..6066bbfc42fe 100644 --- a/drivers/iio/adc/meson_saradc.c +++ b/drivers/iio/adc/meson_saradc.c | |||
| @@ -468,13 +468,13 @@ static void meson_sar_adc_unlock(struct iio_dev *indio_dev) | |||
| 468 | static void meson_sar_adc_clear_fifo(struct iio_dev *indio_dev) | 468 | static void meson_sar_adc_clear_fifo(struct iio_dev *indio_dev) |
| 469 | { | 469 | { |
| 470 | struct meson_sar_adc_priv *priv = iio_priv(indio_dev); | 470 | struct meson_sar_adc_priv *priv = iio_priv(indio_dev); |
| 471 | int count; | 471 | unsigned int count, tmp; |
| 472 | 472 | ||
| 473 | for (count = 0; count < MESON_SAR_ADC_MAX_FIFO_SIZE; count++) { | 473 | for (count = 0; count < MESON_SAR_ADC_MAX_FIFO_SIZE; count++) { |
| 474 | if (!meson_sar_adc_get_fifo_count(indio_dev)) | 474 | if (!meson_sar_adc_get_fifo_count(indio_dev)) |
| 475 | break; | 475 | break; |
| 476 | 476 | ||
| 477 | regmap_read(priv->regmap, MESON_SAR_ADC_FIFO_RD, 0); | 477 | regmap_read(priv->regmap, MESON_SAR_ADC_FIFO_RD, &tmp); |
| 478 | } | 478 | } |
| 479 | } | 479 | } |
| 480 | 480 | ||
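The meson_saradc fix above replaces a literal 0 with a scratch variable: even when the FIFO contents are thrown away, the read helper still needs a valid destination pointer. A minimal standalone illustration, with demo_read() standing in for regmap_read():

#include <stdio.h>

static int demo_read(const unsigned int *fifo, unsigned int pos, unsigned int *val)
{
	if (!val)
		return -1;		/* a null destination has nowhere to store the data */
	*val = fifo[pos];
	return 0;
}

int main(void)
{
	unsigned int fifo[4] = { 7, 8, 9, 10 };
	unsigned int tmp = 0;
	unsigned int count;

	for (count = 0; count < 4; count++)
		demo_read(fifo, count, &tmp);	/* drain the FIFO, discarding values */

	printf("last discarded value: %u\n", tmp);
	return 0;
}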
diff --git a/drivers/iio/adc/mxs-lradc-adc.c b/drivers/iio/adc/mxs-lradc-adc.c index b0c7d8ee5cb8..6888167ca1e6 100644 --- a/drivers/iio/adc/mxs-lradc-adc.c +++ b/drivers/iio/adc/mxs-lradc-adc.c | |||
| @@ -718,9 +718,12 @@ static int mxs_lradc_adc_probe(struct platform_device *pdev) | |||
| 718 | adc->dev = dev; | 718 | adc->dev = dev; |
| 719 | 719 | ||
| 720 | iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 720 | iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 721 | if (!iores) | ||
| 722 | return -EINVAL; | ||
| 723 | |||
| 721 | adc->base = devm_ioremap(dev, iores->start, resource_size(iores)); | 724 | adc->base = devm_ioremap(dev, iores->start, resource_size(iores)); |
| 722 | if (IS_ERR(adc->base)) | 725 | if (!adc->base) |
| 723 | return PTR_ERR(adc->base); | 726 | return -ENOMEM; |
| 724 | 727 | ||
| 725 | init_completion(&adc->completion); | 728 | init_completion(&adc->completion); |
| 726 | spin_lock_init(&adc->lock); | 729 | spin_lock_init(&adc->lock); |
diff --git a/drivers/iio/adc/sun4i-gpadc-iio.c b/drivers/iio/adc/sun4i-gpadc-iio.c index b23527309088..81d4c39e414a 100644 --- a/drivers/iio/adc/sun4i-gpadc-iio.c +++ b/drivers/iio/adc/sun4i-gpadc-iio.c | |||
| @@ -105,6 +105,8 @@ struct sun4i_gpadc_iio { | |||
| 105 | bool no_irq; | 105 | bool no_irq; |
| 106 | /* prevents concurrent reads of temperature and ADC */ | 106 | /* prevents concurrent reads of temperature and ADC */ |
| 107 | struct mutex mutex; | 107 | struct mutex mutex; |
| 108 | struct thermal_zone_device *tzd; | ||
| 109 | struct device *sensor_device; | ||
| 108 | }; | 110 | }; |
| 109 | 111 | ||
| 110 | #define SUN4I_GPADC_ADC_CHANNEL(_channel, _name) { \ | 112 | #define SUN4I_GPADC_ADC_CHANNEL(_channel, _name) { \ |
| @@ -502,7 +504,6 @@ static int sun4i_gpadc_probe_dt(struct platform_device *pdev, | |||
| 502 | { | 504 | { |
| 503 | struct sun4i_gpadc_iio *info = iio_priv(indio_dev); | 505 | struct sun4i_gpadc_iio *info = iio_priv(indio_dev); |
| 504 | const struct of_device_id *of_dev; | 506 | const struct of_device_id *of_dev; |
| 505 | struct thermal_zone_device *tzd; | ||
| 506 | struct resource *mem; | 507 | struct resource *mem; |
| 507 | void __iomem *base; | 508 | void __iomem *base; |
| 508 | int ret; | 509 | int ret; |
| @@ -532,13 +533,14 @@ static int sun4i_gpadc_probe_dt(struct platform_device *pdev, | |||
| 532 | if (!IS_ENABLED(CONFIG_THERMAL_OF)) | 533 | if (!IS_ENABLED(CONFIG_THERMAL_OF)) |
| 533 | return 0; | 534 | return 0; |
| 534 | 535 | ||
| 535 | tzd = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, info, | 536 | info->sensor_device = &pdev->dev; |
| 536 | &sun4i_ts_tz_ops); | 537 | info->tzd = thermal_zone_of_sensor_register(info->sensor_device, 0, |
| 537 | if (IS_ERR(tzd)) | 538 | info, &sun4i_ts_tz_ops); |
| 539 | if (IS_ERR(info->tzd)) | ||
| 538 | dev_err(&pdev->dev, "could not register thermal sensor: %ld\n", | 540 | dev_err(&pdev->dev, "could not register thermal sensor: %ld\n", |
| 539 | PTR_ERR(tzd)); | 541 | PTR_ERR(info->tzd)); |
| 540 | 542 | ||
| 541 | return PTR_ERR_OR_ZERO(tzd); | 543 | return PTR_ERR_OR_ZERO(info->tzd); |
| 542 | } | 544 | } |
| 543 | 545 | ||
| 544 | static int sun4i_gpadc_probe_mfd(struct platform_device *pdev, | 546 | static int sun4i_gpadc_probe_mfd(struct platform_device *pdev, |
| @@ -584,15 +586,15 @@ static int sun4i_gpadc_probe_mfd(struct platform_device *pdev, | |||
| 584 | * of_node, and the device from this driver as third argument to | 586 | * of_node, and the device from this driver as third argument to |
| 585 | * return the temperature. | 587 | * return the temperature. |
| 586 | */ | 588 | */ |
| 587 | struct thermal_zone_device *tzd; | 589 | info->sensor_device = pdev->dev.parent; |
| 588 | tzd = devm_thermal_zone_of_sensor_register(pdev->dev.parent, 0, | 590 | info->tzd = thermal_zone_of_sensor_register(info->sensor_device, |
| 589 | info, | 591 | 0, info, |
| 590 | &sun4i_ts_tz_ops); | 592 | &sun4i_ts_tz_ops); |
| 591 | if (IS_ERR(tzd)) { | 593 | if (IS_ERR(info->tzd)) { |
| 592 | dev_err(&pdev->dev, | 594 | dev_err(&pdev->dev, |
| 593 | "could not register thermal sensor: %ld\n", | 595 | "could not register thermal sensor: %ld\n", |
| 594 | PTR_ERR(tzd)); | 596 | PTR_ERR(info->tzd)); |
| 595 | return PTR_ERR(tzd); | 597 | return PTR_ERR(info->tzd); |
| 596 | } | 598 | } |
| 597 | } else { | 599 | } else { |
| 598 | indio_dev->num_channels = | 600 | indio_dev->num_channels = |
| @@ -688,7 +690,13 @@ static int sun4i_gpadc_remove(struct platform_device *pdev) | |||
| 688 | 690 | ||
| 689 | pm_runtime_put(&pdev->dev); | 691 | pm_runtime_put(&pdev->dev); |
| 690 | pm_runtime_disable(&pdev->dev); | 692 | pm_runtime_disable(&pdev->dev); |
| 691 | if (!info->no_irq && IS_ENABLED(CONFIG_THERMAL_OF)) | 693 | |
| 694 | if (!IS_ENABLED(CONFIG_THERMAL_OF)) | ||
| 695 | return 0; | ||
| 696 | |||
| 697 | thermal_zone_of_sensor_unregister(info->sensor_device, info->tzd); | ||
| 698 | |||
| 699 | if (!info->no_irq) | ||
| 692 | iio_map_array_unregister(indio_dev); | 700 | iio_map_array_unregister(indio_dev); |
| 693 | 701 | ||
| 694 | return 0; | 702 | return 0; |
| @@ -700,6 +708,7 @@ static const struct platform_device_id sun4i_gpadc_id[] = { | |||
| 700 | { "sun6i-a31-gpadc-iio", (kernel_ulong_t)&sun6i_gpadc_data }, | 708 | { "sun6i-a31-gpadc-iio", (kernel_ulong_t)&sun6i_gpadc_data }, |
| 701 | { /* sentinel */ }, | 709 | { /* sentinel */ }, |
| 702 | }; | 710 | }; |
| 711 | MODULE_DEVICE_TABLE(platform, sun4i_gpadc_id); | ||
| 703 | 712 | ||
| 704 | static struct platform_driver sun4i_gpadc_driver = { | 713 | static struct platform_driver sun4i_gpadc_driver = { |
| 705 | .driver = { | 714 | .driver = { |
| @@ -711,6 +720,7 @@ static struct platform_driver sun4i_gpadc_driver = { | |||
| 711 | .probe = sun4i_gpadc_probe, | 720 | .probe = sun4i_gpadc_probe, |
| 712 | .remove = sun4i_gpadc_remove, | 721 | .remove = sun4i_gpadc_remove, |
| 713 | }; | 722 | }; |
| 723 | MODULE_DEVICE_TABLE(of, sun4i_gpadc_of_id); | ||
| 714 | 724 | ||
| 715 | module_platform_driver(sun4i_gpadc_driver); | 725 | module_platform_driver(sun4i_gpadc_driver); |
| 716 | 726 | ||
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c index 4282ceca3d8f..6cbed7eb118a 100644 --- a/drivers/iio/adc/ti_am335x_adc.c +++ b/drivers/iio/adc/ti_am335x_adc.c | |||
| @@ -614,7 +614,7 @@ static int tiadc_probe(struct platform_device *pdev) | |||
| 614 | return -EINVAL; | 614 | return -EINVAL; |
| 615 | } | 615 | } |
| 616 | 616 | ||
| 617 | indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*indio_dev)); | 617 | indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc_dev)); |
| 618 | if (indio_dev == NULL) { | 618 | if (indio_dev == NULL) { |
| 619 | dev_err(&pdev->dev, "failed to allocate iio device\n"); | 619 | dev_err(&pdev->dev, "failed to allocate iio device\n"); |
| 620 | return -ENOMEM; | 620 | return -ENOMEM; |
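The ti_am335x_adc fix above sizes the allocation by the driver's private struct (*adc_dev), not by the core iio_dev it wraps. A standalone sketch of why that matters, with a plain calloc() standing in for devm_iio_device_alloc():

#include <stdio.h>
#include <stdlib.h>

struct demo_core { int id; };			/* stands in for struct iio_dev */
struct demo_priv { char state[256]; };		/* stands in for the driver's adc_dev */

static struct demo_core *demo_alloc(size_t priv_size)
{
	/* core object plus trailing driver-private area */
	return calloc(1, sizeof(struct demo_core) + priv_size);
}

int main(void)
{
	/* right: size the trailing area for the private struct */
	struct demo_core *core = demo_alloc(sizeof(struct demo_priv));

	/*
	 * wrong (the bug fixed above): demo_alloc(sizeof(*core)) would leave
	 * far too little room for the private state behind the core object.
	 */
	if (!core)
		return 1;
	printf("allocated core + %zu private bytes\n", sizeof(struct demo_priv));
	free(core);
	return 0;
}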
diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c index dd99d273bae9..ff03324dee13 100644 --- a/drivers/iio/buffer/industrialio-buffer-dma.c +++ b/drivers/iio/buffer/industrialio-buffer-dma.c | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #include <linux/sched.h> | 14 | #include <linux/sched.h> |
| 15 | #include <linux/poll.h> | 15 | #include <linux/poll.h> |
| 16 | #include <linux/iio/buffer.h> | 16 | #include <linux/iio/buffer.h> |
| 17 | #include <linux/iio/buffer_impl.h> | ||
| 17 | #include <linux/iio/buffer-dma.h> | 18 | #include <linux/iio/buffer-dma.h> |
| 18 | #include <linux/dma-mapping.h> | 19 | #include <linux/dma-mapping.h> |
| 19 | #include <linux/sizes.h> | 20 | #include <linux/sizes.h> |
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c index 9fabed47053d..2b5a320f42c5 100644 --- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c +++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | 14 | ||
| 15 | #include <linux/iio/iio.h> | 15 | #include <linux/iio/iio.h> |
| 16 | #include <linux/iio/buffer.h> | 16 | #include <linux/iio/buffer.h> |
| 17 | #include <linux/iio/buffer_impl.h> | ||
| 17 | #include <linux/iio/buffer-dma.h> | 18 | #include <linux/iio/buffer-dma.h> |
| 18 | #include <linux/iio/buffer-dmaengine.h> | 19 | #include <linux/iio/buffer-dmaengine.h> |
| 19 | 20 | ||
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c index 96dabbd2f004..88a7c5d4e4d2 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c | |||
| @@ -41,6 +41,7 @@ static const int accel_scale[] = {598, 1196, 2392, 4785}; | |||
| 41 | static const struct inv_mpu6050_reg_map reg_set_6500 = { | 41 | static const struct inv_mpu6050_reg_map reg_set_6500 = { |
| 42 | .sample_rate_div = INV_MPU6050_REG_SAMPLE_RATE_DIV, | 42 | .sample_rate_div = INV_MPU6050_REG_SAMPLE_RATE_DIV, |
| 43 | .lpf = INV_MPU6050_REG_CONFIG, | 43 | .lpf = INV_MPU6050_REG_CONFIG, |
| 44 | .accel_lpf = INV_MPU6500_REG_ACCEL_CONFIG_2, | ||
| 44 | .user_ctrl = INV_MPU6050_REG_USER_CTRL, | 45 | .user_ctrl = INV_MPU6050_REG_USER_CTRL, |
| 45 | .fifo_en = INV_MPU6050_REG_FIFO_EN, | 46 | .fifo_en = INV_MPU6050_REG_FIFO_EN, |
| 46 | .gyro_config = INV_MPU6050_REG_GYRO_CONFIG, | 47 | .gyro_config = INV_MPU6050_REG_GYRO_CONFIG, |
| @@ -211,6 +212,37 @@ int inv_mpu6050_set_power_itg(struct inv_mpu6050_state *st, bool power_on) | |||
| 211 | EXPORT_SYMBOL_GPL(inv_mpu6050_set_power_itg); | 212 | EXPORT_SYMBOL_GPL(inv_mpu6050_set_power_itg); |
| 212 | 213 | ||
| 213 | /** | 214 | /** |
| 215 | * inv_mpu6050_set_lpf_regs() - set low pass filter registers, chip dependent | ||
| 216 | * | ||
| 217 | * MPU60xx/MPU9150 use only 1 register for accelerometer + gyroscope | ||
| 218 | * MPU6500 and above have a dedicated register for accelerometer | ||
| 219 | */ | ||
| 220 | static int inv_mpu6050_set_lpf_regs(struct inv_mpu6050_state *st, | ||
| 221 | enum inv_mpu6050_filter_e val) | ||
| 222 | { | ||
| 223 | int result; | ||
| 224 | |||
| 225 | result = regmap_write(st->map, st->reg->lpf, val); | ||
| 226 | if (result) | ||
| 227 | return result; | ||
| 228 | |||
| 229 | switch (st->chip_type) { | ||
| 230 | case INV_MPU6050: | ||
| 231 | case INV_MPU6000: | ||
| 232 | case INV_MPU9150: | ||
| 233 | /* old chips, nothing to do */ | ||
| 234 | result = 0; | ||
| 235 | break; | ||
| 236 | default: | ||
| 237 | /* set accel lpf */ | ||
| 238 | result = regmap_write(st->map, st->reg->accel_lpf, val); | ||
| 239 | break; | ||
| 240 | } | ||
| 241 | |||
| 242 | return result; | ||
| 243 | } | ||
| 244 | |||
| 245 | /** | ||
| 214 | * inv_mpu6050_init_config() - Initialize hardware, disable FIFO. | 246 | * inv_mpu6050_init_config() - Initialize hardware, disable FIFO. |
| 215 | * | 247 | * |
| 216 | * Initial configuration: | 248 | * Initial configuration: |
| @@ -233,8 +265,7 @@ static int inv_mpu6050_init_config(struct iio_dev *indio_dev) | |||
| 233 | if (result) | 265 | if (result) |
| 234 | return result; | 266 | return result; |
| 235 | 267 | ||
| 236 | d = INV_MPU6050_FILTER_20HZ; | 268 | result = inv_mpu6050_set_lpf_regs(st, INV_MPU6050_FILTER_20HZ); |
| 237 | result = regmap_write(st->map, st->reg->lpf, d); | ||
| 238 | if (result) | 269 | if (result) |
| 239 | return result; | 270 | return result; |
| 240 | 271 | ||
| @@ -537,6 +568,8 @@ error_write_raw: | |||
| 537 | * would be aliasing. This function basically searches for the | 568 | * would be aliasing. This function basically searches for the |
| 538 | * correct low pass parameters based on the fifo rate, e.g., | 569 | * correct low pass parameters based on the fifo rate, e.g., |
| 539 | * sampling frequency. | 570 | * sampling frequency. |
| 571 | * | ||
| 572 | * lpf is set automatically when setting sampling rate to avoid any aliases. | ||
| 540 | */ | 573 | */ |
| 541 | static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate) | 574 | static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate) |
| 542 | { | 575 | { |
| @@ -552,7 +585,7 @@ static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate) | |||
| 552 | while ((h < hz[i]) && (i < ARRAY_SIZE(d) - 1)) | 585 | while ((h < hz[i]) && (i < ARRAY_SIZE(d) - 1)) |
| 553 | i++; | 586 | i++; |
| 554 | data = d[i]; | 587 | data = d[i]; |
| 555 | result = regmap_write(st->map, st->reg->lpf, data); | 588 | result = inv_mpu6050_set_lpf_regs(st, data); |
| 556 | if (result) | 589 | if (result) |
| 557 | return result; | 590 | return result; |
| 558 | st->chip_config.lpf = data; | 591 | st->chip_config.lpf = data; |
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h index ef13de7a2c20..953a0c09d568 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | * struct inv_mpu6050_reg_map - Notable registers. | 28 | * struct inv_mpu6050_reg_map - Notable registers. |
| 29 | * @sample_rate_div: Divider applied to gyro output rate. | 29 | * @sample_rate_div: Divider applied to gyro output rate. |
| 30 | * @lpf: Configures internal low pass filter. | 30 | * @lpf: Configures internal low pass filter. |
| 31 | * @accel_lpf: Configures accelerometer low pass filter. | ||
| 31 | * @user_ctrl: Enables/resets the FIFO. | 32 | * @user_ctrl: Enables/resets the FIFO. |
| 32 | * @fifo_en: Determines which data will appear in FIFO. | 33 | * @fifo_en: Determines which data will appear in FIFO. |
| 33 | * @gyro_config: gyro config register. | 34 | * @gyro_config: gyro config register. |
| @@ -47,6 +48,7 @@ | |||
| 47 | struct inv_mpu6050_reg_map { | 48 | struct inv_mpu6050_reg_map { |
| 48 | u8 sample_rate_div; | 49 | u8 sample_rate_div; |
| 49 | u8 lpf; | 50 | u8 lpf; |
| 51 | u8 accel_lpf; | ||
| 50 | u8 user_ctrl; | 52 | u8 user_ctrl; |
| 51 | u8 fifo_en; | 53 | u8 fifo_en; |
| 52 | u8 gyro_config; | 54 | u8 gyro_config; |
| @@ -188,6 +190,7 @@ struct inv_mpu6050_state { | |||
| 188 | #define INV_MPU6050_FIFO_THRESHOLD 500 | 190 | #define INV_MPU6050_FIFO_THRESHOLD 500 |
| 189 | 191 | ||
| 190 | /* mpu6500 registers */ | 192 | /* mpu6500 registers */ |
| 193 | #define INV_MPU6500_REG_ACCEL_CONFIG_2 0x1D | ||
| 191 | #define INV_MPU6500_REG_ACCEL_OFFSET 0x77 | 194 | #define INV_MPU6500_REG_ACCEL_OFFSET 0x77 |
| 192 | 195 | ||
| 193 | /* delay time in milliseconds */ | 196 | /* delay time in milliseconds */ |
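The inv_mpu6050 helper added above writes the shared LPF register and, on MPU6500-class parts, a second accelerometer-specific register. A standalone sketch of that chip-type dispatch; register offsets and chip names here are illustrative stand-ins:

#include <stdio.h>

enum chip { CHIP_OLD_A, CHIP_OLD_B, CHIP_NEW };

static int write_reg(unsigned int reg, unsigned int val)
{
	printf("reg 0x%02x <- %u\n", reg, val);
	return 0;
}

static int set_lpf(enum chip chip, unsigned int val)
{
	int ret = write_reg(0x1a, val);		/* shared gyro/accel LPF register */

	if (ret)
		return ret;

	switch (chip) {
	case CHIP_OLD_A:
	case CHIP_OLD_B:
		return 0;			/* old chips: nothing more to do */
	default:
		return write_reg(0x1d, val);	/* dedicated accel LPF register */
	}
}

int main(void)
{
	set_lpf(CHIP_OLD_A, 1);
	set_lpf(CHIP_NEW, 1);
	return 0;
}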
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c index 978e1592c2a3..4061fed93f1f 100644 --- a/drivers/iio/industrialio-trigger.c +++ b/drivers/iio/industrialio-trigger.c | |||
| @@ -451,7 +451,8 @@ static ssize_t iio_trigger_write_current(struct device *dev, | |||
| 451 | return len; | 451 | return len; |
| 452 | 452 | ||
| 453 | out_trigger_put: | 453 | out_trigger_put: |
| 454 | iio_trigger_put(trig); | 454 | if (trig) |
| 455 | iio_trigger_put(trig); | ||
| 455 | return ret; | 456 | return ret; |
| 456 | } | 457 | } |
| 457 | 458 | ||
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c index b30e0c1c6cc4..67838edd8b37 100644 --- a/drivers/iio/light/ltr501.c +++ b/drivers/iio/light/ltr501.c | |||
| @@ -74,9 +74,9 @@ static const int int_time_mapping[] = {100000, 50000, 200000, 400000}; | |||
| 74 | static const struct reg_field reg_field_it = | 74 | static const struct reg_field reg_field_it = |
| 75 | REG_FIELD(LTR501_ALS_MEAS_RATE, 3, 4); | 75 | REG_FIELD(LTR501_ALS_MEAS_RATE, 3, 4); |
| 76 | static const struct reg_field reg_field_als_intr = | 76 | static const struct reg_field reg_field_als_intr = |
| 77 | REG_FIELD(LTR501_INTR, 0, 0); | ||
| 78 | static const struct reg_field reg_field_ps_intr = | ||
| 79 | REG_FIELD(LTR501_INTR, 1, 1); | 77 | REG_FIELD(LTR501_INTR, 1, 1); |
| 78 | static const struct reg_field reg_field_ps_intr = | ||
| 79 | REG_FIELD(LTR501_INTR, 0, 0); | ||
| 80 | static const struct reg_field reg_field_als_rate = | 80 | static const struct reg_field reg_field_als_rate = |
| 81 | REG_FIELD(LTR501_ALS_MEAS_RATE, 0, 2); | 81 | REG_FIELD(LTR501_ALS_MEAS_RATE, 0, 2); |
| 82 | static const struct reg_field reg_field_ps_rate = | 82 | static const struct reg_field reg_field_ps_rate = |
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c index ddf9bee89f77..aa4df0dcc8c9 100644 --- a/drivers/iio/proximity/as3935.c +++ b/drivers/iio/proximity/as3935.c | |||
| @@ -40,9 +40,9 @@ | |||
| 40 | #define AS3935_AFE_PWR_BIT BIT(0) | 40 | #define AS3935_AFE_PWR_BIT BIT(0) |
| 41 | 41 | ||
| 42 | #define AS3935_INT 0x03 | 42 | #define AS3935_INT 0x03 |
| 43 | #define AS3935_INT_MASK 0x07 | 43 | #define AS3935_INT_MASK 0x0f |
| 44 | #define AS3935_EVENT_INT BIT(3) | 44 | #define AS3935_EVENT_INT BIT(3) |
| 45 | #define AS3935_NOISE_INT BIT(1) | 45 | #define AS3935_NOISE_INT BIT(0) |
| 46 | 46 | ||
| 47 | #define AS3935_DATA 0x07 | 47 | #define AS3935_DATA 0x07 |
| 48 | #define AS3935_DATA_MASK 0x3F | 48 | #define AS3935_DATA_MASK 0x3F |
| @@ -215,7 +215,7 @@ static irqreturn_t as3935_trigger_handler(int irq, void *private) | |||
| 215 | 215 | ||
| 216 | st->buffer[0] = val & AS3935_DATA_MASK; | 216 | st->buffer[0] = val & AS3935_DATA_MASK; |
| 217 | iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer, | 217 | iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer, |
| 218 | pf->timestamp); | 218 | iio_get_time_ns(indio_dev)); |
| 219 | err_read: | 219 | err_read: |
| 220 | iio_trigger_notify_done(indio_dev->trig); | 220 | iio_trigger_notify_done(indio_dev->trig); |
| 221 | 221 | ||
| @@ -244,7 +244,7 @@ static void as3935_event_work(struct work_struct *work) | |||
| 244 | 244 | ||
| 245 | switch (val) { | 245 | switch (val) { |
| 246 | case AS3935_EVENT_INT: | 246 | case AS3935_EVENT_INT: |
| 247 | iio_trigger_poll(st->trig); | 247 | iio_trigger_poll_chained(st->trig); |
| 248 | break; | 248 | break; |
| 249 | case AS3935_NOISE_INT: | 249 | case AS3935_NOISE_INT: |
| 250 | dev_warn(&st->spi->dev, "noise level is too high\n"); | 250 | dev_warn(&st->spi->dev, "noise level is too high\n"); |
| @@ -269,8 +269,6 @@ static irqreturn_t as3935_interrupt_handler(int irq, void *private) | |||
| 269 | 269 | ||
| 270 | static void calibrate_as3935(struct as3935_state *st) | 270 | static void calibrate_as3935(struct as3935_state *st) |
| 271 | { | 271 | { |
| 272 | mutex_lock(&st->lock); | ||
| 273 | |||
| 274 | /* mask disturber interrupt bit */ | 272 | /* mask disturber interrupt bit */ |
| 275 | as3935_write(st, AS3935_INT, BIT(5)); | 273 | as3935_write(st, AS3935_INT, BIT(5)); |
| 276 | 274 | ||
| @@ -280,8 +278,6 @@ static void calibrate_as3935(struct as3935_state *st) | |||
| 280 | 278 | ||
| 281 | mdelay(2); | 279 | mdelay(2); |
| 282 | as3935_write(st, AS3935_TUNE_CAP, (st->tune_cap / TUNE_CAP_DIV)); | 280 | as3935_write(st, AS3935_TUNE_CAP, (st->tune_cap / TUNE_CAP_DIV)); |
| 283 | |||
| 284 | mutex_unlock(&st->lock); | ||
| 285 | } | 281 | } |
| 286 | 282 | ||
| 287 | #ifdef CONFIG_PM_SLEEP | 283 | #ifdef CONFIG_PM_SLEEP |
| @@ -318,6 +314,8 @@ static int as3935_resume(struct device *dev) | |||
| 318 | val &= ~AS3935_AFE_PWR_BIT; | 314 | val &= ~AS3935_AFE_PWR_BIT; |
| 319 | ret = as3935_write(st, AS3935_AFE_GAIN, val); | 315 | ret = as3935_write(st, AS3935_AFE_GAIN, val); |
| 320 | 316 | ||
| 317 | calibrate_as3935(st); | ||
| 318 | |||
| 321 | err_resume: | 319 | err_resume: |
| 322 | mutex_unlock(&st->lock); | 320 | mutex_unlock(&st->lock); |
| 323 | 321 | ||
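The as3935 mask fix above matters because the event value is BIT(3): masking the interrupt register with 0x07 could never match it, while 0x0f can. A two-line demonstration using the values from the hunk:

#include <stdio.h>

#define INT_MASK_OLD	0x07
#define INT_MASK_NEW	0x0f
#define EVENT_INT	(1u << 3)

int main(void)
{
	unsigned int reg = EVENT_INT;	/* chip reports a lightning event */

	printf("old mask sees 0x%x, new mask sees 0x%x\n",
	       reg & INT_MASK_OLD, reg & INT_MASK_NEW);	/* 0x0 vs 0x8 */
	return 0;
}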
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 02971e239a18..ece6926fa2e6 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c | |||
| @@ -449,12 +449,7 @@ static int addr6_resolve(struct sockaddr_in6 *src_in, | |||
| 449 | return ret; | 449 | return ret; |
| 450 | 450 | ||
| 451 | rt = (struct rt6_info *)dst; | 451 | rt = (struct rt6_info *)dst; |
| 452 | if (ipv6_addr_any(&fl6.saddr)) { | 452 | if (ipv6_addr_any(&src_in->sin6_addr)) { |
| 453 | ret = ipv6_dev_get_saddr(addr->net, ip6_dst_idev(dst)->dev, | ||
| 454 | &fl6.daddr, 0, &fl6.saddr); | ||
| 455 | if (ret) | ||
| 456 | goto put; | ||
| 457 | |||
| 458 | src_in->sin6_family = AF_INET6; | 453 | src_in->sin6_family = AF_INET6; |
| 459 | src_in->sin6_addr = fl6.saddr; | 454 | src_in->sin6_addr = fl6.saddr; |
| 460 | } | 455 | } |
| @@ -471,9 +466,6 @@ static int addr6_resolve(struct sockaddr_in6 *src_in, | |||
| 471 | 466 | ||
| 472 | *pdst = dst; | 467 | *pdst = dst; |
| 473 | return 0; | 468 | return 0; |
| 474 | put: | ||
| 475 | dst_release(dst); | ||
| 476 | return ret; | ||
| 477 | } | 469 | } |
| 478 | #else | 470 | #else |
| 479 | static int addr6_resolve(struct sockaddr_in6 *src_in, | 471 | static int addr6_resolve(struct sockaddr_in6 *src_in, |
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 1844770f3ae8..2b4d613a3474 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c | |||
| @@ -1429,7 +1429,7 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg, | |||
| 1429 | primary_path->packet_life_time = | 1429 | primary_path->packet_life_time = |
| 1430 | cm_req_get_primary_local_ack_timeout(req_msg); | 1430 | cm_req_get_primary_local_ack_timeout(req_msg); |
| 1431 | primary_path->packet_life_time -= (primary_path->packet_life_time > 0); | 1431 | primary_path->packet_life_time -= (primary_path->packet_life_time > 0); |
| 1432 | sa_path_set_service_id(primary_path, req_msg->service_id); | 1432 | primary_path->service_id = req_msg->service_id; |
| 1433 | 1433 | ||
| 1434 | if (req_msg->alt_local_lid) { | 1434 | if (req_msg->alt_local_lid) { |
| 1435 | alt_path->dgid = req_msg->alt_local_gid; | 1435 | alt_path->dgid = req_msg->alt_local_gid; |
| @@ -1452,7 +1452,7 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg, | |||
| 1452 | alt_path->packet_life_time = | 1452 | alt_path->packet_life_time = |
| 1453 | cm_req_get_alt_local_ack_timeout(req_msg); | 1453 | cm_req_get_alt_local_ack_timeout(req_msg); |
| 1454 | alt_path->packet_life_time -= (alt_path->packet_life_time > 0); | 1454 | alt_path->packet_life_time -= (alt_path->packet_life_time > 0); |
| 1455 | sa_path_set_service_id(alt_path, req_msg->service_id); | 1455 | alt_path->service_id = req_msg->service_id; |
| 1456 | } | 1456 | } |
| 1457 | } | 1457 | } |
| 1458 | 1458 | ||
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 91b7a2fe5a55..31bb82d8ecd7 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c | |||
| @@ -1140,7 +1140,7 @@ static void cma_save_ib_info(struct sockaddr *src_addr, | |||
| 1140 | ib->sib_pkey = path->pkey; | 1140 | ib->sib_pkey = path->pkey; |
| 1141 | ib->sib_flowinfo = path->flow_label; | 1141 | ib->sib_flowinfo = path->flow_label; |
| 1142 | memcpy(&ib->sib_addr, &path->sgid, 16); | 1142 | memcpy(&ib->sib_addr, &path->sgid, 16); |
| 1143 | ib->sib_sid = sa_path_get_service_id(path); | 1143 | ib->sib_sid = path->service_id; |
| 1144 | ib->sib_scope_id = 0; | 1144 | ib->sib_scope_id = 0; |
| 1145 | } else { | 1145 | } else { |
| 1146 | ib->sib_pkey = listen_ib->sib_pkey; | 1146 | ib->sib_pkey = listen_ib->sib_pkey; |
| @@ -1274,8 +1274,7 @@ static int cma_save_req_info(const struct ib_cm_event *ib_event, | |||
| 1274 | memcpy(&req->local_gid, &req_param->primary_path->sgid, | 1274 | memcpy(&req->local_gid, &req_param->primary_path->sgid, |
| 1275 | sizeof(req->local_gid)); | 1275 | sizeof(req->local_gid)); |
| 1276 | req->has_gid = true; | 1276 | req->has_gid = true; |
| 1277 | req->service_id = | 1277 | req->service_id = req_param->primary_path->service_id; |
| 1278 | sa_path_get_service_id(req_param->primary_path); | ||
| 1279 | req->pkey = be16_to_cpu(req_param->primary_path->pkey); | 1278 | req->pkey = be16_to_cpu(req_param->primary_path->pkey); |
| 1280 | if (req->pkey != req_param->bth_pkey) | 1279 | if (req->pkey != req_param->bth_pkey) |
| 1281 | pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n" | 1280 | pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n" |
| @@ -1827,7 +1826,8 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id, | |||
| 1827 | struct rdma_route *rt; | 1826 | struct rdma_route *rt; |
| 1828 | const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family; | 1827 | const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family; |
| 1829 | struct sa_path_rec *path = ib_event->param.req_rcvd.primary_path; | 1828 | struct sa_path_rec *path = ib_event->param.req_rcvd.primary_path; |
| 1830 | const __be64 service_id = sa_path_get_service_id(path); | 1829 | const __be64 service_id = |
| 1830 | ib_event->param.req_rcvd.primary_path->service_id; | ||
| 1831 | int ret; | 1831 | int ret; |
| 1832 | 1832 | ||
| 1833 | id = rdma_create_id(listen_id->route.addr.dev_addr.net, | 1833 | id = rdma_create_id(listen_id->route.addr.dev_addr.net, |
| @@ -2345,9 +2345,8 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms, | |||
| 2345 | path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); | 2345 | path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); |
| 2346 | path_rec.numb_path = 1; | 2346 | path_rec.numb_path = 1; |
| 2347 | path_rec.reversible = 1; | 2347 | path_rec.reversible = 1; |
| 2348 | sa_path_set_service_id(&path_rec, | 2348 | path_rec.service_id = rdma_get_service_id(&id_priv->id, |
| 2349 | rdma_get_service_id(&id_priv->id, | 2349 | cma_dst_addr(id_priv)); |
| 2350 | cma_dst_addr(id_priv))); | ||
| 2351 | 2350 | ||
| 2352 | comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID | | 2351 | comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID | |
| 2353 | IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH | | 2352 | IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH | |
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h index cb7d372e4bdf..d92ab4eaa8f3 100644 --- a/drivers/infiniband/core/core_priv.h +++ b/drivers/infiniband/core/core_priv.h | |||
| @@ -169,6 +169,16 @@ void ib_mad_cleanup(void); | |||
| 169 | int ib_sa_init(void); | 169 | int ib_sa_init(void); |
| 170 | void ib_sa_cleanup(void); | 170 | void ib_sa_cleanup(void); |
| 171 | 171 | ||
| 172 | int ibnl_init(void); | ||
| 173 | void ibnl_cleanup(void); | ||
| 174 | |||
| 175 | /** | ||
| 176 | * Check if there are any listeners to the netlink group | ||
| 177 | * @group: the netlink group ID | ||
| 178 | * Returns 0 if at least one listener is subscribed, or a negative value if there are none. | ||
| 179 | */ | ||
| 180 | int ibnl_chk_listeners(unsigned int group); | ||
| 181 | |||
| 172 | int ib_nl_handle_resolve_resp(struct sk_buff *skb, | 182 | int ib_nl_handle_resolve_resp(struct sk_buff *skb, |
| 173 | struct netlink_callback *cb); | 183 | struct netlink_callback *cb); |
| 174 | int ib_nl_handle_set_timeout(struct sk_buff *skb, | 184 | int ib_nl_handle_set_timeout(struct sk_buff *skb, |
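With the prototype now visible from core_priv.h, in-tree callers can gate RDMA netlink traffic on whether anyone is subscribed to the group. A hedged caller sketch; the group constant and the fallback are illustrative, only the ibnl_chk_listeners() prototype and its 0/negative return convention come from the hunk above:

    if (ibnl_chk_listeners(RDMA_NL_GROUP_LS) != 0)
            return -ESRCH;          /* no user-space listeners: fall back to a kernel-side query */

    /* ...build the netlink message and multicast it to the group... */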
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c index b784055423c8..94931c474d41 100644 --- a/drivers/infiniband/core/netlink.c +++ b/drivers/infiniband/core/netlink.c | |||
| @@ -37,6 +37,7 @@ | |||
| 37 | #include <net/net_namespace.h> | 37 | #include <net/net_namespace.h> |
| 38 | #include <net/sock.h> | 38 | #include <net/sock.h> |
| 39 | #include <rdma/rdma_netlink.h> | 39 | #include <rdma/rdma_netlink.h> |
| 40 | #include "core_priv.h" | ||
| 40 | 41 | ||
| 41 | struct ibnl_client { | 42 | struct ibnl_client { |
| 42 | struct list_head list; | 43 | struct list_head list; |
| @@ -55,7 +56,6 @@ int ibnl_chk_listeners(unsigned int group) | |||
| 55 | return -1; | 56 | return -1; |
| 56 | return 0; | 57 | return 0; |
| 57 | } | 58 | } |
| 58 | EXPORT_SYMBOL(ibnl_chk_listeners); | ||
| 59 | 59 | ||
| 60 | int ibnl_add_client(int index, int nops, | 60 | int ibnl_add_client(int index, int nops, |
| 61 | const struct ibnl_client_cbs cb_table[]) | 61 | const struct ibnl_client_cbs cb_table[]) |
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index e335b09c022e..fb7aec4047c8 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c | |||
| @@ -194,7 +194,7 @@ static u32 tid; | |||
| 194 | .field_name = "sa_path_rec:" #field | 194 | .field_name = "sa_path_rec:" #field |
| 195 | 195 | ||
| 196 | static const struct ib_field path_rec_table[] = { | 196 | static const struct ib_field path_rec_table[] = { |
| 197 | { PATH_REC_FIELD(ib.service_id), | 197 | { PATH_REC_FIELD(service_id), |
| 198 | .offset_words = 0, | 198 | .offset_words = 0, |
| 199 | .offset_bits = 0, | 199 | .offset_bits = 0, |
| 200 | .size_bits = 64 }, | 200 | .size_bits = 64 }, |
| @@ -296,7 +296,7 @@ static const struct ib_field path_rec_table[] = { | |||
| 296 | .field_name = "sa_path_rec:" #field | 296 | .field_name = "sa_path_rec:" #field |
| 297 | 297 | ||
| 298 | static const struct ib_field opa_path_rec_table[] = { | 298 | static const struct ib_field opa_path_rec_table[] = { |
| 299 | { OPA_PATH_REC_FIELD(opa.service_id), | 299 | { OPA_PATH_REC_FIELD(service_id), |
| 300 | .offset_words = 0, | 300 | .offset_words = 0, |
| 301 | .offset_bits = 0, | 301 | .offset_bits = 0, |
| 302 | .size_bits = 64 }, | 302 | .size_bits = 64 }, |
| @@ -774,7 +774,7 @@ static void ib_nl_set_path_rec_attrs(struct sk_buff *skb, | |||
| 774 | 774 | ||
| 775 | /* Now build the attributes */ | 775 | /* Now build the attributes */ |
| 776 | if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) { | 776 | if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) { |
| 777 | val64 = be64_to_cpu(sa_path_get_service_id(sa_rec)); | 777 | val64 = be64_to_cpu(sa_rec->service_id); |
| 778 | nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID, | 778 | nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID, |
| 779 | sizeof(val64), &val64); | 779 | sizeof(val64), &val64); |
| 780 | } | 780 | } |
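The path_rec_table entries above describe where each structure member lands in the on-the-wire MAD: the absolute bit position is offset_words * 32 + offset_bits, and size_bits gives the field width. A small helper sketch of that arithmetic (the helper name is made up for illustration; struct ib_field comes from rdma/ib_pack.h):

    static inline unsigned int ib_field_bit_offset(const struct ib_field *f)
    {
            return f->offset_words * 32 + f->offset_bits;
    }

    /* for the service_id entry: 0 * 32 + 0 == bit 0, occupying 64 bits */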
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 3dbf811d3c51..21e60b1e2ff4 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c | |||
| @@ -58,7 +58,7 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d | |||
| 58 | for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) { | 58 | for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) { |
| 59 | 59 | ||
| 60 | page = sg_page(sg); | 60 | page = sg_page(sg); |
| 61 | if (umem->writable && dirty) | 61 | if (!PageDirty(page) && umem->writable && dirty) |
| 62 | set_page_dirty_lock(page); | 62 | set_page_dirty_lock(page); |
| 63 | put_page(page); | 63 | put_page(page); |
| 64 | } | 64 | } |
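The added !PageDirty() test skips set_page_dirty_lock() for pages that are already dirty; that call takes the page lock, so avoiding it keeps the release path cheaper. Condensed form of the loop body after this change, with the intent spelled out:

    page = sg_page(sg);
    if (!PageDirty(page) && umem->writable && dirty)
            set_page_dirty_lock(page);      /* only pay for the page lock while the page is still clean */
    put_page(page);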
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index 0780b1afefa9..8c4ec564e495 100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c | |||
| @@ -321,11 +321,15 @@ int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem, | |||
| 321 | struct vm_area_struct *vma; | 321 | struct vm_area_struct *vma; |
| 322 | struct hstate *h; | 322 | struct hstate *h; |
| 323 | 323 | ||
| 324 | down_read(&mm->mmap_sem); | ||
| 324 | vma = find_vma(mm, ib_umem_start(umem)); | 325 | vma = find_vma(mm, ib_umem_start(umem)); |
| 325 | if (!vma || !is_vm_hugetlb_page(vma)) | 326 | if (!vma || !is_vm_hugetlb_page(vma)) { |
| 327 | up_read(&mm->mmap_sem); | ||
| 326 | return -EINVAL; | 328 | return -EINVAL; |
| 329 | } | ||
| 327 | h = hstate_vma(vma); | 330 | h = hstate_vma(vma); |
| 328 | umem->page_shift = huge_page_shift(h); | 331 | umem->page_shift = huge_page_shift(h); |
| 332 | up_read(&mm->mmap_sem); | ||
| 329 | umem->hugetlb = 1; | 333 | umem->hugetlb = 1; |
| 330 | } else { | 334 | } else { |
| 331 | umem->hugetlb = 0; | 335 | umem->hugetlb = 0; |
diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c index 8b9587fe2303..94fd989c9060 100644 --- a/drivers/infiniband/core/uverbs_marshall.c +++ b/drivers/infiniband/core/uverbs_marshall.c | |||
| @@ -96,11 +96,11 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst, | |||
| 96 | } | 96 | } |
| 97 | EXPORT_SYMBOL(ib_copy_qp_attr_to_user); | 97 | EXPORT_SYMBOL(ib_copy_qp_attr_to_user); |
| 98 | 98 | ||
| 99 | void __ib_copy_path_rec_to_user(struct ib_user_path_rec *dst, | 99 | static void __ib_copy_path_rec_to_user(struct ib_user_path_rec *dst, |
| 100 | struct sa_path_rec *src) | 100 | struct sa_path_rec *src) |
| 101 | { | 101 | { |
| 102 | memcpy(dst->dgid, src->dgid.raw, sizeof src->dgid); | 102 | memcpy(dst->dgid, src->dgid.raw, sizeof(src->dgid)); |
| 103 | memcpy(dst->sgid, src->sgid.raw, sizeof src->sgid); | 103 | memcpy(dst->sgid, src->sgid.raw, sizeof(src->sgid)); |
| 104 | 104 | ||
| 105 | dst->dlid = htons(ntohl(sa_path_get_dlid(src))); | 105 | dst->dlid = htons(ntohl(sa_path_get_dlid(src))); |
| 106 | dst->slid = htons(ntohl(sa_path_get_slid(src))); | 106 | dst->slid = htons(ntohl(sa_path_get_slid(src))); |
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h index ebf7be8d4139..08772836fded 100644 --- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h +++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h | |||
| @@ -56,6 +56,10 @@ | |||
| 56 | #define BNXT_RE_MAX_SRQC_COUNT (64 * 1024) | 56 | #define BNXT_RE_MAX_SRQC_COUNT (64 * 1024) |
| 57 | #define BNXT_RE_MAX_CQ_COUNT (64 * 1024) | 57 | #define BNXT_RE_MAX_CQ_COUNT (64 * 1024) |
| 58 | 58 | ||
| 59 | #define BNXT_RE_UD_QP_HW_STALL 0x400000 | ||
| 60 | |||
| 61 | #define BNXT_RE_RQ_WQE_THRESHOLD 32 | ||
| 62 | |||
| 59 | struct bnxt_re_work { | 63 | struct bnxt_re_work { |
| 60 | struct work_struct work; | 64 | struct work_struct work; |
| 61 | unsigned long event; | 65 | unsigned long event; |
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 7ba9e699d7ab..c7bd68311d0c 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c | |||
| @@ -61,6 +61,48 @@ | |||
| 61 | #include "ib_verbs.h" | 61 | #include "ib_verbs.h" |
| 62 | #include <rdma/bnxt_re-abi.h> | 62 | #include <rdma/bnxt_re-abi.h> |
| 63 | 63 | ||
| 64 | static int __from_ib_access_flags(int iflags) | ||
| 65 | { | ||
| 66 | int qflags = 0; | ||
| 67 | |||
| 68 | if (iflags & IB_ACCESS_LOCAL_WRITE) | ||
| 69 | qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE; | ||
| 70 | if (iflags & IB_ACCESS_REMOTE_READ) | ||
| 71 | qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ; | ||
| 72 | if (iflags & IB_ACCESS_REMOTE_WRITE) | ||
| 73 | qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE; | ||
| 74 | if (iflags & IB_ACCESS_REMOTE_ATOMIC) | ||
| 75 | qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC; | ||
| 76 | if (iflags & IB_ACCESS_MW_BIND) | ||
| 77 | qflags |= BNXT_QPLIB_ACCESS_MW_BIND; | ||
| 78 | if (iflags & IB_ZERO_BASED) | ||
| 79 | qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED; | ||
| 80 | if (iflags & IB_ACCESS_ON_DEMAND) | ||
| 81 | qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND; | ||
| 82 | return qflags; | ||
| 83 | }; | ||
| 84 | |||
| 85 | static enum ib_access_flags __to_ib_access_flags(int qflags) | ||
| 86 | { | ||
| 87 | enum ib_access_flags iflags = 0; | ||
| 88 | |||
| 89 | if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE) | ||
| 90 | iflags |= IB_ACCESS_LOCAL_WRITE; | ||
| 91 | if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE) | ||
| 92 | iflags |= IB_ACCESS_REMOTE_WRITE; | ||
| 93 | if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ) | ||
| 94 | iflags |= IB_ACCESS_REMOTE_READ; | ||
| 95 | if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC) | ||
| 96 | iflags |= IB_ACCESS_REMOTE_ATOMIC; | ||
| 97 | if (qflags & BNXT_QPLIB_ACCESS_MW_BIND) | ||
| 98 | iflags |= IB_ACCESS_MW_BIND; | ||
| 99 | if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED) | ||
| 100 | iflags |= IB_ZERO_BASED; | ||
| 101 | if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND) | ||
| 102 | iflags |= IB_ACCESS_ON_DEMAND; | ||
| 103 | return iflags; | ||
| 104 | }; | ||
| 105 | |||
| 64 | static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list, | 106 | static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list, |
| 65 | struct bnxt_qplib_sge *sg_list, int num) | 107 | struct bnxt_qplib_sge *sg_list, int num) |
| 66 | { | 108 | { |
| @@ -149,8 +191,8 @@ int bnxt_re_query_device(struct ib_device *ibdev, | |||
| 149 | ib_attr->max_total_mcast_qp_attach = 0; | 191 | ib_attr->max_total_mcast_qp_attach = 0; |
| 150 | ib_attr->max_ah = dev_attr->max_ah; | 192 | ib_attr->max_ah = dev_attr->max_ah; |
| 151 | 193 | ||
| 152 | ib_attr->max_fmr = dev_attr->max_fmr; | 194 | ib_attr->max_fmr = 0; |
| 153 | ib_attr->max_map_per_fmr = 1; /* ? */ | 195 | ib_attr->max_map_per_fmr = 0; |
| 154 | 196 | ||
| 155 | ib_attr->max_srq = dev_attr->max_srq; | 197 | ib_attr->max_srq = dev_attr->max_srq; |
| 156 | ib_attr->max_srq_wr = dev_attr->max_srq_wqes; | 198 | ib_attr->max_srq_wr = dev_attr->max_srq_wqes; |
| @@ -410,6 +452,158 @@ enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev, | |||
| 410 | return IB_LINK_LAYER_ETHERNET; | 452 | return IB_LINK_LAYER_ETHERNET; |
| 411 | } | 453 | } |
| 412 | 454 | ||
| 455 | #define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE) | ||
| 456 | |||
| 457 | static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd) | ||
| 458 | { | ||
| 459 | struct bnxt_re_fence_data *fence = &pd->fence; | ||
| 460 | struct ib_mr *ib_mr = &fence->mr->ib_mr; | ||
| 461 | struct bnxt_qplib_swqe *wqe = &fence->bind_wqe; | ||
| 462 | |||
| 463 | memset(wqe, 0, sizeof(*wqe)); | ||
| 464 | wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW; | ||
| 465 | wqe->wr_id = BNXT_QPLIB_FENCE_WRID; | ||
| 466 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; | ||
| 467 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; | ||
| 468 | wqe->bind.zero_based = false; | ||
| 469 | wqe->bind.parent_l_key = ib_mr->lkey; | ||
| 470 | wqe->bind.va = (u64)(unsigned long)fence->va; | ||
| 471 | wqe->bind.length = fence->size; | ||
| 472 | wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ); | ||
| 473 | wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1; | ||
| 474 | |||
| 475 | /* Save the initial rkey in fence structure for now; | ||
| 476 | * wqe->bind.r_key will be set at (re)bind time. | ||
| 477 | */ | ||
| 478 | fence->bind_rkey = ib_inc_rkey(fence->mw->rkey); | ||
| 479 | } | ||
| 480 | |||
| 481 | static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp) | ||
| 482 | { | ||
| 483 | struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp, | ||
| 484 | qplib_qp); | ||
| 485 | struct ib_pd *ib_pd = qp->ib_qp.pd; | ||
| 486 | struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); | ||
| 487 | struct bnxt_re_fence_data *fence = &pd->fence; | ||
| 488 | struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe; | ||
| 489 | struct bnxt_qplib_swqe wqe; | ||
| 490 | int rc; | ||
| 491 | |||
| 492 | memcpy(&wqe, fence_wqe, sizeof(wqe)); | ||
| 493 | wqe.bind.r_key = fence->bind_rkey; | ||
| 494 | fence->bind_rkey = ib_inc_rkey(fence->bind_rkey); | ||
| 495 | |||
| 496 | dev_dbg(rdev_to_dev(qp->rdev), | ||
| 497 | "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n", | ||
| 498 | wqe.bind.r_key, qp->qplib_qp.id, pd); | ||
| 499 | rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe); | ||
| 500 | if (rc) { | ||
| 501 | dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n"); | ||
| 502 | return rc; | ||
| 503 | } | ||
| 504 | bnxt_qplib_post_send_db(&qp->qplib_qp); | ||
| 505 | |||
| 506 | return rc; | ||
| 507 | } | ||
| 508 | |||
| 509 | static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd) | ||
| 510 | { | ||
| 511 | struct bnxt_re_fence_data *fence = &pd->fence; | ||
| 512 | struct bnxt_re_dev *rdev = pd->rdev; | ||
| 513 | struct device *dev = &rdev->en_dev->pdev->dev; | ||
| 514 | struct bnxt_re_mr *mr = fence->mr; | ||
| 515 | |||
| 516 | if (fence->mw) { | ||
| 517 | bnxt_re_dealloc_mw(fence->mw); | ||
| 518 | fence->mw = NULL; | ||
| 519 | } | ||
| 520 | if (mr) { | ||
| 521 | if (mr->ib_mr.rkey) | ||
| 522 | bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr, | ||
| 523 | true); | ||
| 524 | if (mr->ib_mr.lkey) | ||
| 525 | bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); | ||
| 526 | kfree(mr); | ||
| 527 | fence->mr = NULL; | ||
| 528 | } | ||
| 529 | if (fence->dma_addr) { | ||
| 530 | dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES, | ||
| 531 | DMA_BIDIRECTIONAL); | ||
| 532 | fence->dma_addr = 0; | ||
| 533 | } | ||
| 534 | } | ||
| 535 | |||
| 536 | static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd) | ||
| 537 | { | ||
| 538 | int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND; | ||
| 539 | struct bnxt_re_fence_data *fence = &pd->fence; | ||
| 540 | struct bnxt_re_dev *rdev = pd->rdev; | ||
| 541 | struct device *dev = &rdev->en_dev->pdev->dev; | ||
| 542 | struct bnxt_re_mr *mr = NULL; | ||
| 543 | dma_addr_t dma_addr = 0; | ||
| 544 | struct ib_mw *mw; | ||
| 545 | u64 pbl_tbl; | ||
| 546 | int rc; | ||
| 547 | |||
| 548 | dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES, | ||
| 549 | DMA_BIDIRECTIONAL); | ||
| 550 | rc = dma_mapping_error(dev, dma_addr); | ||
| 551 | if (rc) { | ||
| 552 | dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n"); | ||
| 553 | rc = -EIO; | ||
| 554 | fence->dma_addr = 0; | ||
| 555 | goto fail; | ||
| 556 | } | ||
| 557 | fence->dma_addr = dma_addr; | ||
| 558 | |||
| 559 | /* Allocate a MR */ | ||
| 560 | mr = kzalloc(sizeof(*mr), GFP_KERNEL); | ||
| 561 | if (!mr) { | ||
| 562 | rc = -ENOMEM; | ||
| 563 | goto fail; | ||
| 564 | } | ||
| 565 | fence->mr = mr; | ||
| 566 | mr->rdev = rdev; | ||
| 567 | mr->qplib_mr.pd = &pd->qplib_pd; | ||
| 568 | mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR; | ||
| 569 | mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags); | ||
| 570 | rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); | ||
| 571 | if (rc) { | ||
| 572 | dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n"); | ||
| 573 | goto fail; | ||
| 574 | } | ||
| 575 | |||
| 576 | /* Register MR */ | ||
| 577 | mr->ib_mr.lkey = mr->qplib_mr.lkey; | ||
| 578 | mr->qplib_mr.va = (u64)(unsigned long)fence->va; | ||
| 579 | mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES; | ||
| 580 | pbl_tbl = dma_addr; | ||
| 581 | rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl, | ||
| 582 | BNXT_RE_FENCE_PBL_SIZE, false); | ||
| 583 | if (rc) { | ||
| 584 | dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n"); | ||
| 585 | goto fail; | ||
| 586 | } | ||
| 587 | mr->ib_mr.rkey = mr->qplib_mr.rkey; | ||
| 588 | |||
| 589 | /* Create a fence MW only for kernel consumers */ | ||
| 590 | mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL); | ||
| 591 | if (IS_ERR(mw)) { | ||
| 592 | dev_err(rdev_to_dev(rdev), | ||
| 593 | "Failed to create fence-MW for PD: %p\n", pd); | ||
| 594 | rc = PTR_ERR(mw); | ||
| 595 | goto fail; | ||
| 596 | } | ||
| 597 | fence->mw = mw; | ||
| 598 | |||
| 599 | bnxt_re_create_fence_wqe(pd); | ||
| 600 | return 0; | ||
| 601 | |||
| 602 | fail: | ||
| 603 | bnxt_re_destroy_fence_mr(pd); | ||
| 604 | return rc; | ||
| 605 | } | ||
| 606 | |||
| 413 | /* Protection Domains */ | 607 | /* Protection Domains */ |
| 414 | int bnxt_re_dealloc_pd(struct ib_pd *ib_pd) | 608 | int bnxt_re_dealloc_pd(struct ib_pd *ib_pd) |
| 415 | { | 609 | { |
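bnxt_re_create_fence_mr() above follows the usual streaming-DMA discipline: map the buffer, validate the handle with dma_mapping_error() before handing it to hardware, and unmap with the same length and direction on teardown. The pairing in isolation (buffer and length are illustrative):

    dma_addr_t dma_addr;

    dma_addr = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
    if (dma_mapping_error(dev, dma_addr))
            return -EIO;                    /* never hand an unchecked handle to the device */

    /* ...device DMAs to/from dma_addr... */

    dma_unmap_single(dev, dma_addr, len, DMA_BIDIRECTIONAL);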
| @@ -417,6 +611,7 @@ int bnxt_re_dealloc_pd(struct ib_pd *ib_pd) | |||
| 417 | struct bnxt_re_dev *rdev = pd->rdev; | 611 | struct bnxt_re_dev *rdev = pd->rdev; |
| 418 | int rc; | 612 | int rc; |
| 419 | 613 | ||
| 614 | bnxt_re_destroy_fence_mr(pd); | ||
| 420 | if (ib_pd->uobject && pd->dpi.dbr) { | 615 | if (ib_pd->uobject && pd->dpi.dbr) { |
| 421 | struct ib_ucontext *ib_uctx = ib_pd->uobject->context; | 616 | struct ib_ucontext *ib_uctx = ib_pd->uobject->context; |
| 422 | struct bnxt_re_ucontext *ucntx; | 617 | struct bnxt_re_ucontext *ucntx; |
| @@ -498,6 +693,10 @@ struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev, | |||
| 498 | } | 693 | } |
| 499 | } | 694 | } |
| 500 | 695 | ||
| 696 | if (!udata) | ||
| 697 | if (bnxt_re_create_fence_mr(pd)) | ||
| 698 | dev_warn(rdev_to_dev(rdev), | ||
| 699 | "Failed to create Fence-MR\n"); | ||
| 501 | return &pd->ib_pd; | 700 | return &pd->ib_pd; |
| 502 | dbfail: | 701 | dbfail: |
| 503 | (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl, | 702 | (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl, |
| @@ -849,12 +1048,16 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp | |||
| 849 | /* Shadow QP SQ depth should be same as QP1 RQ depth */ | 1048 | /* Shadow QP SQ depth should be same as QP1 RQ depth */ |
| 850 | qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe; | 1049 | qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe; |
| 851 | qp->qplib_qp.sq.max_sge = 2; | 1050 | qp->qplib_qp.sq.max_sge = 2; |
| 1051 | /* Q full delta can be 1 since this is an internal QP */ | ||
| 1052 | qp->qplib_qp.sq.q_full_delta = 1; | ||
| 852 | 1053 | ||
| 853 | qp->qplib_qp.scq = qp1_qp->scq; | 1054 | qp->qplib_qp.scq = qp1_qp->scq; |
| 854 | qp->qplib_qp.rcq = qp1_qp->rcq; | 1055 | qp->qplib_qp.rcq = qp1_qp->rcq; |
| 855 | 1056 | ||
| 856 | qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe; | 1057 | qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe; |
| 857 | qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge; | 1058 | qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge; |
| 1059 | /* Q full delta can be 1 since this is an internal QP */ | ||
| 1060 | qp->qplib_qp.rq.q_full_delta = 1; | ||
| 858 | 1061 | ||
| 859 | qp->qplib_qp.mtu = qp1_qp->mtu; | 1062 | qp->qplib_qp.mtu = qp1_qp->mtu; |
| 860 | 1063 | ||
| @@ -917,10 +1120,6 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, | |||
| 917 | qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type == | 1120 | qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type == |
| 918 | IB_SIGNAL_ALL_WR) ? true : false); | 1121 | IB_SIGNAL_ALL_WR) ? true : false); |
| 919 | 1122 | ||
| 920 | entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1); | ||
| 921 | qp->qplib_qp.sq.max_wqe = min_t(u32, entries, | ||
| 922 | dev_attr->max_qp_wqes + 1); | ||
| 923 | |||
| 924 | qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge; | 1123 | qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge; |
| 925 | if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges) | 1124 | if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges) |
| 926 | qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges; | 1125 | qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges; |
| @@ -959,6 +1158,9 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, | |||
| 959 | qp->qplib_qp.rq.max_wqe = min_t(u32, entries, | 1158 | qp->qplib_qp.rq.max_wqe = min_t(u32, entries, |
| 960 | dev_attr->max_qp_wqes + 1); | 1159 | dev_attr->max_qp_wqes + 1); |
| 961 | 1160 | ||
| 1161 | qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe - | ||
| 1162 | qp_init_attr->cap.max_recv_wr; | ||
| 1163 | |||
| 962 | qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge; | 1164 | qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge; |
| 963 | if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges) | 1165 | if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges) |
| 964 | qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; | 1166 | qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; |
| @@ -967,6 +1169,12 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, | |||
| 967 | qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu)); | 1169 | qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu)); |
| 968 | 1170 | ||
| 969 | if (qp_init_attr->qp_type == IB_QPT_GSI) { | 1171 | if (qp_init_attr->qp_type == IB_QPT_GSI) { |
| 1172 | /* Allocate 1 more than what's provided */ | ||
| 1173 | entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1); | ||
| 1174 | qp->qplib_qp.sq.max_wqe = min_t(u32, entries, | ||
| 1175 | dev_attr->max_qp_wqes + 1); | ||
| 1176 | qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe - | ||
| 1177 | qp_init_attr->cap.max_send_wr; | ||
| 970 | qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; | 1178 | qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; |
| 971 | if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges) | 1179 | if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges) |
| 972 | qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; | 1180 | qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; |
| @@ -1006,6 +1214,22 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, | |||
| 1006 | } | 1214 | } |
| 1007 | 1215 | ||
| 1008 | } else { | 1216 | } else { |
| 1217 | /* Allocate 128 + 1 more than what's provided */ | ||
| 1218 | entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + | ||
| 1219 | BNXT_QPLIB_RESERVED_QP_WRS + 1); | ||
| 1220 | qp->qplib_qp.sq.max_wqe = min_t(u32, entries, | ||
| 1221 | dev_attr->max_qp_wqes + | ||
| 1222 | BNXT_QPLIB_RESERVED_QP_WRS + 1); | ||
| 1223 | qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1; | ||
| 1224 | |||
| 1225 | /* | ||
| 1226 | * Reserving one slot for Phantom WQE. Application can | ||
| 1227 | * post one extra entry in this case. But allowing this to avoid | ||
| 1228 | * unexpected Queue full condition | ||
| 1229 | */ | ||
| 1230 | |||
| 1231 | qp->qplib_qp.sq.q_full_delta -= 1; | ||
| 1232 | |||
| 1009 | qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom; | 1233 | qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom; |
| 1010 | qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom; | 1234 | qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom; |
| 1011 | if (udata) { | 1235 | if (udata) { |
| @@ -1025,6 +1249,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, | |||
| 1025 | 1249 | ||
| 1026 | qp->ib_qp.qp_num = qp->qplib_qp.id; | 1250 | qp->ib_qp.qp_num = qp->qplib_qp.id; |
| 1027 | spin_lock_init(&qp->sq_lock); | 1251 | spin_lock_init(&qp->sq_lock); |
| 1252 | spin_lock_init(&qp->rq_lock); | ||
| 1028 | 1253 | ||
| 1029 | if (udata) { | 1254 | if (udata) { |
| 1030 | struct bnxt_re_qp_resp resp; | 1255 | struct bnxt_re_qp_resp resp; |
| @@ -1129,48 +1354,6 @@ static enum ib_mtu __to_ib_mtu(u32 mtu) | |||
| 1129 | } | 1354 | } |
| 1130 | } | 1355 | } |
| 1131 | 1356 | ||
| 1132 | static int __from_ib_access_flags(int iflags) | ||
| 1133 | { | ||
| 1134 | int qflags = 0; | ||
| 1135 | |||
| 1136 | if (iflags & IB_ACCESS_LOCAL_WRITE) | ||
| 1137 | qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE; | ||
| 1138 | if (iflags & IB_ACCESS_REMOTE_READ) | ||
| 1139 | qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ; | ||
| 1140 | if (iflags & IB_ACCESS_REMOTE_WRITE) | ||
| 1141 | qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE; | ||
| 1142 | if (iflags & IB_ACCESS_REMOTE_ATOMIC) | ||
| 1143 | qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC; | ||
| 1144 | if (iflags & IB_ACCESS_MW_BIND) | ||
| 1145 | qflags |= BNXT_QPLIB_ACCESS_MW_BIND; | ||
| 1146 | if (iflags & IB_ZERO_BASED) | ||
| 1147 | qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED; | ||
| 1148 | if (iflags & IB_ACCESS_ON_DEMAND) | ||
| 1149 | qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND; | ||
| 1150 | return qflags; | ||
| 1151 | }; | ||
| 1152 | |||
| 1153 | static enum ib_access_flags __to_ib_access_flags(int qflags) | ||
| 1154 | { | ||
| 1155 | enum ib_access_flags iflags = 0; | ||
| 1156 | |||
| 1157 | if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE) | ||
| 1158 | iflags |= IB_ACCESS_LOCAL_WRITE; | ||
| 1159 | if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE) | ||
| 1160 | iflags |= IB_ACCESS_REMOTE_WRITE; | ||
| 1161 | if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ) | ||
| 1162 | iflags |= IB_ACCESS_REMOTE_READ; | ||
| 1163 | if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC) | ||
| 1164 | iflags |= IB_ACCESS_REMOTE_ATOMIC; | ||
| 1165 | if (qflags & BNXT_QPLIB_ACCESS_MW_BIND) | ||
| 1166 | iflags |= IB_ACCESS_MW_BIND; | ||
| 1167 | if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED) | ||
| 1168 | iflags |= IB_ZERO_BASED; | ||
| 1169 | if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND) | ||
| 1170 | iflags |= IB_ACCESS_ON_DEMAND; | ||
| 1171 | return iflags; | ||
| 1172 | }; | ||
| 1173 | |||
| 1174 | static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev, | 1357 | static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev, |
| 1175 | struct bnxt_re_qp *qp1_qp, | 1358 | struct bnxt_re_qp *qp1_qp, |
| 1176 | int qp_attr_mask) | 1359 | int qp_attr_mask) |
| @@ -1378,11 +1561,21 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, | |||
| 1378 | entries = roundup_pow_of_two(qp_attr->cap.max_send_wr); | 1561 | entries = roundup_pow_of_two(qp_attr->cap.max_send_wr); |
| 1379 | qp->qplib_qp.sq.max_wqe = min_t(u32, entries, | 1562 | qp->qplib_qp.sq.max_wqe = min_t(u32, entries, |
| 1380 | dev_attr->max_qp_wqes + 1); | 1563 | dev_attr->max_qp_wqes + 1); |
| 1564 | qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe - | ||
| 1565 | qp_attr->cap.max_send_wr; | ||
| 1566 | /* | ||
| 1567 | * Reserve one slot for the phantom WQE. Some applications can | ||
| 1568 | * then post one extra entry; this is allowed so that an | ||
| 1569 | * unexpected queue-full condition is avoided. | ||
| 1570 | */ | ||
| 1571 | qp->qplib_qp.sq.q_full_delta -= 1; | ||
| 1381 | qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge; | 1572 | qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge; |
| 1382 | if (qp->qplib_qp.rq.max_wqe) { | 1573 | if (qp->qplib_qp.rq.max_wqe) { |
| 1383 | entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr); | 1574 | entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr); |
| 1384 | qp->qplib_qp.rq.max_wqe = | 1575 | qp->qplib_qp.rq.max_wqe = |
| 1385 | min_t(u32, entries, dev_attr->max_qp_wqes + 1); | 1576 | min_t(u32, entries, dev_attr->max_qp_wqes + 1); |
| 1577 | qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe - | ||
| 1578 | qp_attr->cap.max_recv_wr; | ||
| 1386 | qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge; | 1579 | qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge; |
| 1387 | } else { | 1580 | } else { |
| 1388 | /* SRQ was used prior, just ignore the RQ caps */ | 1581 | /* SRQ was used prior, just ignore the RQ caps */ |
| @@ -1883,6 +2076,22 @@ static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev, | |||
| 1883 | return payload_sz; | 2076 | return payload_sz; |
| 1884 | } | 2077 | } |
| 1885 | 2078 | ||
| 2079 | static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp) | ||
| 2080 | { | ||
| 2081 | if ((qp->ib_qp.qp_type == IB_QPT_UD || | ||
| 2082 | qp->ib_qp.qp_type == IB_QPT_GSI || | ||
| 2083 | qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) && | ||
| 2084 | qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) { | ||
| 2085 | int qp_attr_mask; | ||
| 2086 | struct ib_qp_attr qp_attr; | ||
| 2087 | |||
| 2088 | qp_attr_mask = IB_QP_STATE; | ||
| 2089 | qp_attr.qp_state = IB_QPS_RTS; | ||
| 2090 | bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL); | ||
| 2091 | qp->qplib_qp.wqe_cnt = 0; | ||
| 2092 | } | ||
| 2093 | } | ||
| 2094 | |||
| 1886 | static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev, | 2095 | static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev, |
| 1887 | struct bnxt_re_qp *qp, | 2096 | struct bnxt_re_qp *qp, |
| 1888 | struct ib_send_wr *wr) | 2097 | struct ib_send_wr *wr) |
| @@ -1928,6 +2137,7 @@ bad: | |||
| 1928 | wr = wr->next; | 2137 | wr = wr->next; |
| 1929 | } | 2138 | } |
| 1930 | bnxt_qplib_post_send_db(&qp->qplib_qp); | 2139 | bnxt_qplib_post_send_db(&qp->qplib_qp); |
| 2140 | bnxt_ud_qp_hw_stall_workaround(qp); | ||
| 1931 | spin_unlock_irqrestore(&qp->sq_lock, flags); | 2141 | spin_unlock_irqrestore(&qp->sq_lock, flags); |
| 1932 | return rc; | 2142 | return rc; |
| 1933 | } | 2143 | } |
| @@ -2024,6 +2234,7 @@ bad: | |||
| 2024 | wr = wr->next; | 2234 | wr = wr->next; |
| 2025 | } | 2235 | } |
| 2026 | bnxt_qplib_post_send_db(&qp->qplib_qp); | 2236 | bnxt_qplib_post_send_db(&qp->qplib_qp); |
| 2237 | bnxt_ud_qp_hw_stall_workaround(qp); | ||
| 2027 | spin_unlock_irqrestore(&qp->sq_lock, flags); | 2238 | spin_unlock_irqrestore(&qp->sq_lock, flags); |
| 2028 | 2239 | ||
| 2029 | return rc; | 2240 | return rc; |
| @@ -2071,7 +2282,10 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr, | |||
| 2071 | struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); | 2282 | struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); |
| 2072 | struct bnxt_qplib_swqe wqe; | 2283 | struct bnxt_qplib_swqe wqe; |
| 2073 | int rc = 0, payload_sz = 0; | 2284 | int rc = 0, payload_sz = 0; |
| 2285 | unsigned long flags; | ||
| 2286 | u32 count = 0; | ||
| 2074 | 2287 | ||
| 2288 | spin_lock_irqsave(&qp->rq_lock, flags); | ||
| 2075 | while (wr) { | 2289 | while (wr) { |
| 2076 | /* House keeping */ | 2290 | /* House keeping */ |
| 2077 | memset(&wqe, 0, sizeof(wqe)); | 2291 | memset(&wqe, 0, sizeof(wqe)); |
| @@ -2100,9 +2314,21 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr, | |||
| 2100 | *bad_wr = wr; | 2314 | *bad_wr = wr; |
| 2101 | break; | 2315 | break; |
| 2102 | } | 2316 | } |
| 2317 | |||
| 2318 | /* Ring the doorbell once the number of posted RQEs reaches the threshold */ | ||
| 2319 | if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) { | ||
| 2320 | bnxt_qplib_post_recv_db(&qp->qplib_qp); | ||
| 2321 | count = 0; | ||
| 2322 | } | ||
| 2323 | |||
| 2103 | wr = wr->next; | 2324 | wr = wr->next; |
| 2104 | } | 2325 | } |
| 2105 | bnxt_qplib_post_recv_db(&qp->qplib_qp); | 2326 | |
| 2327 | if (count) | ||
| 2328 | bnxt_qplib_post_recv_db(&qp->qplib_qp); | ||
| 2329 | |||
| 2330 | spin_unlock_irqrestore(&qp->rq_lock, flags); | ||
| 2331 | |||
| 2106 | return rc; | 2332 | return rc; |
| 2107 | } | 2333 | } |
| 2108 | 2334 | ||
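With BNXT_RE_RQ_WQE_THRESHOLD set to 32, the receive path above now rings the RQ doorbell once per batch of 32 posted RQEs rather than once per call, plus a final ring for any remainder. The batching shape, with ring_rq_doorbell() as a hypothetical stand-in for bnxt_qplib_post_recv_db():

    u32 count = 0;

    for (; wr; wr = wr->next) {
            /* ...translate wr into an RQE and post it... */
            if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
                    ring_rq_doorbell();     /* hypothetical helper */
                    count = 0;
            }
    }
    if (count)
            ring_rq_doorbell();             /* e.g. 100 WRs: rings after 32, 64, 96, then the last 4 */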
| @@ -2643,12 +2869,36 @@ static void bnxt_re_process_res_ud_wc(struct ib_wc *wc, | |||
| 2643 | wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; | 2869 | wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; |
| 2644 | } | 2870 | } |
| 2645 | 2871 | ||
| 2872 | static int send_phantom_wqe(struct bnxt_re_qp *qp) | ||
| 2873 | { | ||
| 2874 | struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp; | ||
| 2875 | unsigned long flags; | ||
| 2876 | int rc = 0; | ||
| 2877 | |||
| 2878 | spin_lock_irqsave(&qp->sq_lock, flags); | ||
| 2879 | |||
| 2880 | rc = bnxt_re_bind_fence_mw(lib_qp); | ||
| 2881 | if (!rc) { | ||
| 2882 | lib_qp->sq.phantom_wqe_cnt++; | ||
| 2883 | dev_dbg(&lib_qp->sq.hwq.pdev->dev, | ||
| 2884 | "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n", | ||
| 2885 | lib_qp->id, lib_qp->sq.hwq.prod, | ||
| 2886 | HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq), | ||
| 2887 | lib_qp->sq.phantom_wqe_cnt); | ||
| 2888 | } | ||
| 2889 | |||
| 2890 | spin_unlock_irqrestore(&qp->sq_lock, flags); | ||
| 2891 | return rc; | ||
| 2892 | } | ||
| 2893 | |||
| 2646 | int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) | 2894 | int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) |
| 2647 | { | 2895 | { |
| 2648 | struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq); | 2896 | struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq); |
| 2649 | struct bnxt_re_qp *qp; | 2897 | struct bnxt_re_qp *qp; |
| 2650 | struct bnxt_qplib_cqe *cqe; | 2898 | struct bnxt_qplib_cqe *cqe; |
| 2651 | int i, ncqe, budget; | 2899 | int i, ncqe, budget; |
| 2900 | struct bnxt_qplib_q *sq; | ||
| 2901 | struct bnxt_qplib_qp *lib_qp; | ||
| 2652 | u32 tbl_idx; | 2902 | u32 tbl_idx; |
| 2653 | struct bnxt_re_sqp_entries *sqp_entry = NULL; | 2903 | struct bnxt_re_sqp_entries *sqp_entry = NULL; |
| 2654 | unsigned long flags; | 2904 | unsigned long flags; |
| @@ -2661,7 +2911,21 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) | |||
| 2661 | } | 2911 | } |
| 2662 | cqe = &cq->cql[0]; | 2912 | cqe = &cq->cql[0]; |
| 2663 | while (budget) { | 2913 | while (budget) { |
| 2664 | ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget); | 2914 | lib_qp = NULL; |
| 2915 | ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp); | ||
| 2916 | if (lib_qp) { | ||
| 2917 | sq = &lib_qp->sq; | ||
| 2918 | if (sq->send_phantom) { | ||
| 2919 | qp = container_of(lib_qp, | ||
| 2920 | struct bnxt_re_qp, qplib_qp); | ||
| 2921 | if (send_phantom_wqe(qp) == -ENOMEM) | ||
| 2922 | dev_err(rdev_to_dev(cq->rdev), | ||
| 2923 | "Phantom failed! Scheduled to send again\n"); | ||
| 2924 | else | ||
| 2925 | sq->send_phantom = false; | ||
| 2926 | } | ||
| 2927 | } | ||
| 2928 | |||
| 2665 | if (!ncqe) | 2929 | if (!ncqe) |
| 2666 | break; | 2930 | break; |
| 2667 | 2931 | ||
| @@ -2822,6 +3086,12 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr) | |||
| 2822 | struct bnxt_re_dev *rdev = mr->rdev; | 3086 | struct bnxt_re_dev *rdev = mr->rdev; |
| 2823 | int rc; | 3087 | int rc; |
| 2824 | 3088 | ||
| 3089 | rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); | ||
| 3090 | if (rc) { | ||
| 3091 | dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc); | ||
| 3092 | return rc; | ||
| 3093 | } | ||
| 3094 | |||
| 2825 | if (mr->npages && mr->pages) { | 3095 | if (mr->npages && mr->pages) { |
| 2826 | rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res, | 3096 | rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res, |
| 2827 | &mr->qplib_frpl); | 3097 | &mr->qplib_frpl); |
| @@ -2829,8 +3099,6 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr) | |||
| 2829 | mr->npages = 0; | 3099 | mr->npages = 0; |
| 2830 | mr->pages = NULL; | 3100 | mr->pages = NULL; |
| 2831 | } | 3101 | } |
| 2832 | rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); | ||
| 2833 | |||
| 2834 | if (!IS_ERR_OR_NULL(mr->ib_umem)) | 3102 | if (!IS_ERR_OR_NULL(mr->ib_umem)) |
| 2835 | ib_umem_release(mr->ib_umem); | 3103 | ib_umem_release(mr->ib_umem); |
| 2836 | 3104 | ||
| @@ -2914,97 +3182,52 @@ fail: | |||
| 2914 | return ERR_PTR(rc); | 3182 | return ERR_PTR(rc); |
| 2915 | } | 3183 | } |
| 2916 | 3184 | ||
| 2917 | /* Fast Memory Regions */ | 3185 | struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type, |
| 2918 | struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *ib_pd, int mr_access_flags, | 3186 | struct ib_udata *udata) |
| 2919 | struct ib_fmr_attr *fmr_attr) | ||
| 2920 | { | 3187 | { |
| 2921 | struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); | 3188 | struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); |
| 2922 | struct bnxt_re_dev *rdev = pd->rdev; | 3189 | struct bnxt_re_dev *rdev = pd->rdev; |
| 2923 | struct bnxt_re_fmr *fmr; | 3190 | struct bnxt_re_mw *mw; |
| 2924 | int rc; | 3191 | int rc; |
| 2925 | 3192 | ||
| 2926 | if (fmr_attr->max_pages > MAX_PBL_LVL_2_PGS || | 3193 | mw = kzalloc(sizeof(*mw), GFP_KERNEL); |
| 2927 | fmr_attr->max_maps > rdev->dev_attr.max_map_per_fmr) { | 3194 | if (!mw) |
| 2928 | dev_err(rdev_to_dev(rdev), "Allocate FMR exceeded Max limit"); | ||
| 2929 | return ERR_PTR(-ENOMEM); | 3195 | return ERR_PTR(-ENOMEM); |
| 2930 | } | 3196 | mw->rdev = rdev; |
| 2931 | fmr = kzalloc(sizeof(*fmr), GFP_KERNEL); | 3197 | mw->qplib_mw.pd = &pd->qplib_pd; |
| 2932 | if (!fmr) | ||
| 2933 | return ERR_PTR(-ENOMEM); | ||
| 2934 | |||
| 2935 | fmr->rdev = rdev; | ||
| 2936 | fmr->qplib_fmr.pd = &pd->qplib_pd; | ||
| 2937 | fmr->qplib_fmr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR; | ||
| 2938 | 3198 | ||
| 2939 | rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &fmr->qplib_fmr); | 3199 | mw->qplib_mw.type = (type == IB_MW_TYPE_1 ? |
| 2940 | if (rc) | 3200 | CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 : |
| 3201 | CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B); | ||
| 3202 | rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw); | ||
| 3203 | if (rc) { | ||
| 3204 | dev_err(rdev_to_dev(rdev), "Allocate MW failed!"); | ||
| 2941 | goto fail; | 3205 | goto fail; |
| 3206 | } | ||
| 3207 | mw->ib_mw.rkey = mw->qplib_mw.rkey; | ||
| 2942 | 3208 | ||
| 2943 | fmr->qplib_fmr.flags = __from_ib_access_flags(mr_access_flags); | 3209 | atomic_inc(&rdev->mw_count); |
| 2944 | fmr->ib_fmr.lkey = fmr->qplib_fmr.lkey; | 3210 | return &mw->ib_mw; |
| 2945 | fmr->ib_fmr.rkey = fmr->ib_fmr.lkey; | ||
| 2946 | 3211 | ||
| 2947 | atomic_inc(&rdev->mr_count); | ||
| 2948 | return &fmr->ib_fmr; | ||
| 2949 | fail: | 3212 | fail: |
| 2950 | kfree(fmr); | 3213 | kfree(mw); |
| 2951 | return ERR_PTR(rc); | 3214 | return ERR_PTR(rc); |
| 2952 | } | 3215 | } |
| 2953 | 3216 | ||
| 2954 | int bnxt_re_map_phys_fmr(struct ib_fmr *ib_fmr, u64 *page_list, int list_len, | 3217 | int bnxt_re_dealloc_mw(struct ib_mw *ib_mw) |
| 2955 | u64 iova) | ||
| 2956 | { | 3218 | { |
| 2957 | struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr, | 3219 | struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw); |
| 2958 | ib_fmr); | 3220 | struct bnxt_re_dev *rdev = mw->rdev; |
| 2959 | struct bnxt_re_dev *rdev = fmr->rdev; | ||
| 2960 | int rc; | 3221 | int rc; |
| 2961 | 3222 | ||
| 2962 | fmr->qplib_fmr.va = iova; | 3223 | rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw); |
| 2963 | fmr->qplib_fmr.total_size = list_len * PAGE_SIZE; | 3224 | if (rc) { |
| 2964 | 3225 | dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc); | |
| 2965 | rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &fmr->qplib_fmr, page_list, | 3226 | return rc; |
| 2966 | list_len, true); | ||
| 2967 | if (rc) | ||
| 2968 | dev_err(rdev_to_dev(rdev), "Failed to map FMR for lkey = 0x%x!", | ||
| 2969 | fmr->ib_fmr.lkey); | ||
| 2970 | return rc; | ||
| 2971 | } | ||
| 2972 | |||
| 2973 | int bnxt_re_unmap_fmr(struct list_head *fmr_list) | ||
| 2974 | { | ||
| 2975 | struct bnxt_re_dev *rdev; | ||
| 2976 | struct bnxt_re_fmr *fmr; | ||
| 2977 | struct ib_fmr *ib_fmr; | ||
| 2978 | int rc = 0; | ||
| 2979 | |||
| 2980 | /* Validate each FMRs inside the fmr_list */ | ||
| 2981 | list_for_each_entry(ib_fmr, fmr_list, list) { | ||
| 2982 | fmr = container_of(ib_fmr, struct bnxt_re_fmr, ib_fmr); | ||
| 2983 | rdev = fmr->rdev; | ||
| 2984 | |||
| 2985 | if (rdev) { | ||
| 2986 | rc = bnxt_qplib_dereg_mrw(&rdev->qplib_res, | ||
| 2987 | &fmr->qplib_fmr, true); | ||
| 2988 | if (rc) | ||
| 2989 | break; | ||
| 2990 | } | ||
| 2991 | } | 3227 | } |
| 2992 | return rc; | ||
| 2993 | } | ||
| 2994 | |||
| 2995 | int bnxt_re_dealloc_fmr(struct ib_fmr *ib_fmr) | ||
| 2996 | { | ||
| 2997 | struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr, | ||
| 2998 | ib_fmr); | ||
| 2999 | struct bnxt_re_dev *rdev = fmr->rdev; | ||
| 3000 | int rc; | ||
| 3001 | 3228 | ||
| 3002 | rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &fmr->qplib_fmr); | 3229 | kfree(mw); |
| 3003 | if (rc) | 3230 | atomic_dec(&rdev->mw_count); |
| 3004 | dev_err(rdev_to_dev(rdev), "Failed to free FMR"); | ||
| 3005 | |||
| 3006 | kfree(fmr); | ||
| 3007 | atomic_dec(&rdev->mr_count); | ||
| 3008 | return rc; | 3231 | return rc; |
| 3009 | } | 3232 | } |
| 3010 | 3233 | ||
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h index 5c3d71765454..6c160f6a5398 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h | |||
| @@ -44,11 +44,23 @@ struct bnxt_re_gid_ctx { | |||
| 44 | u32 refcnt; | 44 | u32 refcnt; |
| 45 | }; | 45 | }; |
| 46 | 46 | ||
| 47 | #define BNXT_RE_FENCE_BYTES 64 | ||
| 48 | struct bnxt_re_fence_data { | ||
| 49 | u32 size; | ||
| 50 | u8 va[BNXT_RE_FENCE_BYTES]; | ||
| 51 | dma_addr_t dma_addr; | ||
| 52 | struct bnxt_re_mr *mr; | ||
| 53 | struct ib_mw *mw; | ||
| 54 | struct bnxt_qplib_swqe bind_wqe; | ||
| 55 | u32 bind_rkey; | ||
| 56 | }; | ||
| 57 | |||
| 47 | struct bnxt_re_pd { | 58 | struct bnxt_re_pd { |
| 48 | struct bnxt_re_dev *rdev; | 59 | struct bnxt_re_dev *rdev; |
| 49 | struct ib_pd ib_pd; | 60 | struct ib_pd ib_pd; |
| 50 | struct bnxt_qplib_pd qplib_pd; | 61 | struct bnxt_qplib_pd qplib_pd; |
| 51 | struct bnxt_qplib_dpi dpi; | 62 | struct bnxt_qplib_dpi dpi; |
| 63 | struct bnxt_re_fence_data fence; | ||
| 52 | }; | 64 | }; |
| 53 | 65 | ||
| 54 | struct bnxt_re_ah { | 66 | struct bnxt_re_ah { |
| @@ -62,6 +74,7 @@ struct bnxt_re_qp { | |||
| 62 | struct bnxt_re_dev *rdev; | 74 | struct bnxt_re_dev *rdev; |
| 63 | struct ib_qp ib_qp; | 75 | struct ib_qp ib_qp; |
| 64 | spinlock_t sq_lock; /* protect sq */ | 76 | spinlock_t sq_lock; /* protect sq */ |
| 77 | spinlock_t rq_lock; /* protect rq */ | ||
| 65 | struct bnxt_qplib_qp qplib_qp; | 78 | struct bnxt_qplib_qp qplib_qp; |
| 66 | struct ib_umem *sumem; | 79 | struct ib_umem *sumem; |
| 67 | struct ib_umem *rumem; | 80 | struct ib_umem *rumem; |
| @@ -181,12 +194,9 @@ int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents, | |||
| 181 | struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type, | 194 | struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type, |
| 182 | u32 max_num_sg); | 195 | u32 max_num_sg); |
| 183 | int bnxt_re_dereg_mr(struct ib_mr *mr); | 196 | int bnxt_re_dereg_mr(struct ib_mr *mr); |
| 184 | struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *pd, int mr_access_flags, | 197 | struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type, |
| 185 | struct ib_fmr_attr *fmr_attr); | 198 | struct ib_udata *udata); |
| 186 | int bnxt_re_map_phys_fmr(struct ib_fmr *fmr, u64 *page_list, int list_len, | 199 | int bnxt_re_dealloc_mw(struct ib_mw *mw); |
| 187 | u64 iova); | ||
| 188 | int bnxt_re_unmap_fmr(struct list_head *fmr_list); | ||
| 189 | int bnxt_re_dealloc_fmr(struct ib_fmr *fmr); | ||
| 190 | struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | 200 | struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, |
| 191 | u64 virt_addr, int mr_access_flags, | 201 | u64 virt_addr, int mr_access_flags, |
| 192 | struct ib_udata *udata); | 202 | struct ib_udata *udata); |
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 5d355401179b..1fce5e73216b 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c | |||
| @@ -507,10 +507,6 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev) | |||
| 507 | ibdev->dereg_mr = bnxt_re_dereg_mr; | 507 | ibdev->dereg_mr = bnxt_re_dereg_mr; |
| 508 | ibdev->alloc_mr = bnxt_re_alloc_mr; | 508 | ibdev->alloc_mr = bnxt_re_alloc_mr; |
| 509 | ibdev->map_mr_sg = bnxt_re_map_mr_sg; | 509 | ibdev->map_mr_sg = bnxt_re_map_mr_sg; |
| 510 | ibdev->alloc_fmr = bnxt_re_alloc_fmr; | ||
| 511 | ibdev->map_phys_fmr = bnxt_re_map_phys_fmr; | ||
| 512 | ibdev->unmap_fmr = bnxt_re_unmap_fmr; | ||
| 513 | ibdev->dealloc_fmr = bnxt_re_dealloc_fmr; | ||
| 514 | 510 | ||
| 515 | ibdev->reg_user_mr = bnxt_re_reg_user_mr; | 511 | ibdev->reg_user_mr = bnxt_re_reg_user_mr; |
| 516 | ibdev->alloc_ucontext = bnxt_re_alloc_ucontext; | 512 | ibdev->alloc_ucontext = bnxt_re_alloc_ucontext; |
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index 43d08b5e9085..f05500bcdcf1 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c | |||
| @@ -284,7 +284,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) | |||
| 284 | { | 284 | { |
| 285 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; | 285 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; |
| 286 | struct cmdq_create_qp1 req; | 286 | struct cmdq_create_qp1 req; |
| 287 | struct creq_create_qp1_resp *resp; | 287 | struct creq_create_qp1_resp resp; |
| 288 | struct bnxt_qplib_pbl *pbl; | 288 | struct bnxt_qplib_pbl *pbl; |
| 289 | struct bnxt_qplib_q *sq = &qp->sq; | 289 | struct bnxt_qplib_q *sq = &qp->sq; |
| 290 | struct bnxt_qplib_q *rq = &qp->rq; | 290 | struct bnxt_qplib_q *rq = &qp->rq; |
| @@ -394,31 +394,12 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) | |||
| 394 | 394 | ||
| 395 | req.pd_id = cpu_to_le32(qp->pd->id); | 395 | req.pd_id = cpu_to_le32(qp->pd->id); |
| 396 | 396 | ||
| 397 | resp = (struct creq_create_qp1_resp *) | 397 | rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, |
| 398 | bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, | 398 | (void *)&resp, NULL, 0); |
| 399 | NULL, 0); | 399 | if (rc) |
| 400 | if (!resp) { | ||
| 401 | dev_err(&res->pdev->dev, "QPLIB: FP: CREATE_QP1 send failed"); | ||
| 402 | rc = -EINVAL; | ||
| 403 | goto fail; | ||
| 404 | } | ||
| 405 | if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { | ||
| 406 | /* Cmd timed out */ | ||
| 407 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP1 timed out"); | ||
| 408 | rc = -ETIMEDOUT; | ||
| 409 | goto fail; | ||
| 410 | } | ||
| 411 | if (resp->status || | ||
| 412 | le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { | ||
| 413 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP1 failed "); | ||
| 414 | dev_err(&rcfw->pdev->dev, | ||
| 415 | "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", | ||
| 416 | resp->status, le16_to_cpu(req.cookie), | ||
| 417 | le16_to_cpu(resp->cookie)); | ||
| 418 | rc = -EINVAL; | ||
| 419 | goto fail; | 400 | goto fail; |
| 420 | } | 401 | |
| 421 | qp->id = le32_to_cpu(resp->xid); | 402 | qp->id = le32_to_cpu(resp.xid); |
| 422 | qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET; | 403 | qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET; |
| 423 | sq->flush_in_progress = false; | 404 | sq->flush_in_progress = false; |
| 424 | rq->flush_in_progress = false; | 405 | rq->flush_in_progress = false; |
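The same conversion repeats for every firmware command in this file: the response is now a caller-owned struct filled in by bnxt_qplib_rcfw_send_message(), and success is signalled solely through the return code, so the per-caller status/cookie checks disappear. The resulting call shape, taken from the hunk above:

    struct creq_create_qp1_resp resp;
    int rc;

    rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
                                      (void *)&resp, NULL, 0);
    if (rc)
            goto fail;                      /* timeout, bad cookie and status are handled inside the helper */

    qp->id = le32_to_cpu(resp.xid);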
| @@ -442,7 +423,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) | |||
| 442 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; | 423 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; |
| 443 | struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr; | 424 | struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr; |
| 444 | struct cmdq_create_qp req; | 425 | struct cmdq_create_qp req; |
| 445 | struct creq_create_qp_resp *resp; | 426 | struct creq_create_qp_resp resp; |
| 446 | struct bnxt_qplib_pbl *pbl; | 427 | struct bnxt_qplib_pbl *pbl; |
| 447 | struct sq_psn_search **psn_search_ptr; | 428 | struct sq_psn_search **psn_search_ptr; |
| 448 | unsigned long int psn_search, poff = 0; | 429 | unsigned long int psn_search, poff = 0; |
| @@ -627,31 +608,12 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) | |||
| 627 | } | 608 | } |
| 628 | req.pd_id = cpu_to_le32(qp->pd->id); | 609 | req.pd_id = cpu_to_le32(qp->pd->id); |
| 629 | 610 | ||
| 630 | resp = (struct creq_create_qp_resp *) | 611 | rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, |
| 631 | bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, | 612 | (void *)&resp, NULL, 0); |
| 632 | NULL, 0); | 613 | if (rc) |
| 633 | if (!resp) { | ||
| 634 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP send failed"); | ||
| 635 | rc = -EINVAL; | ||
| 636 | goto fail; | ||
| 637 | } | ||
| 638 | if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { | ||
| 639 | /* Cmd timed out */ | ||
| 640 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP timed out"); | ||
| 641 | rc = -ETIMEDOUT; | ||
| 642 | goto fail; | ||
| 643 | } | ||
| 644 | if (resp->status || | ||
| 645 | le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { | ||
| 646 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP failed "); | ||
| 647 | dev_err(&rcfw->pdev->dev, | ||
| 648 | "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", | ||
| 649 | resp->status, le16_to_cpu(req.cookie), | ||
| 650 | le16_to_cpu(resp->cookie)); | ||
| 651 | rc = -EINVAL; | ||
| 652 | goto fail; | 614 | goto fail; |
| 653 | } | 615 | |
| 654 | qp->id = le32_to_cpu(resp->xid); | 616 | qp->id = le32_to_cpu(resp.xid); |
| 655 | qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET; | 617 | qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET; |
| 656 | sq->flush_in_progress = false; | 618 | sq->flush_in_progress = false; |
| 657 | rq->flush_in_progress = false; | 619 | rq->flush_in_progress = false; |
| @@ -769,10 +731,11 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) | |||
| 769 | { | 731 | { |
| 770 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; | 732 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; |
| 771 | struct cmdq_modify_qp req; | 733 | struct cmdq_modify_qp req; |
| 772 | struct creq_modify_qp_resp *resp; | 734 | struct creq_modify_qp_resp resp; |
| 773 | u16 cmd_flags = 0, pkey; | 735 | u16 cmd_flags = 0, pkey; |
| 774 | u32 temp32[4]; | 736 | u32 temp32[4]; |
| 775 | u32 bmask; | 737 | u32 bmask; |
| 738 | int rc; | ||
| 776 | 739 | ||
| 777 | RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags); | 740 | RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags); |
| 778 | 741 | ||
| @@ -862,27 +825,10 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) | |||
| 862 | 825 | ||
| 863 | req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id); | 826 | req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id); |
| 864 | 827 | ||
| 865 | resp = (struct creq_modify_qp_resp *) | 828 | rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, |
| 866 | bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, | 829 | (void *)&resp, NULL, 0); |
| 867 | NULL, 0); | 830 | if (rc) |
| 868 | if (!resp) { | 831 | return rc; |
| 869 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP send failed"); | ||
| 870 | return -EINVAL; | ||
| 871 | } | ||
| 872 | if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { | ||
| 873 | /* Cmd timed out */ | ||
| 874 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP timed out"); | ||
| 875 | return -ETIMEDOUT; | ||
| 876 | } | ||
| 877 | if (resp->status || | ||
| 878 | le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { | ||
| 879 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP failed "); | ||
| 880 | dev_err(&rcfw->pdev->dev, | ||
| 881 | "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", | ||
| 882 | resp->status, le16_to_cpu(req.cookie), | ||
| 883 | le16_to_cpu(resp->cookie)); | ||
| 884 | return -EINVAL; | ||
| 885 | } | ||
| 886 | qp->cur_qp_state = qp->state; | 832 | qp->cur_qp_state = qp->state; |
| 887 | return 0; | 833 | return 0; |
| 888 | } | 834 | } |
| @@ -891,37 +837,26 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) | |||
| 891 | { | 837 | { |
| 892 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; | 838 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; |
| 893 | struct cmdq_query_qp req; | 839 | struct cmdq_query_qp req; |
| 894 | struct creq_query_qp_resp *resp; | 840 | struct creq_query_qp_resp resp; |
| 841 | struct bnxt_qplib_rcfw_sbuf *sbuf; | ||
| 895 | struct creq_query_qp_resp_sb *sb; | 842 | struct creq_query_qp_resp_sb *sb; |
| 896 | u16 cmd_flags = 0; | 843 | u16 cmd_flags = 0; |
| 897 | u32 temp32[4]; | 844 | u32 temp32[4]; |
| 898 | int i; | 845 | int i, rc = 0; |
| 899 | 846 | ||
| 900 | RCFW_CMD_PREP(req, QUERY_QP, cmd_flags); | 847 | RCFW_CMD_PREP(req, QUERY_QP, cmd_flags); |
| 901 | 848 | ||
| 849 | sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb)); | ||
| 850 | if (!sbuf) | ||
| 851 | return -ENOMEM; | ||
| 852 | sb = sbuf->sb; | ||
| 853 | |||
| 902 | req.qp_cid = cpu_to_le32(qp->id); | 854 | req.qp_cid = cpu_to_le32(qp->id); |
| 903 | req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS; | 855 | req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS; |
| 904 | resp = (struct creq_query_qp_resp *) | 856 | rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, |
| 905 | bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, | 857 | (void *)sbuf, 0); |
| 906 | (void **)&sb, 0); | 858 | if (rc) |
| 907 | if (!resp) { | 859 | goto bail; |
| 908 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP send failed"); | ||
| 909 | return -EINVAL; | ||
| 910 | } | ||
| 911 | if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { | ||
| 912 | /* Cmd timed out */ | ||
| 913 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP timed out"); | ||
| 914 | return -ETIMEDOUT; | ||
| 915 | } | ||
| 916 | if (resp->status || | ||
| 917 | le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { | ||
| 918 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP failed "); | ||
| 919 | dev_err(&rcfw->pdev->dev, | ||
| 920 | "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", | ||
| 921 | resp->status, le16_to_cpu(req.cookie), | ||
| 922 | le16_to_cpu(resp->cookie)); | ||
| 923 | return -EINVAL; | ||
| 924 | } | ||
| 925 | /* Extract the context from the side buffer */ | 860 | /* Extract the context from the side buffer */ |
| 926 | qp->state = sb->en_sqd_async_notify_state & | 861 | qp->state = sb->en_sqd_async_notify_state & |
| 927 | CREQ_QUERY_QP_RESP_SB_STATE_MASK; | 862 | CREQ_QUERY_QP_RESP_SB_STATE_MASK; |
| @@ -976,7 +911,9 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) | |||
| 976 | qp->dest_qpn = le32_to_cpu(sb->dest_qp_id); | 911 | qp->dest_qpn = le32_to_cpu(sb->dest_qp_id); |
| 977 | memcpy(qp->smac, sb->src_mac, 6); | 912 | memcpy(qp->smac, sb->src_mac, 6); |
| 978 | qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id); | 913 | qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id); |
| 979 | return 0; | 914 | bail: |
| 915 | bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf); | ||
| 916 | return rc; | ||
| 980 | } | 917 | } |
| 981 | 918 | ||
| 982 | static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp) | 919 | static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp) |
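Commands that return extra data in a side buffer, such as QUERY_QP above, now allocate it through the rcfw helpers and must release it on every exit path. The alloc/use/free pairing, condensed from the hunk:

    sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
    if (!sbuf)
            return -ENOMEM;
    sb = sbuf->sb;

    rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
                                      (void *)sbuf, 0);
    if (!rc) {
            /* parse the response context from *sb */
    }
    bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);  /* freed on success and failure alike */
    return rc;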
| @@ -1021,34 +958,18 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, | |||
| 1021 | { | 958 | { |
| 1022 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; | 959 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; |
| 1023 | struct cmdq_destroy_qp req; | 960 | struct cmdq_destroy_qp req; |
| 1024 | struct creq_destroy_qp_resp *resp; | 961 | struct creq_destroy_qp_resp resp; |
| 1025 | unsigned long flags; | 962 | unsigned long flags; |
| 1026 | u16 cmd_flags = 0; | 963 | u16 cmd_flags = 0; |
| 964 | int rc; | ||
| 1027 | 965 | ||
| 1028 | RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags); | 966 | RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags); |
| 1029 | 967 | ||
| 1030 | req.qp_cid = cpu_to_le32(qp->id); | 968 | req.qp_cid = cpu_to_le32(qp->id); |
| 1031 | resp = (struct creq_destroy_qp_resp *) | 969 | rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, |
| 1032 | bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, | 970 | (void *)&resp, NULL, 0); |
| 1033 | NULL, 0); | 971 | if (rc) |
| 1034 | if (!resp) { | 972 | return rc; |
| 1035 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP send failed"); | ||
| 1036 | return -EINVAL; | ||
| 1037 | } | ||
| 1038 | if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { | ||
| 1039 | /* Cmd timed out */ | ||
| 1040 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP timed out"); | ||
| 1041 | return -ETIMEDOUT; | ||
| 1042 | } | ||
| 1043 | if (resp->status || | ||
| 1044 | le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { | ||
| 1045 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP failed "); | ||
| 1046 | dev_err(&rcfw->pdev->dev, | ||
| 1047 | "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", | ||
| 1048 | resp->status, le16_to_cpu(req.cookie), | ||
| 1049 | le16_to_cpu(resp->cookie)); | ||
| 1050 | return -EINVAL; | ||
| 1051 | } | ||
| 1052 | 973 | ||
| 1053 | /* Must walk the associated CQs to nullify the QP ptr */ | 974 | 
| 1054 | spin_lock_irqsave(&qp->scq->hwq.lock, flags); | 975 | spin_lock_irqsave(&qp->scq->hwq.lock, flags); |
| @@ -1162,8 +1083,12 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp, | |||
| 1162 | rc = -EINVAL; | 1083 | rc = -EINVAL; |
| 1163 | goto done; | 1084 | goto done; |
| 1164 | } | 1085 | } |
| 1165 | if (HWQ_CMP((sq->hwq.prod + 1), &sq->hwq) == | 1086 | |
| 1166 | HWQ_CMP(sq->hwq.cons, &sq->hwq)) { | 1087 | if (bnxt_qplib_queue_full(sq)) { |
| 1088 | dev_err(&sq->hwq.pdev->dev, | ||
| 1089 | "QPLIB: prod = %#x cons = %#x qdepth = %#x delta = %#x", | ||
| 1090 | sq->hwq.prod, sq->hwq.cons, sq->hwq.max_elements, | ||
| 1091 | sq->q_full_delta); | ||
| 1167 | rc = -ENOMEM; | 1092 | rc = -ENOMEM; |
| 1168 | goto done; | 1093 | goto done; |
| 1169 | } | 1094 | } |
| @@ -1373,6 +1298,9 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp, | |||
| 1373 | } | 1298 | } |
| 1374 | 1299 | ||
| 1375 | sq->hwq.prod++; | 1300 | sq->hwq.prod++; |
| 1301 | |||
| 1302 | qp->wqe_cnt++; | ||
| 1303 | |||
| 1376 | done: | 1304 | done: |
| 1377 | return rc; | 1305 | return rc; |
| 1378 | } | 1306 | } |
| @@ -1411,8 +1339,7 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp, | |||
| 1411 | rc = -EINVAL; | 1339 | rc = -EINVAL; |
| 1412 | goto done; | 1340 | goto done; |
| 1413 | } | 1341 | } |
| 1414 | if (HWQ_CMP((rq->hwq.prod + 1), &rq->hwq) == | 1342 | if (bnxt_qplib_queue_full(rq)) { |
| 1415 | HWQ_CMP(rq->hwq.cons, &rq->hwq)) { | ||
| 1416 | dev_err(&rq->hwq.pdev->dev, | 1343 | dev_err(&rq->hwq.pdev->dev, |
| 1417 | "QPLIB: FP: QP (0x%x) RQ is full!", qp->id); | 1344 | "QPLIB: FP: QP (0x%x) RQ is full!", qp->id); |
| 1418 | rc = -EINVAL; | 1345 | rc = -EINVAL; |
| @@ -1483,7 +1410,7 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) | |||
| 1483 | { | 1410 | { |
| 1484 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; | 1411 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; |
| 1485 | struct cmdq_create_cq req; | 1412 | struct cmdq_create_cq req; |
| 1486 | struct creq_create_cq_resp *resp; | 1413 | struct creq_create_cq_resp resp; |
| 1487 | struct bnxt_qplib_pbl *pbl; | 1414 | struct bnxt_qplib_pbl *pbl; |
| 1488 | u16 cmd_flags = 0; | 1415 | u16 cmd_flags = 0; |
| 1489 | int rc; | 1416 | int rc; |
| @@ -1525,30 +1452,12 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) | |||
| 1525 | (cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) << | 1452 | (cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) << |
| 1526 | CMDQ_CREATE_CQ_CNQ_ID_SFT); | 1453 | CMDQ_CREATE_CQ_CNQ_ID_SFT); |
| 1527 | 1454 | ||
| 1528 | resp = (struct creq_create_cq_resp *) | 1455 | rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, |
| 1529 | bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, | 1456 | (void *)&resp, NULL, 0); |
| 1530 | NULL, 0); | 1457 | if (rc) |
| 1531 | if (!resp) { | ||
| 1532 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ send failed"); | ||
| 1533 | return -EINVAL; | ||
| 1534 | } | ||
| 1535 | if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { | ||
| 1536 | /* Cmd timed out */ | ||
| 1537 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ timed out"); | ||
| 1538 | rc = -ETIMEDOUT; | ||
| 1539 | goto fail; | ||
| 1540 | } | ||
| 1541 | if (resp->status || | ||
| 1542 | le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { | ||
| 1543 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ failed "); | ||
| 1544 | dev_err(&rcfw->pdev->dev, | ||
| 1545 | "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", | ||
| 1546 | resp->status, le16_to_cpu(req.cookie), | ||
| 1547 | le16_to_cpu(resp->cookie)); | ||
| 1548 | rc = -EINVAL; | ||
| 1549 | goto fail; | 1458 | goto fail; |
| 1550 | } | 1459 | |
| 1551 | cq->id = le32_to_cpu(resp->xid); | 1460 | cq->id = le32_to_cpu(resp.xid); |
| 1552 | cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem; | 1461 | cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem; |
| 1553 | cq->period = BNXT_QPLIB_QUEUE_START_PERIOD; | 1462 | cq->period = BNXT_QPLIB_QUEUE_START_PERIOD; |
| 1554 | init_waitqueue_head(&cq->waitq); | 1463 | init_waitqueue_head(&cq->waitq); |
| @@ -1566,33 +1475,17 @@ int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) | |||
| 1566 | { | 1475 | { |
| 1567 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; | 1476 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; |
| 1568 | struct cmdq_destroy_cq req; | 1477 | struct cmdq_destroy_cq req; |
| 1569 | struct creq_destroy_cq_resp *resp; | 1478 | struct creq_destroy_cq_resp resp; |
| 1570 | u16 cmd_flags = 0; | 1479 | u16 cmd_flags = 0; |
| 1480 | int rc; | ||
| 1571 | 1481 | ||
| 1572 | RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags); | 1482 | RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags); |
| 1573 | 1483 | ||
| 1574 | req.cq_cid = cpu_to_le32(cq->id); | 1484 | req.cq_cid = cpu_to_le32(cq->id); |
| 1575 | resp = (struct creq_destroy_cq_resp *) | 1485 | rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, |
| 1576 | bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, | 1486 | (void *)&resp, NULL, 0); |
| 1577 | NULL, 0); | 1487 | if (rc) |
| 1578 | if (!resp) { | 1488 | return rc; |
| 1579 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ send failed"); | ||
| 1580 | return -EINVAL; | ||
| 1581 | } | ||
| 1582 | if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { | ||
| 1583 | /* Cmd timed out */ | ||
| 1584 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ timed out"); | ||
| 1585 | return -ETIMEDOUT; | ||
| 1586 | } | ||
| 1587 | if (resp->status || | ||
| 1588 | le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { | ||
| 1589 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ failed "); | ||
| 1590 | dev_err(&rcfw->pdev->dev, | ||
| 1591 | "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", | ||
| 1592 | resp->status, le16_to_cpu(req.cookie), | ||
| 1593 | le16_to_cpu(resp->cookie)); | ||
| 1594 | return -EINVAL; | ||
| 1595 | } | ||
| 1596 | bnxt_qplib_free_hwq(res->pdev, &cq->hwq); | 1489 | bnxt_qplib_free_hwq(res->pdev, &cq->hwq); |
| 1597 | return 0; | 1490 | return 0; |
| 1598 | } | 1491 | } |
| @@ -1664,14 +1557,113 @@ static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp, | |||
| 1664 | return rc; | 1557 | return rc; |
| 1665 | } | 1558 | } |
| 1666 | 1559 | ||
| 1560 | /* Note: SQEs are valid from sw_sq_cons up to cqe_sq_cons (exclusive). | ||
| 1561 | * CQEs are tracked from sw_cq_cons to max_elements, but are valid only if VALID=1 | ||
| 1562 | */ | ||
| 1563 | static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq, | ||
| 1564 | u32 cq_cons, u32 sw_sq_cons, u32 cqe_sq_cons) | ||
| 1565 | { | ||
| 1566 | struct bnxt_qplib_q *sq = &qp->sq; | ||
| 1567 | struct bnxt_qplib_swq *swq; | ||
| 1568 | u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx; | ||
| 1569 | struct cq_base *peek_hwcqe, **peek_hw_cqe_ptr; | ||
| 1570 | struct cq_req *peek_req_hwcqe; | ||
| 1571 | struct bnxt_qplib_qp *peek_qp; | ||
| 1572 | struct bnxt_qplib_q *peek_sq; | ||
| 1573 | int i, rc = 0; | ||
| 1574 | |||
| 1575 | /* Normal mode */ | ||
| 1576 | /* Check for the psn_search marking before completing */ | ||
| 1577 | swq = &sq->swq[sw_sq_cons]; | ||
| 1578 | if (swq->psn_search && | ||
| 1579 | le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) { | ||
| 1580 | /* Unmark */ | ||
| 1581 | swq->psn_search->flags_next_psn = cpu_to_le32 | ||
| 1582 | (le32_to_cpu(swq->psn_search->flags_next_psn) | ||
| 1583 | & ~0x80000000); | ||
| 1584 | dev_dbg(&cq->hwq.pdev->dev, | ||
| 1585 | "FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n", | ||
| 1586 | cq_cons, qp->id, sw_sq_cons, cqe_sq_cons); | ||
| 1587 | sq->condition = true; | ||
| 1588 | sq->send_phantom = true; | ||
| 1589 | |||
| 1590 | /* TODO: Only ARM if the previous SQE is ARMALL */ | ||
| 1591 | bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ_ARMALL); | ||
| 1592 | |||
| 1593 | rc = -EAGAIN; | ||
| 1594 | goto out; | ||
| 1595 | } | ||
| 1596 | if (sq->condition) { | ||
| 1597 | /* Peek at the completions */ | ||
| 1598 | peek_raw_cq_cons = cq->hwq.cons; | ||
| 1599 | peek_sw_cq_cons = cq_cons; | ||
| 1600 | i = cq->hwq.max_elements; | ||
| 1601 | while (i--) { | ||
| 1602 | peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq); | ||
| 1603 | peek_hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr; | ||
| 1604 | peek_hwcqe = &peek_hw_cqe_ptr[CQE_PG(peek_sw_cq_cons)] | ||
| 1605 | [CQE_IDX(peek_sw_cq_cons)]; | ||
| 1606 | /* If the next hwcqe is VALID */ | ||
| 1607 | if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons, | ||
| 1608 | cq->hwq.max_elements)) { | ||
| 1609 | /* If the next hwcqe is a REQ */ | ||
| 1610 | if ((peek_hwcqe->cqe_type_toggle & | ||
| 1611 | CQ_BASE_CQE_TYPE_MASK) == | ||
| 1612 | CQ_BASE_CQE_TYPE_REQ) { | ||
| 1613 | peek_req_hwcqe = (struct cq_req *) | ||
| 1614 | peek_hwcqe; | ||
| 1615 | peek_qp = (struct bnxt_qplib_qp *) | ||
| 1616 | ((unsigned long) | ||
| 1617 | le64_to_cpu | ||
| 1618 | (peek_req_hwcqe->qp_handle)); | ||
| 1619 | peek_sq = &peek_qp->sq; | ||
| 1620 | peek_sq_cons_idx = HWQ_CMP(le16_to_cpu( | ||
| 1621 | peek_req_hwcqe->sq_cons_idx) - 1 | ||
| 1622 | , &sq->hwq); | ||
| 1623 | /* If the hwcqe's sq's wr_id matches */ | ||
| 1624 | if (peek_sq == sq && | ||
| 1625 | sq->swq[peek_sq_cons_idx].wr_id == | ||
| 1626 | BNXT_QPLIB_FENCE_WRID) { | ||
| 1627 | /* | ||
| 1628 | * Unbreak only if the phantom | ||
| 1629 | * comes back | ||
| 1630 | */ | ||
| 1631 | dev_dbg(&cq->hwq.pdev->dev, | ||
| 1632 | "FP:Got Phantom CQE"); | ||
| 1633 | sq->condition = false; | ||
| 1634 | sq->single = true; | ||
| 1635 | rc = 0; | ||
| 1636 | goto out; | ||
| 1637 | } | ||
| 1638 | } | ||
| 1639 | /* Valid but not the phantom, so keep looping */ | ||
| 1640 | } else { | ||
| 1641 | /* Not valid yet, just exit and wait */ | ||
| 1642 | rc = -EINVAL; | ||
| 1643 | goto out; | ||
| 1644 | } | ||
| 1645 | peek_sw_cq_cons++; | ||
| 1646 | peek_raw_cq_cons++; | ||
| 1647 | } | ||
| 1648 | dev_err(&cq->hwq.pdev->dev, | ||
| 1649 | "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x", | ||
| 1650 | cq_cons, qp->id, sw_sq_cons, cqe_sq_cons); | ||
| 1651 | rc = -EINVAL; | ||
| 1652 | } | ||
| 1653 | out: | ||
| 1654 | return rc; | ||
| 1655 | } | ||
| 1656 | |||
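do_wa9060() above implements the HW 9060 workaround: a marked SQE defers its completion until a phantom fence CQE (wr_id == BNXT_QPLIB_FENCE_WRID) becomes visible in the CQ. Below is a greatly simplified, non-compilable restatement of that control flow; swqe_is_marked(), clear_mark() and phantom_fence_cqe_visible() are hypothetical helpers standing in for the psn_search marker test and the CQ peek loop.

    /* Simplified restatement of do_wa9060(); hypothetical helpers, not driver code. */
    static int wa9060_sketch(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
                             struct bnxt_qplib_swq *swq)
    {
        struct bnxt_qplib_q *sq = &qp->sq;

        if (swqe_is_marked(swq)) {              /* psn_search marker bit set */
            clear_mark(swq);
            sq->condition = true;               /* hold back SQ completions  */
            sq->send_phantom = true;
            bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ_ARMALL);
            return -EAGAIN;                     /* caller re-polls later     */
        }
        if (sq->condition) {
            if (phantom_fence_cqe_visible(cq, sq)) { /* fence WRID seen      */
                sq->condition = false;
                sq->single = true;              /* complete only this WQE    */
                return 0;
            }
            return -EINVAL;                     /* phantom not seen yet      */
        }
        return 0;
    }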
| 1667 | static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, | 1657 | static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, |
| 1668 | struct cq_req *hwcqe, | 1658 | struct cq_req *hwcqe, |
| 1669 | struct bnxt_qplib_cqe **pcqe, int *budget) | 1659 | struct bnxt_qplib_cqe **pcqe, int *budget, |
| 1660 | u32 cq_cons, struct bnxt_qplib_qp **lib_qp) | ||
| 1670 | { | 1661 | { |
| 1671 | struct bnxt_qplib_qp *qp; | 1662 | struct bnxt_qplib_qp *qp; |
| 1672 | struct bnxt_qplib_q *sq; | 1663 | struct bnxt_qplib_q *sq; |
| 1673 | struct bnxt_qplib_cqe *cqe; | 1664 | struct bnxt_qplib_cqe *cqe; |
| 1674 | u32 sw_cons, cqe_cons; | 1665 | u32 sw_sq_cons, cqe_sq_cons; |
| 1666 | struct bnxt_qplib_swq *swq; | ||
| 1675 | int rc = 0; | 1667 | int rc = 0; |
| 1676 | 1668 | ||
| 1677 | qp = (struct bnxt_qplib_qp *)((unsigned long) | 1669 | qp = (struct bnxt_qplib_qp *)((unsigned long) |
| @@ -1683,13 +1675,13 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, | |||
| 1683 | } | 1675 | } |
| 1684 | sq = &qp->sq; | 1676 | sq = &qp->sq; |
| 1685 | 1677 | ||
| 1686 | cqe_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq); | 1678 | cqe_sq_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq); |
| 1687 | if (cqe_cons > sq->hwq.max_elements) { | 1679 | if (cqe_sq_cons > sq->hwq.max_elements) { |
| 1688 | dev_err(&cq->hwq.pdev->dev, | 1680 | dev_err(&cq->hwq.pdev->dev, |
| 1689 | "QPLIB: FP: CQ Process req reported "); | 1681 | "QPLIB: FP: CQ Process req reported "); |
| 1690 | dev_err(&cq->hwq.pdev->dev, | 1682 | dev_err(&cq->hwq.pdev->dev, |
| 1691 | "QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x", | 1683 | "QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x", |
| 1692 | cqe_cons, sq->hwq.max_elements); | 1684 | cqe_sq_cons, sq->hwq.max_elements); |
| 1693 | return -EINVAL; | 1685 | return -EINVAL; |
| 1694 | } | 1686 | } |
| 1695 | /* If we were in the middle of flushing the SQ, continue */ | 1687 | /* If we were in the middle of flushing the SQ, continue */ |
| @@ -1698,53 +1690,74 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, | |||
| 1698 | 1690 | ||
| 1699 | /* We need to walk the sq's swq to fabricate CQEs for all previously | 1691 | /* We need to walk the sq's swq to fabricate CQEs for all previously |
| 1700 | * signaled SWQEs due to CQE aggregation from the current sq cons | 1692 | * signaled SWQEs due to CQE aggregation from the current sq cons |
| 1701 | * to the cqe_cons | 1693 | * to the cqe_sq_cons |
| 1702 | */ | 1694 | */ |
| 1703 | cqe = *pcqe; | 1695 | cqe = *pcqe; |
| 1704 | while (*budget) { | 1696 | while (*budget) { |
| 1705 | sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq); | 1697 | sw_sq_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq); |
| 1706 | if (sw_cons == cqe_cons) | 1698 | if (sw_sq_cons == cqe_sq_cons) |
| 1699 | /* Done */ | ||
| 1707 | break; | 1700 | break; |
| 1701 | |||
| 1702 | swq = &sq->swq[sw_sq_cons]; | ||
| 1708 | memset(cqe, 0, sizeof(*cqe)); | 1703 | memset(cqe, 0, sizeof(*cqe)); |
| 1709 | cqe->opcode = CQ_BASE_CQE_TYPE_REQ; | 1704 | cqe->opcode = CQ_BASE_CQE_TYPE_REQ; |
| 1710 | cqe->qp_handle = (u64)(unsigned long)qp; | 1705 | cqe->qp_handle = (u64)(unsigned long)qp; |
| 1711 | cqe->src_qp = qp->id; | 1706 | cqe->src_qp = qp->id; |
| 1712 | cqe->wr_id = sq->swq[sw_cons].wr_id; | 1707 | cqe->wr_id = swq->wr_id; |
| 1713 | cqe->type = sq->swq[sw_cons].type; | 1708 | if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID) |
| 1709 | goto skip; | ||
| 1710 | cqe->type = swq->type; | ||
| 1714 | 1711 | ||
| 1715 | /* For the last CQE, check for status. For errors, regardless | 1712 | /* For the last CQE, check for status. For errors, regardless |
| 1716 | * of the request being signaled or not, it must complete with | 1713 | * of the request being signaled or not, it must complete with |
| 1717 | * the hwcqe error status | 1714 | * the hwcqe error status |
| 1718 | */ | 1715 | */ |
| 1719 | if (HWQ_CMP((sw_cons + 1), &sq->hwq) == cqe_cons && | 1716 | if (HWQ_CMP((sw_sq_cons + 1), &sq->hwq) == cqe_sq_cons && |
| 1720 | hwcqe->status != CQ_REQ_STATUS_OK) { | 1717 | hwcqe->status != CQ_REQ_STATUS_OK) { |
| 1721 | cqe->status = hwcqe->status; | 1718 | cqe->status = hwcqe->status; |
| 1722 | dev_err(&cq->hwq.pdev->dev, | 1719 | dev_err(&cq->hwq.pdev->dev, |
| 1723 | "QPLIB: FP: CQ Processed Req "); | 1720 | "QPLIB: FP: CQ Processed Req "); |
| 1724 | dev_err(&cq->hwq.pdev->dev, | 1721 | dev_err(&cq->hwq.pdev->dev, |
| 1725 | "QPLIB: wr_id[%d] = 0x%llx with status 0x%x", | 1722 | "QPLIB: wr_id[%d] = 0x%llx with status 0x%x", |
| 1726 | sw_cons, cqe->wr_id, cqe->status); | 1723 | sw_sq_cons, cqe->wr_id, cqe->status); |
| 1727 | cqe++; | 1724 | cqe++; |
| 1728 | (*budget)--; | 1725 | (*budget)--; |
| 1729 | sq->flush_in_progress = true; | 1726 | sq->flush_in_progress = true; |
| 1730 | /* Must block new posting of SQ and RQ */ | 1727 | /* Must block new posting of SQ and RQ */ |
| 1731 | qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; | 1728 | qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; |
| 1729 | sq->condition = false; | ||
| 1730 | sq->single = false; | ||
| 1732 | } else { | 1731 | } else { |
| 1733 | if (sq->swq[sw_cons].flags & | 1732 | if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) { |
| 1734 | SQ_SEND_FLAGS_SIGNAL_COMP) { | 1733 | /* Before we complete, do WA 9060 */ |
| 1734 | if (do_wa9060(qp, cq, cq_cons, sw_sq_cons, | ||
| 1735 | cqe_sq_cons)) { | ||
| 1736 | *lib_qp = qp; | ||
| 1737 | goto out; | ||
| 1738 | } | ||
| 1735 | cqe->status = CQ_REQ_STATUS_OK; | 1739 | cqe->status = CQ_REQ_STATUS_OK; |
| 1736 | cqe++; | 1740 | cqe++; |
| 1737 | (*budget)--; | 1741 | (*budget)--; |
| 1738 | } | 1742 | } |
| 1739 | } | 1743 | } |
| 1744 | skip: | ||
| 1740 | sq->hwq.cons++; | 1745 | sq->hwq.cons++; |
| 1746 | if (sq->single) | ||
| 1747 | break; | ||
| 1741 | } | 1748 | } |
| 1749 | out: | ||
| 1742 | *pcqe = cqe; | 1750 | *pcqe = cqe; |
| 1743 | if (!*budget && HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_cons) { | 1751 | if (HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_sq_cons) { |
| 1744 | /* Out of budget */ | 1752 | /* Out of budget */ |
| 1745 | rc = -EAGAIN; | 1753 | rc = -EAGAIN; |
| 1746 | goto done; | 1754 | goto done; |
| 1747 | } | 1755 | } |
| 1756 | /* | ||
| 1757 | * Back to normal completion mode only after it has completed all of | ||
| 1758 | * the WC for this CQE | ||
| 1759 | */ | ||
| 1760 | sq->single = false; | ||
| 1748 | if (!sq->flush_in_progress) | 1761 | if (!sq->flush_in_progress) |
| 1749 | goto done; | 1762 | goto done; |
| 1750 | flush: | 1763 | flush: |
| @@ -2074,7 +2087,7 @@ static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq, | |||
| 2074 | } | 2087 | } |
| 2075 | 2088 | ||
| 2076 | int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, | 2089 | int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, |
| 2077 | int num_cqes) | 2090 | int num_cqes, struct bnxt_qplib_qp **lib_qp) |
| 2078 | { | 2091 | { |
| 2079 | struct cq_base *hw_cqe, **hw_cqe_ptr; | 2092 | struct cq_base *hw_cqe, **hw_cqe_ptr; |
| 2080 | unsigned long flags; | 2093 | unsigned long flags; |
| @@ -2099,7 +2112,8 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, | |||
| 2099 | case CQ_BASE_CQE_TYPE_REQ: | 2112 | case CQ_BASE_CQE_TYPE_REQ: |
| 2100 | rc = bnxt_qplib_cq_process_req(cq, | 2113 | rc = bnxt_qplib_cq_process_req(cq, |
| 2101 | (struct cq_req *)hw_cqe, | 2114 | (struct cq_req *)hw_cqe, |
| 2102 | &cqe, &budget); | 2115 | &cqe, &budget, |
| 2116 | sw_cons, lib_qp); | ||
| 2103 | break; | 2117 | break; |
| 2104 | case CQ_BASE_CQE_TYPE_RES_RC: | 2118 | case CQ_BASE_CQE_TYPE_RES_RC: |
| 2105 | rc = bnxt_qplib_cq_process_res_rc(cq, | 2119 | rc = bnxt_qplib_cq_process_res_rc(cq, |
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h index f0150f8da1e3..36b7b7db0e3f 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h | |||
| @@ -88,6 +88,7 @@ struct bnxt_qplib_swq { | |||
| 88 | 88 | ||
| 89 | struct bnxt_qplib_swqe { | 89 | struct bnxt_qplib_swqe { |
| 90 | /* General */ | 90 | /* General */ |
| 91 | #define BNXT_QPLIB_FENCE_WRID 0x46454E43 /* "FENC" */ | ||
| 91 | u64 wr_id; | 92 | u64 wr_id; |
| 92 | u8 reqs_type; | 93 | u8 reqs_type; |
| 93 | u8 type; | 94 | u8 type; |
| @@ -216,9 +217,16 @@ struct bnxt_qplib_q { | |||
| 216 | struct scatterlist *sglist; | 217 | struct scatterlist *sglist; |
| 217 | u32 nmap; | 218 | u32 nmap; |
| 218 | u32 max_wqe; | 219 | u32 max_wqe; |
| 220 | u16 q_full_delta; | ||
| 219 | u16 max_sge; | 221 | u16 max_sge; |
| 220 | u32 psn; | 222 | u32 psn; |
| 221 | bool flush_in_progress; | 223 | bool flush_in_progress; |
| 224 | bool condition; | ||
| 225 | bool single; | ||
| 226 | bool send_phantom; | ||
| 227 | u32 phantom_wqe_cnt; | ||
| 228 | u32 phantom_cqe_cnt; | ||
| 229 | u32 next_cq_cons; | ||
| 222 | }; | 230 | }; |
| 223 | 231 | ||
| 224 | struct bnxt_qplib_qp { | 232 | struct bnxt_qplib_qp { |
| @@ -242,6 +250,7 @@ struct bnxt_qplib_qp { | |||
| 242 | u8 timeout; | 250 | u8 timeout; |
| 243 | u8 retry_cnt; | 251 | u8 retry_cnt; |
| 244 | u8 rnr_retry; | 252 | u8 rnr_retry; |
| 253 | u64 wqe_cnt; | ||
| 245 | u32 min_rnr_timer; | 254 | u32 min_rnr_timer; |
| 246 | u32 max_rd_atomic; | 255 | u32 max_rd_atomic; |
| 247 | u32 max_dest_rd_atomic; | 256 | u32 max_dest_rd_atomic; |
| @@ -301,6 +310,13 @@ struct bnxt_qplib_qp { | |||
| 301 | (!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) == \ | 310 | (!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) == \ |
| 302 | !((raw_cons) & (cp_bit))) | 311 | !((raw_cons) & (cp_bit))) |
| 303 | 312 | ||
| 313 | static inline bool bnxt_qplib_queue_full(struct bnxt_qplib_q *qplib_q) | ||
| 314 | { | ||
| 315 | return HWQ_CMP((qplib_q->hwq.prod + qplib_q->q_full_delta), | ||
| 316 | &qplib_q->hwq) == HWQ_CMP(qplib_q->hwq.cons, | ||
| 317 | &qplib_q->hwq); | ||
| 318 | } | ||
| 319 | |||
| 304 | struct bnxt_qplib_cqe { | 320 | struct bnxt_qplib_cqe { |
| 305 | u8 status; | 321 | u8 status; |
| 306 | u8 type; | 322 | u8 type; |
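The new bnxt_qplib_queue_full() helper above reports the queue as full once the producer index, advanced by q_full_delta, lands on the consumer index under the HWQ_CMP power-of-two mask, presumably so that the WQEs reserved for the HW (cf. BNXT_QPLIB_RESERVED_QP_WRS in qplib_sp.c below) are never consumed by callers. A self-contained arithmetic check with illustrative values (plain user-space C, not driver code):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same masking idea as HWQ_CMP(idx, hwq): indices stay in [0, max). */
    #define CMP(idx, max)  ((idx) & ((max) - 1))

    /* Illustrative restatement of bnxt_qplib_queue_full(). */
    static bool queue_full(uint32_t prod, uint32_t cons, uint32_t max,
                           uint32_t delta)
    {
        return CMP(prod + delta, max) == CMP(cons, max);
    }

    int main(void)
    {
        uint32_t max = 256;     /* ring size, must be a power of two */
        uint32_t delta = 128;   /* e.g. WQEs reserved for the HW     */

        /* One more WQE may still be posted here. */
        printf("prod=127 cons=0 full=%d\n", queue_full(127, 0, max, delta)); /* 0 */
        /* From this point on the last 128 slots stay reserved. */
        printf("prod=128 cons=0 full=%d\n", queue_full(128, 0, max, delta)); /* 1 */
        return 0;
    }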
| @@ -432,7 +448,7 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp, | |||
| 432 | int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq); | 448 | int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq); |
| 433 | int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq); | 449 | int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq); |
| 434 | int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, | 450 | int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, |
| 435 | int num); | 451 | int num, struct bnxt_qplib_qp **qp); |
| 436 | void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type); | 452 | void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type); |
| 437 | void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq); | 453 | void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq); |
| 438 | int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq); | 454 | int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq); |
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 23fb7260662b..16e42754dbec 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | |||
| @@ -39,72 +39,55 @@ | |||
| 39 | #include <linux/spinlock.h> | 39 | #include <linux/spinlock.h> |
| 40 | #include <linux/pci.h> | 40 | #include <linux/pci.h> |
| 41 | #include <linux/prefetch.h> | 41 | #include <linux/prefetch.h> |
| 42 | #include <linux/delay.h> | ||
| 43 | |||
| 42 | #include "roce_hsi.h" | 44 | #include "roce_hsi.h" |
| 43 | #include "qplib_res.h" | 45 | #include "qplib_res.h" |
| 44 | #include "qplib_rcfw.h" | 46 | #include "qplib_rcfw.h" |
| 45 | static void bnxt_qplib_service_creq(unsigned long data); | 47 | static void bnxt_qplib_service_creq(unsigned long data); |
| 46 | 48 | ||
| 47 | /* Hardware communication channel */ | 49 | /* Hardware communication channel */ |
| 48 | int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) | 50 | static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) |
| 49 | { | 51 | { |
| 50 | u16 cbit; | 52 | u16 cbit; |
| 51 | int rc; | 53 | int rc; |
| 52 | 54 | ||
| 53 | cookie &= RCFW_MAX_COOKIE_VALUE; | ||
| 54 | cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; | 55 | cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; |
| 55 | if (!test_bit(cbit, rcfw->cmdq_bitmap)) | ||
| 56 | dev_warn(&rcfw->pdev->dev, | ||
| 57 | "QPLIB: CMD bit %d for cookie 0x%x is not set?", | ||
| 58 | cbit, cookie); | ||
| 59 | |||
| 60 | rc = wait_event_timeout(rcfw->waitq, | 56 | rc = wait_event_timeout(rcfw->waitq, |
| 61 | !test_bit(cbit, rcfw->cmdq_bitmap), | 57 | !test_bit(cbit, rcfw->cmdq_bitmap), |
| 62 | msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS)); | 58 | msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS)); |
| 63 | if (!rc) { | 59 | return rc ? 0 : -ETIMEDOUT; |
| 64 | dev_warn(&rcfw->pdev->dev, | ||
| 65 | "QPLIB: Bono Error: timeout %d msec, msg {0x%x}\n", | ||
| 66 | RCFW_CMD_WAIT_TIME_MS, cookie); | ||
| 67 | } | ||
| 68 | |||
| 69 | return rc; | ||
| 70 | }; | 60 | }; |
| 71 | 61 | ||
| 72 | int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) | 62 | static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) |
| 73 | { | 63 | { |
| 74 | u32 count = -1; | 64 | u32 count = RCFW_BLOCKED_CMD_WAIT_COUNT; |
| 75 | u16 cbit; | 65 | u16 cbit; |
| 76 | 66 | ||
| 77 | cookie &= RCFW_MAX_COOKIE_VALUE; | ||
| 78 | cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; | 67 | cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; |
| 79 | if (!test_bit(cbit, rcfw->cmdq_bitmap)) | 68 | if (!test_bit(cbit, rcfw->cmdq_bitmap)) |
| 80 | goto done; | 69 | goto done; |
| 81 | do { | 70 | do { |
| 71 | mdelay(1); /* 1 msec */ | ||
| 82 | bnxt_qplib_service_creq((unsigned long)rcfw); | 72 | bnxt_qplib_service_creq((unsigned long)rcfw); |
| 83 | } while (test_bit(cbit, rcfw->cmdq_bitmap) && --count); | 73 | } while (test_bit(cbit, rcfw->cmdq_bitmap) && --count); |
| 84 | done: | 74 | done: |
| 85 | return count; | 75 | return count ? 0 : -ETIMEDOUT; |
| 86 | }; | 76 | }; |
| 87 | 77 | ||
| 88 | void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, | 78 | static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req, |
| 89 | struct cmdq_base *req, void **crsbe, | 79 | struct creq_base *resp, void *sb, u8 is_block) |
| 90 | u8 is_block) | ||
| 91 | { | 80 | { |
| 92 | struct bnxt_qplib_crsq *crsq = &rcfw->crsq; | ||
| 93 | struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr; | 81 | struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr; |
| 94 | struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq; | 82 | struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq; |
| 95 | struct bnxt_qplib_hwq *crsb = &rcfw->crsb; | 83 | struct bnxt_qplib_crsq *crsqe; |
| 96 | struct bnxt_qplib_crsqe *crsqe = NULL; | ||
| 97 | struct bnxt_qplib_crsbe **crsb_ptr; | ||
| 98 | u32 sw_prod, cmdq_prod; | 84 | u32 sw_prod, cmdq_prod; |
| 99 | u8 retry_cnt = 0xFF; | ||
| 100 | dma_addr_t dma_addr; | ||
| 101 | unsigned long flags; | 85 | unsigned long flags; |
| 102 | u32 size, opcode; | 86 | u32 size, opcode; |
| 103 | u16 cookie, cbit; | 87 | u16 cookie, cbit; |
| 104 | int pg, idx; | 88 | int pg, idx; |
| 105 | u8 *preq; | 89 | u8 *preq; |
| 106 | 90 | ||
| 107 | retry: | ||
| 108 | opcode = req->opcode; | 91 | opcode = req->opcode; |
| 109 | if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) && | 92 | if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) && |
| 110 | (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC && | 93 | (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC && |
| @@ -112,63 +95,50 @@ retry: | |||
| 112 | dev_err(&rcfw->pdev->dev, | 95 | dev_err(&rcfw->pdev->dev, |
| 113 | "QPLIB: RCFW not initialized, reject opcode 0x%x", | 96 | "QPLIB: RCFW not initialized, reject opcode 0x%x", |
| 114 | opcode); | 97 | opcode); |
| 115 | return NULL; | 98 | return -EINVAL; |
| 116 | } | 99 | } |
| 117 | 100 | ||
| 118 | if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) && | 101 | if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) && |
| 119 | opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) { | 102 | opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) { |
| 120 | dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!"); | 103 | dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!"); |
| 121 | return NULL; | 104 | return -EINVAL; |
| 122 | } | 105 | } |
| 123 | 106 | ||
| 124 | /* Cmdq are in 16-byte units, each request can consume 1 or more | 107 | /* Cmdq are in 16-byte units, each request can consume 1 or more |
| 125 | * cmdqe | 108 | * cmdqe |
| 126 | */ | 109 | */ |
| 127 | spin_lock_irqsave(&cmdq->lock, flags); | 110 | spin_lock_irqsave(&cmdq->lock, flags); |
| 128 | if (req->cmd_size > cmdq->max_elements - | 111 | if (req->cmd_size >= HWQ_FREE_SLOTS(cmdq)) { |
| 129 | ((HWQ_CMP(cmdq->prod, cmdq) - HWQ_CMP(cmdq->cons, cmdq)) & | ||
| 130 | (cmdq->max_elements - 1))) { | ||
| 131 | dev_err(&rcfw->pdev->dev, "QPLIB: RCFW: CMDQ is full!"); | 112 | dev_err(&rcfw->pdev->dev, "QPLIB: RCFW: CMDQ is full!"); |
| 132 | spin_unlock_irqrestore(&cmdq->lock, flags); | 113 | spin_unlock_irqrestore(&cmdq->lock, flags); |
| 133 | 114 | return -EAGAIN; | |
| 134 | if (!retry_cnt--) | ||
| 135 | return NULL; | ||
| 136 | goto retry; | ||
| 137 | } | 115 | } |
| 138 | 116 | ||
| 139 | retry_cnt = 0xFF; | ||
| 140 | 117 | ||
| 141 | cookie = atomic_inc_return(&rcfw->seq_num) & RCFW_MAX_COOKIE_VALUE; | 118 | cookie = rcfw->seq_num & RCFW_MAX_COOKIE_VALUE; |
| 142 | cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; | 119 | cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; |
| 143 | if (is_block) | 120 | if (is_block) |
| 144 | cookie |= RCFW_CMD_IS_BLOCKING; | 121 | cookie |= RCFW_CMD_IS_BLOCKING; |
| 122 | |||
| 123 | set_bit(cbit, rcfw->cmdq_bitmap); | ||
| 145 | req->cookie = cpu_to_le16(cookie); | 124 | req->cookie = cpu_to_le16(cookie); |
| 146 | if (test_and_set_bit(cbit, rcfw->cmdq_bitmap)) { | 125 | crsqe = &rcfw->crsqe_tbl[cbit]; |
| 147 | dev_err(&rcfw->pdev->dev, | 126 | if (crsqe->resp) { |
| 148 | "QPLIB: RCFW MAX outstanding cmd reached!"); | ||
| 149 | atomic_dec(&rcfw->seq_num); | ||
| 150 | spin_unlock_irqrestore(&cmdq->lock, flags); | 127 | spin_unlock_irqrestore(&cmdq->lock, flags); |
| 151 | 128 | return -EBUSY; | |
| 152 | if (!retry_cnt--) | ||
| 153 | return NULL; | ||
| 154 | goto retry; | ||
| 155 | } | 129 | } |
| 156 | /* Reserve a resp buffer slot if requested */ | 130 | memset(resp, 0, sizeof(*resp)); |
| 157 | if (req->resp_size && crsbe) { | 131 | crsqe->resp = (struct creq_qp_event *)resp; |
| 158 | spin_lock(&crsb->lock); | 132 | crsqe->resp->cookie = req->cookie; |
| 159 | sw_prod = HWQ_CMP(crsb->prod, crsb); | 133 | crsqe->req_size = req->cmd_size; |
| 160 | crsb_ptr = (struct bnxt_qplib_crsbe **)crsb->pbl_ptr; | 134 | if (req->resp_size && sb) { |
| 161 | *crsbe = (void *)&crsb_ptr[get_crsb_pg(sw_prod)] | 135 | struct bnxt_qplib_rcfw_sbuf *sbuf = sb; |
| 162 | [get_crsb_idx(sw_prod)]; | 136 | |
| 163 | bnxt_qplib_crsb_dma_next(crsb->pbl_dma_ptr, sw_prod, &dma_addr); | 137 | req->resp_addr = cpu_to_le64(sbuf->dma_addr); |
| 164 | req->resp_addr = cpu_to_le64(dma_addr); | 138 | req->resp_size = (sbuf->size + BNXT_QPLIB_CMDQE_UNITS - 1) / |
| 165 | crsb->prod++; | 139 | BNXT_QPLIB_CMDQE_UNITS; |
| 166 | spin_unlock(&crsb->lock); | ||
| 167 | |||
| 168 | req->resp_size = (sizeof(struct bnxt_qplib_crsbe) + | ||
| 169 | BNXT_QPLIB_CMDQE_UNITS - 1) / | ||
| 170 | BNXT_QPLIB_CMDQE_UNITS; | ||
| 171 | } | 140 | } |
| 141 | |||
| 172 | cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr; | 142 | cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr; |
| 173 | preq = (u8 *)req; | 143 | preq = (u8 *)req; |
| 174 | size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS; | 144 | size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS; |
| @@ -190,23 +160,24 @@ retry: | |||
| 190 | preq += min_t(u32, size, sizeof(*cmdqe)); | 160 | preq += min_t(u32, size, sizeof(*cmdqe)); |
| 191 | size -= min_t(u32, size, sizeof(*cmdqe)); | 161 | size -= min_t(u32, size, sizeof(*cmdqe)); |
| 192 | cmdq->prod++; | 162 | cmdq->prod++; |
| 163 | rcfw->seq_num++; | ||
| 193 | } while (size > 0); | 164 | } while (size > 0); |
| 194 | 165 | ||
| 166 | rcfw->seq_num++; | ||
| 167 | |||
| 195 | cmdq_prod = cmdq->prod; | 168 | cmdq_prod = cmdq->prod; |
| 196 | if (rcfw->flags & FIRMWARE_FIRST_FLAG) { | 169 | if (rcfw->flags & FIRMWARE_FIRST_FLAG) { |
| 197 | /* The very first doorbell write is required to set this flag | 170 | /* The very first doorbell write |
| 198 | * which prompts the FW to reset its internal pointers | 171 | * is required to set this flag |
| 172 | * which prompts the FW to reset | ||
| 173 | * its internal pointers | ||
| 199 | */ | 174 | */ |
| 200 | cmdq_prod |= FIRMWARE_FIRST_FLAG; | 175 | cmdq_prod |= FIRMWARE_FIRST_FLAG; |
| 201 | rcfw->flags &= ~FIRMWARE_FIRST_FLAG; | 176 | rcfw->flags &= ~FIRMWARE_FIRST_FLAG; |
| 202 | } | 177 | } |
| 203 | sw_prod = HWQ_CMP(crsq->prod, crsq); | ||
| 204 | crsqe = &crsq->crsq[sw_prod]; | ||
| 205 | memset(crsqe, 0, sizeof(*crsqe)); | ||
| 206 | crsq->prod++; | ||
| 207 | crsqe->req_size = req->cmd_size; | ||
| 208 | 178 | ||
| 209 | /* ring CMDQ DB */ | 179 | /* ring CMDQ DB */ |
| 180 | wmb(); | ||
| 210 | writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem + | 181 | writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem + |
| 211 | rcfw->cmdq_bar_reg_prod_off); | 182 | rcfw->cmdq_bar_reg_prod_off); |
| 212 | writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem + | 183 | writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem + |
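The wmb() added before the two doorbell writes above ensures that every CMDQ entry store is globally visible before the producer index is published to the device through the BAR. The general shape of that pattern, as a hedged kernel-style sketch in which the descriptor fields and register offset are hypothetical:

    /* Sketch: publish the descriptor contents, then the doorbell, in order. */
    desc[prod].data  = payload;       /* hypothetical command-entry fields   */
    desc[prod].valid = 1;

    wmb();                            /* order descriptor stores before the
                                       * MMIO doorbell write below           */

    writel(prod, db_iomem + DB_PROD_OFFSET);  /* device may DMA-read the
                                               * entry from this point on    */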
| @@ -214,9 +185,56 @@ retry: | |||
| 214 | done: | 185 | done: |
| 215 | spin_unlock_irqrestore(&cmdq->lock, flags); | 186 | spin_unlock_irqrestore(&cmdq->lock, flags); |
| 216 | /* Return the CREQ response pointer */ | 187 | /* Return the CREQ response pointer */ |
| 217 | return crsqe ? &crsqe->qp_event : NULL; | 188 | return 0; |
| 218 | } | 189 | } |
| 219 | 190 | ||
| 191 | int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, | ||
| 192 | struct cmdq_base *req, | ||
| 193 | struct creq_base *resp, | ||
| 194 | void *sb, u8 is_block) | ||
| 195 | { | ||
| 196 | struct creq_qp_event *evnt = (struct creq_qp_event *)resp; | ||
| 197 | u16 cookie; | ||
| 198 | u8 opcode, retry_cnt = 0xFF; | ||
| 199 | int rc = 0; | ||
| 200 | |||
| 201 | do { | ||
| 202 | opcode = req->opcode; | ||
| 203 | rc = __send_message(rcfw, req, resp, sb, is_block); | ||
| 204 | cookie = le16_to_cpu(req->cookie) & RCFW_MAX_COOKIE_VALUE; | ||
| 205 | if (!rc) | ||
| 206 | break; | ||
| 207 | |||
| 208 | if (!retry_cnt || (rc != -EAGAIN && rc != -EBUSY)) { | ||
| 209 | /* send failed */ | ||
| 210 | dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x send failed", | ||
| 211 | cookie, opcode); | ||
| 212 | return rc; | ||
| 213 | } | ||
| 214 | is_block ? mdelay(1) : usleep_range(500, 1000); | ||
| 215 | |||
| 216 | } while (retry_cnt--); | ||
| 217 | |||
| 218 | if (is_block) | ||
| 219 | rc = __block_for_resp(rcfw, cookie); | ||
| 220 | else | ||
| 221 | rc = __wait_for_resp(rcfw, cookie); | ||
| 222 | if (rc) { | ||
| 223 | /* timed out */ | ||
| 224 | dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timedout (%d)msec", | ||
| 225 | cookie, opcode, RCFW_CMD_WAIT_TIME_MS); | ||
| 226 | return rc; | ||
| 227 | } | ||
| 228 | |||
| 229 | if (evnt->status) { | ||
| 230 | /* failed with status */ | ||
| 231 | dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x status %#x", | ||
| 232 | cookie, opcode, evnt->status); | ||
| 233 | rc = -EFAULT; | ||
| 234 | } | ||
| 235 | |||
| 236 | return rc; | ||
| 237 | } | ||
| 220 | /* Completions */ | 238 | /* Completions */ |
| 221 | static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw, | 239 | static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw, |
| 222 | struct creq_func_event *func_event) | 240 | struct creq_func_event *func_event) |
| @@ -260,12 +278,12 @@ static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw, | |||
| 260 | static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, | 278 | static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, |
| 261 | struct creq_qp_event *qp_event) | 279 | struct creq_qp_event *qp_event) |
| 262 | { | 280 | { |
| 263 | struct bnxt_qplib_crsq *crsq = &rcfw->crsq; | ||
| 264 | struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq; | 281 | struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq; |
| 265 | struct bnxt_qplib_crsqe *crsqe; | 282 | struct bnxt_qplib_crsq *crsqe; |
| 266 | u16 cbit, cookie, blocked = 0; | ||
| 267 | unsigned long flags; | 283 | unsigned long flags; |
| 268 | u32 sw_cons; | 284 | u16 cbit, blocked = 0; |
| 285 | u16 cookie; | ||
| 286 | __le16 mcookie; | ||
| 269 | 287 | ||
| 270 | switch (qp_event->event) { | 288 | switch (qp_event->event) { |
| 271 | case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION: | 289 | case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION: |
| @@ -275,24 +293,31 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, | |||
| 275 | default: | 293 | default: |
| 276 | /* Command Response */ | 294 | /* Command Response */ |
| 277 | spin_lock_irqsave(&cmdq->lock, flags); | 295 | spin_lock_irqsave(&cmdq->lock, flags); |
| 278 | sw_cons = HWQ_CMP(crsq->cons, crsq); | 296 | cookie = le16_to_cpu(qp_event->cookie); |
| 279 | crsqe = &crsq->crsq[sw_cons]; | 297 | mcookie = qp_event->cookie; |
| 280 | crsq->cons++; | ||
| 281 | memcpy(&crsqe->qp_event, qp_event, sizeof(crsqe->qp_event)); | ||
| 282 | |||
| 283 | cookie = le16_to_cpu(crsqe->qp_event.cookie); | ||
| 284 | blocked = cookie & RCFW_CMD_IS_BLOCKING; | 298 | blocked = cookie & RCFW_CMD_IS_BLOCKING; |
| 285 | cookie &= RCFW_MAX_COOKIE_VALUE; | 299 | cookie &= RCFW_MAX_COOKIE_VALUE; |
| 286 | cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; | 300 | cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; |
| 301 | crsqe = &rcfw->crsqe_tbl[cbit]; | ||
| 302 | if (crsqe->resp && | ||
| 303 | crsqe->resp->cookie == mcookie) { | ||
| 304 | memcpy(crsqe->resp, qp_event, sizeof(*qp_event)); | ||
| 305 | crsqe->resp = NULL; | ||
| 306 | } else { | ||
| 307 | dev_err(&rcfw->pdev->dev, | ||
| 308 | "QPLIB: CMD %s resp->cookie = %#x, evnt->cookie = %#x", | ||
| 309 | crsqe->resp ? "mismatch" : "collision", | ||
| 310 | crsqe->resp ? crsqe->resp->cookie : 0, mcookie); | ||
| 311 | } | ||
| 287 | if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap)) | 312 | if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap)) |
| 288 | dev_warn(&rcfw->pdev->dev, | 313 | dev_warn(&rcfw->pdev->dev, |
| 289 | "QPLIB: CMD bit %d was not requested", cbit); | 314 | "QPLIB: CMD bit %d was not requested", cbit); |
| 290 | |||
| 291 | cmdq->cons += crsqe->req_size; | 315 | cmdq->cons += crsqe->req_size; |
| 292 | spin_unlock_irqrestore(&cmdq->lock, flags); | 316 | crsqe->req_size = 0; |
| 317 | |||
| 293 | if (!blocked) | 318 | if (!blocked) |
| 294 | wake_up(&rcfw->waitq); | 319 | wake_up(&rcfw->waitq); |
| 295 | break; | 320 | spin_unlock_irqrestore(&cmdq->lock, flags); |
| 296 | } | 321 | } |
| 297 | return 0; | 322 | return 0; |
| 298 | } | 323 | } |
| @@ -305,12 +330,12 @@ static void bnxt_qplib_service_creq(unsigned long data) | |||
| 305 | struct creq_base *creqe, **creq_ptr; | 330 | struct creq_base *creqe, **creq_ptr; |
| 306 | u32 sw_cons, raw_cons; | 331 | u32 sw_cons, raw_cons; |
| 307 | unsigned long flags; | 332 | unsigned long flags; |
| 308 | u32 type; | 333 | u32 type, budget = CREQ_ENTRY_POLL_BUDGET; |
| 309 | 334 | ||
| 310 | /* Service the CREQ until empty */ | 335 | /* Service the CREQ until budget is over */ |
| 311 | spin_lock_irqsave(&creq->lock, flags); | 336 | spin_lock_irqsave(&creq->lock, flags); |
| 312 | raw_cons = creq->cons; | 337 | raw_cons = creq->cons; |
| 313 | while (1) { | 338 | while (budget > 0) { |
| 314 | sw_cons = HWQ_CMP(raw_cons, creq); | 339 | sw_cons = HWQ_CMP(raw_cons, creq); |
| 315 | creq_ptr = (struct creq_base **)creq->pbl_ptr; | 340 | creq_ptr = (struct creq_base **)creq->pbl_ptr; |
| 316 | creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)]; | 341 | creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)]; |
| @@ -320,15 +345,9 @@ static void bnxt_qplib_service_creq(unsigned long data) | |||
| 320 | type = creqe->type & CREQ_BASE_TYPE_MASK; | 345 | type = creqe->type & CREQ_BASE_TYPE_MASK; |
| 321 | switch (type) { | 346 | switch (type) { |
| 322 | case CREQ_BASE_TYPE_QP_EVENT: | 347 | case CREQ_BASE_TYPE_QP_EVENT: |
| 323 | if (!bnxt_qplib_process_qp_event | 348 | bnxt_qplib_process_qp_event |
| 324 | (rcfw, (struct creq_qp_event *)creqe)) | 349 | (rcfw, (struct creq_qp_event *)creqe); |
| 325 | rcfw->creq_qp_event_processed++; | 350 | rcfw->creq_qp_event_processed++; |
| 326 | else { | ||
| 327 | dev_warn(&rcfw->pdev->dev, "QPLIB: crsqe with"); | ||
| 328 | dev_warn(&rcfw->pdev->dev, | ||
| 329 | "QPLIB: type = 0x%x not handled", | ||
| 330 | type); | ||
| 331 | } | ||
| 332 | break; | 351 | break; |
| 333 | case CREQ_BASE_TYPE_FUNC_EVENT: | 352 | case CREQ_BASE_TYPE_FUNC_EVENT: |
| 334 | if (!bnxt_qplib_process_func_event | 353 | if (!bnxt_qplib_process_func_event |
| @@ -346,7 +365,9 @@ static void bnxt_qplib_service_creq(unsigned long data) | |||
| 346 | break; | 365 | break; |
| 347 | } | 366 | } |
| 348 | raw_cons++; | 367 | raw_cons++; |
| 368 | budget--; | ||
| 349 | } | 369 | } |
| 370 | |||
| 350 | if (creq->cons != raw_cons) { | 371 | if (creq->cons != raw_cons) { |
| 351 | creq->cons = raw_cons; | 372 | creq->cons = raw_cons; |
| 352 | CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, raw_cons, | 373 | CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, raw_cons, |
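bnxt_qplib_service_creq() above now processes at most CREQ_ENTRY_POLL_BUDGET (0x100) entries per run instead of draining the ring completely, which bounds the time spent under creq->lock; whatever is left over is handled the next time the handler runs. The general shape of such a budgeted consumer loop, as a small standalone sketch with hypothetical entry_valid()/process() stand-ins:

    #include <stdbool.h>
    #include <stdio.h>

    #define POLL_BUDGET 0x100   /* mirrors CREQ_ENTRY_POLL_BUDGET from the patch */

    /* Hypothetical stand-ins for "is there a valid entry?" and "handle it". */
    static bool entry_valid(unsigned int idx) { return idx < 10; }
    static void process(unsigned int idx)     { printf("processed %u\n", idx); }

    int main(void)
    {
        unsigned int cons = 0, budget = POLL_BUDGET;

        while (budget > 0) {
            if (!entry_valid(cons))
                break;          /* ring empty: stop early               */
            process(cons);
            cons++;             /* leftover entries wait for the next   */
            budget--;           /* run once the budget has been spent   */
        }
        return 0;
    }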
| @@ -375,23 +396,16 @@ static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance) | |||
| 375 | /* RCFW */ | 396 | /* RCFW */ |
| 376 | int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw) | 397 | int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw) |
| 377 | { | 398 | { |
| 378 | struct creq_deinitialize_fw_resp *resp; | ||
| 379 | struct cmdq_deinitialize_fw req; | 399 | struct cmdq_deinitialize_fw req; |
| 400 | struct creq_deinitialize_fw_resp resp; | ||
| 380 | u16 cmd_flags = 0; | 401 | u16 cmd_flags = 0; |
| 402 | int rc; | ||
| 381 | 403 | ||
| 382 | RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags); | 404 | RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags); |
| 383 | resp = (struct creq_deinitialize_fw_resp *) | 405 | rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, |
| 384 | bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, | 406 | NULL, 0); |
| 385 | NULL, 0); | 407 | if (rc) |
| 386 | if (!resp) | 408 | return rc; |
| 387 | return -EINVAL; | ||
| 388 | |||
| 389 | if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) | ||
| 390 | return -ETIMEDOUT; | ||
| 391 | |||
| 392 | if (resp->status || | ||
| 393 | le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) | ||
| 394 | return -EFAULT; | ||
| 395 | 409 | ||
| 396 | clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags); | 410 | clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags); |
| 397 | return 0; | 411 | return 0; |
| @@ -417,9 +431,10 @@ static int __get_pbl_pg_idx(struct bnxt_qplib_pbl *pbl) | |||
| 417 | int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, | 431 | int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, |
| 418 | struct bnxt_qplib_ctx *ctx, int is_virtfn) | 432 | struct bnxt_qplib_ctx *ctx, int is_virtfn) |
| 419 | { | 433 | { |
| 420 | struct creq_initialize_fw_resp *resp; | ||
| 421 | struct cmdq_initialize_fw req; | 434 | struct cmdq_initialize_fw req; |
| 435 | struct creq_initialize_fw_resp resp; | ||
| 422 | u16 cmd_flags = 0, level; | 436 | u16 cmd_flags = 0, level; |
| 437 | int rc; | ||
| 423 | 438 | ||
| 424 | RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags); | 439 | RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags); |
| 425 | 440 | ||
| @@ -482,37 +497,19 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, | |||
| 482 | 497 | ||
| 483 | skip_ctx_setup: | 498 | skip_ctx_setup: |
| 484 | req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id); | 499 | req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id); |
| 485 | resp = (struct creq_initialize_fw_resp *) | 500 | rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, |
| 486 | bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, | 501 | NULL, 0); |
| 487 | NULL, 0); | 502 | if (rc) |
| 488 | if (!resp) { | 503 | return rc; |
| 489 | dev_err(&rcfw->pdev->dev, | ||
| 490 | "QPLIB: RCFW: INITIALIZE_FW send failed"); | ||
| 491 | return -EINVAL; | ||
| 492 | } | ||
| 493 | if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { | ||
| 494 | /* Cmd timed out */ | ||
| 495 | dev_err(&rcfw->pdev->dev, | ||
| 496 | "QPLIB: RCFW: INITIALIZE_FW timed out"); | ||
| 497 | return -ETIMEDOUT; | ||
| 498 | } | ||
| 499 | if (resp->status || | ||
| 500 | le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { | ||
| 501 | dev_err(&rcfw->pdev->dev, | ||
| 502 | "QPLIB: RCFW: INITIALIZE_FW failed"); | ||
| 503 | return -EINVAL; | ||
| 504 | } | ||
| 505 | set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags); | 504 | set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags); |
| 506 | return 0; | 505 | return 0; |
| 507 | } | 506 | } |
| 508 | 507 | ||
| 509 | void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) | 508 | void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) |
| 510 | { | 509 | { |
| 511 | bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->crsb); | 510 | kfree(rcfw->crsqe_tbl); |
| 512 | kfree(rcfw->crsq.crsq); | ||
| 513 | bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq); | 511 | bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq); |
| 514 | bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq); | 512 | bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq); |
| 515 | |||
| 516 | rcfw->pdev = NULL; | 513 | rcfw->pdev = NULL; |
| 517 | } | 514 | } |
| 518 | 515 | ||
| @@ -539,21 +536,11 @@ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev, | |||
| 539 | goto fail; | 536 | goto fail; |
| 540 | } | 537 | } |
| 541 | 538 | ||
| 542 | rcfw->crsq.max_elements = rcfw->cmdq.max_elements; | 539 | rcfw->crsqe_tbl = kcalloc(rcfw->cmdq.max_elements, |
| 543 | rcfw->crsq.crsq = kcalloc(rcfw->crsq.max_elements, | 540 | sizeof(*rcfw->crsqe_tbl), GFP_KERNEL); |
| 544 | sizeof(*rcfw->crsq.crsq), GFP_KERNEL); | 541 | if (!rcfw->crsqe_tbl) |
| 545 | if (!rcfw->crsq.crsq) | ||
| 546 | goto fail; | 542 | goto fail; |
| 547 | 543 | ||
| 548 | rcfw->crsb.max_elements = BNXT_QPLIB_CRSBE_MAX_CNT; | ||
| 549 | if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->crsb, NULL, 0, | ||
| 550 | &rcfw->crsb.max_elements, | ||
| 551 | BNXT_QPLIB_CRSBE_UNITS, 0, PAGE_SIZE, | ||
| 552 | HWQ_TYPE_CTX)) { | ||
| 553 | dev_err(&rcfw->pdev->dev, | ||
| 554 | "QPLIB: HW channel CRSB allocation failed"); | ||
| 555 | goto fail; | ||
| 556 | } | ||
| 557 | return 0; | 544 | return 0; |
| 558 | 545 | ||
| 559 | fail: | 546 | fail: |
| @@ -606,7 +593,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, | |||
| 606 | int rc; | 593 | int rc; |
| 607 | 594 | ||
| 608 | /* General */ | 595 | /* General */ |
| 609 | atomic_set(&rcfw->seq_num, 0); | 596 | rcfw->seq_num = 0; |
| 610 | rcfw->flags = FIRMWARE_FIRST_FLAG; | 597 | rcfw->flags = FIRMWARE_FIRST_FLAG; |
| 611 | bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD * | 598 | bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD * |
| 612 | sizeof(unsigned long)); | 599 | sizeof(unsigned long)); |
| @@ -636,10 +623,6 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, | |||
| 636 | 623 | ||
| 637 | rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET; | 624 | rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET; |
| 638 | 625 | ||
| 639 | /* CRSQ */ | ||
| 640 | rcfw->crsq.prod = 0; | ||
| 641 | rcfw->crsq.cons = 0; | ||
| 642 | |||
| 643 | /* CREQ */ | 626 | /* CREQ */ |
| 644 | rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION; | 627 | rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION; |
| 645 | res_base = pci_resource_start(pdev, rcfw->creq_bar_reg); | 628 | res_base = pci_resource_start(pdev, rcfw->creq_bar_reg); |
| @@ -692,3 +675,34 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, | |||
| 692 | __iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4); | 675 | __iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4); |
| 693 | return 0; | 676 | return 0; |
| 694 | } | 677 | } |
| 678 | |||
| 679 | struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf( | ||
| 680 | struct bnxt_qplib_rcfw *rcfw, | ||
| 681 | u32 size) | ||
| 682 | { | ||
| 683 | struct bnxt_qplib_rcfw_sbuf *sbuf; | ||
| 684 | |||
| 685 | sbuf = kzalloc(sizeof(*sbuf), GFP_ATOMIC); | ||
| 686 | if (!sbuf) | ||
| 687 | return NULL; | ||
| 688 | |||
| 689 | sbuf->size = size; | ||
| 690 | sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size, | ||
| 691 | &sbuf->dma_addr, GFP_ATOMIC); | ||
| 692 | if (!sbuf->sb) | ||
| 693 | goto bail; | ||
| 694 | |||
| 695 | return sbuf; | ||
| 696 | bail: | ||
| 697 | kfree(sbuf); | ||
| 698 | return NULL; | ||
| 699 | } | ||
| 700 | |||
| 701 | void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw, | ||
| 702 | struct bnxt_qplib_rcfw_sbuf *sbuf) | ||
| 703 | { | ||
| 704 | if (sbuf->sb) | ||
| 705 | dma_free_coherent(&rcfw->pdev->dev, sbuf->size, | ||
| 706 | sbuf->sb, sbuf->dma_addr); | ||
| 707 | kfree(sbuf); | ||
| 708 | } | ||
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h index d3567d75bf58..09ce121770cd 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h | |||
| @@ -73,6 +73,7 @@ | |||
| 73 | #define RCFW_MAX_OUTSTANDING_CMD BNXT_QPLIB_CMDQE_MAX_CNT | 73 | #define RCFW_MAX_OUTSTANDING_CMD BNXT_QPLIB_CMDQE_MAX_CNT |
| 74 | #define RCFW_MAX_COOKIE_VALUE 0x7FFF | 74 | #define RCFW_MAX_COOKIE_VALUE 0x7FFF |
| 75 | #define RCFW_CMD_IS_BLOCKING 0x8000 | 75 | #define RCFW_CMD_IS_BLOCKING 0x8000 |
| 76 | #define RCFW_BLOCKED_CMD_WAIT_COUNT 0x4E20 | ||
| 76 | 77 | ||
| 77 | /* Cmdq contains a fixed number of 16-byte slots */ | 78 | /* Cmdq contains a fixed number of 16-byte slots */ |
| 78 | struct bnxt_qplib_cmdqe { | 79 | struct bnxt_qplib_cmdqe { |
| @@ -94,32 +95,6 @@ struct bnxt_qplib_crsbe { | |||
| 94 | u8 data[1024]; | 95 | u8 data[1024]; |
| 95 | }; | 96 | }; |
| 96 | 97 | ||
| 97 | /* CRSQ SB */ | ||
| 98 | #define BNXT_QPLIB_CRSBE_MAX_CNT 4 | ||
| 99 | #define BNXT_QPLIB_CRSBE_UNITS sizeof(struct bnxt_qplib_crsbe) | ||
| 100 | #define BNXT_QPLIB_CRSBE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_CRSBE_UNITS) | ||
| 101 | |||
| 102 | #define MAX_CRSB_IDX (BNXT_QPLIB_CRSBE_MAX_CNT - 1) | ||
| 103 | #define MAX_CRSB_IDX_PER_PG (BNXT_QPLIB_CRSBE_CNT_PER_PG - 1) | ||
| 104 | |||
| 105 | static inline u32 get_crsb_pg(u32 val) | ||
| 106 | { | ||
| 107 | return (val & ~MAX_CRSB_IDX_PER_PG) / BNXT_QPLIB_CRSBE_CNT_PER_PG; | ||
| 108 | } | ||
| 109 | |||
| 110 | static inline u32 get_crsb_idx(u32 val) | ||
| 111 | { | ||
| 112 | return val & MAX_CRSB_IDX_PER_PG; | ||
| 113 | } | ||
| 114 | |||
| 115 | static inline void bnxt_qplib_crsb_dma_next(dma_addr_t *pg_map_arr, | ||
| 116 | u32 prod, dma_addr_t *dma_addr) | ||
| 117 | { | ||
| 118 | *dma_addr = pg_map_arr[(prod) / BNXT_QPLIB_CRSBE_CNT_PER_PG]; | ||
| 119 | *dma_addr += ((prod) % BNXT_QPLIB_CRSBE_CNT_PER_PG) * | ||
| 120 | BNXT_QPLIB_CRSBE_UNITS; | ||
| 121 | } | ||
| 122 | |||
| 123 | /* CREQ */ | 98 | /* CREQ */ |
| 124 | /* Allocate 1 per QP for async error notification for now */ | 99 | /* Allocate 1 per QP for async error notification for now */ |
| 125 | #define BNXT_QPLIB_CREQE_MAX_CNT (64 * 1024) | 100 | #define BNXT_QPLIB_CREQE_MAX_CNT (64 * 1024) |
| @@ -158,17 +133,19 @@ static inline u32 get_creq_idx(u32 val) | |||
| 158 | #define CREQ_DB(db, raw_cons, cp_bit) \ | 133 | #define CREQ_DB(db, raw_cons, cp_bit) \ |
| 159 | writel(CREQ_DB_CP_FLAGS | ((raw_cons) & ((cp_bit) - 1)), db) | 134 | writel(CREQ_DB_CP_FLAGS | ((raw_cons) & ((cp_bit) - 1)), db) |
| 160 | 135 | ||
| 136 | #define CREQ_ENTRY_POLL_BUDGET 0x100 | ||
| 137 | |||
| 161 | /* HWQ */ | 138 | /* HWQ */ |
| 162 | struct bnxt_qplib_crsqe { | 139 | |
| 163 | struct creq_qp_event qp_event; | 140 | struct bnxt_qplib_crsq { |
| 141 | struct creq_qp_event *resp; | ||
| 164 | u32 req_size; | 142 | u32 req_size; |
| 165 | }; | 143 | }; |
| 166 | 144 | ||
| 167 | struct bnxt_qplib_crsq { | 145 | struct bnxt_qplib_rcfw_sbuf { |
| 168 | struct bnxt_qplib_crsqe *crsq; | 146 | void *sb; |
| 169 | u32 prod; | 147 | dma_addr_t dma_addr; |
| 170 | u32 cons; | 148 | u32 size; |
| 171 | u32 max_elements; | ||
| 172 | }; | 149 | }; |
| 173 | 150 | ||
| 174 | /* RCFW Communication Channels */ | 151 | /* RCFW Communication Channels */ |
| @@ -185,7 +162,7 @@ struct bnxt_qplib_rcfw { | |||
| 185 | wait_queue_head_t waitq; | 162 | wait_queue_head_t waitq; |
| 186 | int (*aeq_handler)(struct bnxt_qplib_rcfw *, | 163 | int (*aeq_handler)(struct bnxt_qplib_rcfw *, |
| 187 | struct creq_func_event *); | 164 | struct creq_func_event *); |
| 188 | atomic_t seq_num; | 165 | u32 seq_num; |
| 189 | 166 | ||
| 190 | /* Bar region info */ | 167 | /* Bar region info */ |
| 191 | void __iomem *cmdq_bar_reg_iomem; | 168 | void __iomem *cmdq_bar_reg_iomem; |
| @@ -203,8 +180,7 @@ struct bnxt_qplib_rcfw { | |||
| 203 | 180 | ||
| 204 | /* Actual Cmd and Resp Queues */ | 181 | /* Actual Cmd and Resp Queues */ |
| 205 | struct bnxt_qplib_hwq cmdq; | 182 | struct bnxt_qplib_hwq cmdq; |
| 206 | struct bnxt_qplib_crsq crsq; | 183 | struct bnxt_qplib_crsq *crsqe_tbl; |
| 207 | struct bnxt_qplib_hwq crsb; | ||
| 208 | }; | 184 | }; |
| 209 | 185 | ||
| 210 | void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw); | 186 | void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw); |
| @@ -219,11 +195,14 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, | |||
| 219 | (struct bnxt_qplib_rcfw *, | 195 | (struct bnxt_qplib_rcfw *, |
| 220 | struct creq_func_event *)); | 196 | struct creq_func_event *)); |
| 221 | 197 | ||
| 222 | int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie); | 198 | struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf( |
| 223 | int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie); | 199 | struct bnxt_qplib_rcfw *rcfw, |
| 224 | void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, | 200 | u32 size); |
| 225 | struct cmdq_base *req, void **crsbe, | 201 | void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw, |
| 226 | u8 is_block); | 202 | struct bnxt_qplib_rcfw_sbuf *sbuf); |
| 203 | int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, | ||
| 204 | struct cmdq_base *req, struct creq_base *resp, | ||
| 205 | void *sbuf, u8 is_block); | ||
| 227 | 206 | ||
| 228 | int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw); | 207 | int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw); |
| 229 | int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, | 208 | int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, |
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h index 6277d802ca4b..2e4855509719 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h | |||
| @@ -48,6 +48,10 @@ extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero; | |||
| 48 | 48 | ||
| 49 | #define HWQ_CMP(idx, hwq) ((idx) & ((hwq)->max_elements - 1)) | 49 | #define HWQ_CMP(idx, hwq) ((idx) & ((hwq)->max_elements - 1)) |
| 50 | 50 | ||
| 51 | #define HWQ_FREE_SLOTS(hwq) (hwq->max_elements - \ | ||
| 52 | ((HWQ_CMP(hwq->prod, hwq)\ | ||
| 53 | - HWQ_CMP(hwq->cons, hwq))\ | ||
| 54 | & (hwq->max_elements - 1))) | ||
| 51 | enum bnxt_qplib_hwq_type { | 55 | enum bnxt_qplib_hwq_type { |
| 52 | HWQ_TYPE_CTX, | 56 | HWQ_TYPE_CTX, |
| 53 | HWQ_TYPE_QUEUE, | 57 | HWQ_TYPE_QUEUE, |
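HWQ_FREE_SLOTS() above replaces the open-coded capacity check in __send_message: the used-entry count is the masked distance from prod to cons, and a request is rejected unless its cmd_size is strictly smaller than the free count. A quick self-contained check of the arithmetic with illustrative values (plain user-space C, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative restatement of HWQ_CMP() and HWQ_FREE_SLOTS(). */
    #define CMP(idx, max)        ((idx) & ((max) - 1))
    #define FREE_SLOTS(p, c, m)  ((m) - ((CMP(p, m) - CMP(c, m)) & ((m) - 1)))

    int main(void)
    {
        uint32_t max = 8;   /* ring size, must be a power of two */

        /* Empty ring: everything is free. */
        printf("%u\n", FREE_SLOTS(0, 0, max));   /* 8 */
        /* Three entries posted, none consumed yet. */
        printf("%u\n", FREE_SLOTS(3, 0, max));   /* 5 */
        /* Wrapped indices still give the right answer. */
        printf("%u\n", FREE_SLOTS(10, 7, max));  /* 5 */
        return 0;
    }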
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index 7b31eccedf11..fde18cf0e406 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c | |||
| @@ -55,37 +55,30 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, | |||
| 55 | struct bnxt_qplib_dev_attr *attr) | 55 | struct bnxt_qplib_dev_attr *attr) |
| 56 | { | 56 | { |
| 57 | struct cmdq_query_func req; | 57 | struct cmdq_query_func req; |
| 58 | struct creq_query_func_resp *resp; | 58 | struct creq_query_func_resp resp; |
| 59 | struct bnxt_qplib_rcfw_sbuf *sbuf; | ||
| 59 | struct creq_query_func_resp_sb *sb; | 60 | struct creq_query_func_resp_sb *sb; |
| 60 | u16 cmd_flags = 0; | 61 | u16 cmd_flags = 0; |
| 61 | u32 temp; | 62 | u32 temp; |
| 62 | u8 *tqm_alloc; | 63 | u8 *tqm_alloc; |
| 63 | int i; | 64 | int i, rc = 0; |
| 64 | 65 | ||
| 65 | RCFW_CMD_PREP(req, QUERY_FUNC, cmd_flags); | 66 | RCFW_CMD_PREP(req, QUERY_FUNC, cmd_flags); |
| 66 | 67 | ||
| 67 | req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS; | 68 | sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb)); |
| 68 | resp = (struct creq_query_func_resp *) | 69 | if (!sbuf) { |
| 69 | bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void **)&sb, | ||
| 70 | 0); | ||
| 71 | if (!resp) { | ||
| 72 | dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC send failed"); | ||
| 73 | return -EINVAL; | ||
| 74 | } | ||
| 75 | if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { | ||
| 76 | /* Cmd timed out */ | ||
| 77 | dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC timed out"); | ||
| 78 | return -ETIMEDOUT; | ||
| 79 | } | ||
| 80 | if (resp->status || | ||
| 81 | le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { | ||
| 82 | dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC failed "); | ||
| 83 | dev_err(&rcfw->pdev->dev, | 70 | dev_err(&rcfw->pdev->dev, |
| 84 | "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", | 71 | "QPLIB: SP: QUERY_FUNC alloc side buffer failed"); |
| 85 | resp->status, le16_to_cpu(req.cookie), | 72 | return -ENOMEM; |
| 86 | le16_to_cpu(resp->cookie)); | ||
| 87 | return -EINVAL; | ||
| 88 | } | 73 | } |
| 74 | |||
| 75 | sb = sbuf->sb; | ||
| 76 | req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS; | ||
| 77 | rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, | ||
| 78 | (void *)sbuf, 0); | ||
| 79 | if (rc) | ||
| 80 | goto bail; | ||
| 81 | |||
| 89 | /* Extract the context from the side buffer */ | 82 | /* Extract the context from the side buffer */ |
| 90 | attr->max_qp = le32_to_cpu(sb->max_qp); | 83 | attr->max_qp = le32_to_cpu(sb->max_qp); |
| 91 | attr->max_qp_rd_atom = | 84 | attr->max_qp_rd_atom = |
| @@ -95,6 +88,11 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, | |||
| 95 | sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ? | 88 | sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ? |
| 96 | BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom; | 89 | BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom; |
| 97 | attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr); | 90 | attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr); |
| 91 | /* | ||
| 92 | * 128 WQEs need to be reserved for the HW (8916). Prevent | ||
| 93 | * reporting the max number. | ||
| 94 | */ | ||
| 95 | attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS; | ||
| 98 | attr->max_qp_sges = sb->max_sge; | 96 | attr->max_qp_sges = sb->max_sge; |
| 99 | attr->max_cq = le32_to_cpu(sb->max_cq); | 97 | attr->max_cq = le32_to_cpu(sb->max_cq); |
| 100 | attr->max_cq_wqes = le32_to_cpu(sb->max_cqe); | 98 | attr->max_cq_wqes = le32_to_cpu(sb->max_cqe); |
| @@ -130,7 +128,10 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, | |||
| 130 | attr->tqm_alloc_reqs[i * 4 + 2] = *(++tqm_alloc); | 128 | attr->tqm_alloc_reqs[i * 4 + 2] = *(++tqm_alloc); |
| 131 | attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc); | 129 | attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc); |
| 132 | } | 130 | } |
| 133 | return 0; | 131 | |
| 132 | bail: | ||
| 133 | bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf); | ||
| 134 | return rc; | ||
| 134 | } | 135 | } |
| 135 | 136 | ||
| 136 | /* SGID */ | 137 | /* SGID */ |
| @@ -178,8 +179,9 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, | |||
| 178 | /* Remove GID from the SGID table */ | 179 | /* Remove GID from the SGID table */ |
| 179 | if (update) { | 180 | if (update) { |
| 180 | struct cmdq_delete_gid req; | 181 | struct cmdq_delete_gid req; |
| 181 | struct creq_delete_gid_resp *resp; | 182 | struct creq_delete_gid_resp resp; |
| 182 | u16 cmd_flags = 0; | 183 | u16 cmd_flags = 0; |
| 184 | int rc; | ||
| 183 | 185 | ||
| 184 | RCFW_CMD_PREP(req, DELETE_GID, cmd_flags); | 186 | RCFW_CMD_PREP(req, DELETE_GID, cmd_flags); |
| 185 | if (sgid_tbl->hw_id[index] == 0xFFFF) { | 187 | if (sgid_tbl->hw_id[index] == 0xFFFF) { |
| @@ -188,31 +190,10 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, | |||
| 188 | return -EINVAL; | 190 | return -EINVAL; |
| 189 | } | 191 | } |
| 190 | req.gid_index = cpu_to_le16(sgid_tbl->hw_id[index]); | 192 | req.gid_index = cpu_to_le16(sgid_tbl->hw_id[index]); |
| 191 | resp = (struct creq_delete_gid_resp *) | 193 | rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, |
| 192 | bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, NULL, | 194 | (void *)&resp, NULL, 0); |
| 193 | 0); | 195 | if (rc) |
| 194 | if (!resp) { | 196 | return rc; |
| 195 | dev_err(&res->pdev->dev, | ||
| 196 | "QPLIB: SP: DELETE_GID send failed"); | ||
| 197 | return -EINVAL; | ||
| 198 | } | ||
| 199 | if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, | ||
| 200 | le16_to_cpu(req.cookie))) { | ||
| 201 | /* Cmd timed out */ | ||
| 202 | dev_err(&res->pdev->dev, | ||
| 203 | "QPLIB: SP: DELETE_GID timed out"); | ||
| 204 | return -ETIMEDOUT; | ||
| 205 | } | ||
| 206 | if (resp->status || | ||
| 207 | le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { | ||
| 208 | dev_err(&res->pdev->dev, | ||
| 209 | "QPLIB: SP: DELETE_GID failed "); | ||
| 210 | dev_err(&res->pdev->dev, | ||
| 211 | "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", | ||
| 212 | resp->status, le16_to_cpu(req.cookie), | ||
| 213 | le16_to_cpu(resp->cookie)); | ||
| 214 | return -EINVAL; | ||
| 215 | } | ||
| 216 | } | 197 | } |
| 217 | memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero, | 198 | memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero, |
| 218 | sizeof(bnxt_qplib_gid_zero)); | 199 | sizeof(bnxt_qplib_gid_zero)); |
| @@ -234,7 +215,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, | |||
| 234 | struct bnxt_qplib_res, | 215 | struct bnxt_qplib_res, |
| 235 | sgid_tbl); | 216 | sgid_tbl); |
| 236 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; | 217 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; |
| 237 | int i, free_idx, rc = 0; | 218 | int i, free_idx; |
| 238 | 219 | ||
| 239 | if (!sgid_tbl) { | 220 | if (!sgid_tbl) { |
| 240 | dev_err(&res->pdev->dev, "QPLIB: SGID table not allocated"); | 221 | dev_err(&res->pdev->dev, "QPLIB: SGID table not allocated"); |
| @@ -266,10 +247,11 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, | |||
| 266 | } | 247 | } |
| 267 | if (update) { | 248 | if (update) { |
| 268 | struct cmdq_add_gid req; | 249 | struct cmdq_add_gid req; |
| 269 | struct creq_add_gid_resp *resp; | 250 | struct creq_add_gid_resp resp; |
| 270 | u16 cmd_flags = 0; | 251 | u16 cmd_flags = 0; |
| 271 | u32 temp32[4]; | 252 | u32 temp32[4]; |
| 272 | u16 temp16[3]; | 253 | u16 temp16[3]; |
| 254 | int rc; | ||
| 273 | 255 | ||
| 274 | RCFW_CMD_PREP(req, ADD_GID, cmd_flags); | 256 | RCFW_CMD_PREP(req, ADD_GID, cmd_flags); |
| 275 | 257 | ||
| @@ -290,31 +272,11 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, | |||
| 290 | req.src_mac[1] = cpu_to_be16(temp16[1]); | 272 | req.src_mac[1] = cpu_to_be16(temp16[1]); |
| 291 | req.src_mac[2] = cpu_to_be16(temp16[2]); | 273 | req.src_mac[2] = cpu_to_be16(temp16[2]); |
| 292 | 274 | ||
| 293 | resp = (struct creq_add_gid_resp *) | 275 | rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, |
| 294 | bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, | 276 | (void *)&resp, NULL, 0); |
| 295 | NULL, 0); | 277 | if (rc) |
| 296 | if (!resp) { | 278 | return rc; |
| 297 | dev_err(&res->pdev->dev, | 279 | sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp.xid); |
| 298 | "QPLIB: SP: ADD_GID send failed"); | ||
| 299 | return -EINVAL; | ||
| 300 | } | ||
| 301 | if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, | ||
| 302 | le16_to_cpu(req.cookie))) { | ||
| 303 | /* Cmd timed out */ | ||
| 304 | dev_err(&res->pdev->dev, | ||
| 305 | "QPIB: SP: ADD_GID timed out"); | ||
| 306 | return -ETIMEDOUT; | ||
| 307 | } | ||
| 308 | if (resp->status || | ||
| 309 | le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { | ||
| 310 | dev_err(&res->pdev->dev, "QPLIB: SP: ADD_GID failed "); | ||
| 311 | dev_err(&res->pdev->dev, | ||
| 312 | "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", | ||
| 313 | resp->status, le16_to_cpu(req.cookie), | ||
| 314 | le16_to_cpu(resp->cookie)); | ||
| 315 | return -EINVAL; | ||
| 316 | } | ||
| 317 | sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp->xid); | ||
| 318 | } | 280 | } |
| 319 | /* Add GID to the sgid_tbl */ | 281 | /* Add GID to the sgid_tbl */ |
| 320 | memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid)); | 282 | memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid)); |
| @@ -325,7 +287,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, | |||
| 325 | 287 | ||
| 326 | *index = free_idx; | 288 | *index = free_idx; |
| 327 | /* unlock */ | 289 | /* unlock */ |
| 328 | return rc; | 290 | return 0; |
| 329 | } | 291 | } |
| 330 | 292 | ||
| 331 | /* pkeys */ | 293 | /* pkeys */ |
| @@ -422,10 +384,11 @@ int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah) | |||
| 422 | { | 384 | { |
| 423 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; | 385 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; |
| 424 | struct cmdq_create_ah req; | 386 | struct cmdq_create_ah req; |
| 425 | struct creq_create_ah_resp *resp; | 387 | struct creq_create_ah_resp resp; |
| 426 | u16 cmd_flags = 0; | 388 | u16 cmd_flags = 0; |
| 427 | u32 temp32[4]; | 389 | u32 temp32[4]; |
| 428 | u16 temp16[3]; | 390 | u16 temp16[3]; |
| 391 | int rc; | ||
| 429 | 392 | ||
| 430 | RCFW_CMD_PREP(req, CREATE_AH, cmd_flags); | 393 | RCFW_CMD_PREP(req, CREATE_AH, cmd_flags); |
| 431 | 394 | ||
| @@ -450,28 +413,12 @@ int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah) | |||
| 450 | req.dest_mac[1] = cpu_to_le16(temp16[1]); | 413 | req.dest_mac[1] = cpu_to_le16(temp16[1]); |
| 451 | req.dest_mac[2] = cpu_to_le16(temp16[2]); | 414 | req.dest_mac[2] = cpu_to_le16(temp16[2]); |
| 452 | 415 | ||
| 453 | resp = (struct creq_create_ah_resp *) | 416 | rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, |
| 454 | bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, | 417 | NULL, 1); |
| 455 | NULL, 1); | 418 | if (rc) |
| 456 | if (!resp) { | 419 | return rc; |
| 457 | dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH send failed"); | 420 | |
| 458 | return -EINVAL; | 421 | ah->id = le32_to_cpu(resp.xid); |
| 459 | } | ||
| 460 | if (!bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie))) { | ||
| 461 | /* Cmd timed out */ | ||
| 462 | dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH timed out"); | ||
| 463 | return -ETIMEDOUT; | ||
| 464 | } | ||
| 465 | if (resp->status || | ||
| 466 | le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { | ||
| 467 | dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH failed "); | ||
| 468 | dev_err(&rcfw->pdev->dev, | ||
| 469 | "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", | ||
| 470 | resp->status, le16_to_cpu(req.cookie), | ||
| 471 | le16_to_cpu(resp->cookie)); | ||
| 472 | return -EINVAL; | ||
| 473 | } | ||
| 474 | ah->id = le32_to_cpu(resp->xid); | ||
| 475 | return 0; | 422 | return 0; |
| 476 | } | 423 | } |
| 477 | 424 | ||
| @@ -479,35 +426,19 @@ int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah) | |||
| 479 | { | 426 | { |
| 480 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; | 427 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; |
| 481 | struct cmdq_destroy_ah req; | 428 | struct cmdq_destroy_ah req; |
| 482 | struct creq_destroy_ah_resp *resp; | 429 | struct creq_destroy_ah_resp resp; |
| 483 | u16 cmd_flags = 0; | 430 | u16 cmd_flags = 0; |
| 431 | int rc; | ||
| 484 | 432 | ||
| 485 | /* Clean up the AH table in the device */ | 433 | /* Clean up the AH table in the device */ |
| 486 | RCFW_CMD_PREP(req, DESTROY_AH, cmd_flags); | 434 | RCFW_CMD_PREP(req, DESTROY_AH, cmd_flags); |
| 487 | 435 | ||
| 488 | req.ah_cid = cpu_to_le32(ah->id); | 436 | req.ah_cid = cpu_to_le32(ah->id); |
| 489 | 437 | ||
| 490 | resp = (struct creq_destroy_ah_resp *) | 438 | rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, |
| 491 | bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, | 439 | NULL, 1); |
| 492 | NULL, 1); | 440 | if (rc) |
| 493 | if (!resp) { | 441 | return rc; |
| 494 | dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH send failed"); | ||
| 495 | return -EINVAL; | ||
| 496 | } | ||
| 497 | if (!bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie))) { | ||
| 498 | /* Cmd timed out */ | ||
| 499 | dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH timed out"); | ||
| 500 | return -ETIMEDOUT; | ||
| 501 | } | ||
| 502 | if (resp->status || | ||
| 503 | le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { | ||
| 504 | dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH failed "); | ||
| 505 | dev_err(&rcfw->pdev->dev, | ||
| 506 | "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", | ||
| 507 | resp->status, le16_to_cpu(req.cookie), | ||
| 508 | le16_to_cpu(resp->cookie)); | ||
| 509 | return -EINVAL; | ||
| 510 | } | ||
| 511 | return 0; | 442 | return 0; |
| 512 | } | 443 | } |
| 513 | 444 | ||
| @@ -516,8 +447,9 @@ int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw) | |||
| 516 | { | 447 | { |
| 517 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; | 448 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; |
| 518 | struct cmdq_deallocate_key req; | 449 | struct cmdq_deallocate_key req; |
| 519 | struct creq_deallocate_key_resp *resp; | 450 | struct creq_deallocate_key_resp resp; |
| 520 | u16 cmd_flags = 0; | 451 | u16 cmd_flags = 0; |
| 452 | int rc; | ||
| 521 | 453 | ||
| 522 | if (mrw->lkey == 0xFFFFFFFF) { | 454 | if (mrw->lkey == 0xFFFFFFFF) { |
| 523 | dev_info(&res->pdev->dev, | 455 | dev_info(&res->pdev->dev, |
| @@ -536,27 +468,11 @@ int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw) | |||
| 536 | else | 468 | else |
| 537 | req.key = cpu_to_le32(mrw->lkey); | 469 | req.key = cpu_to_le32(mrw->lkey); |
| 538 | 470 | ||
| 539 | resp = (struct creq_deallocate_key_resp *) | 471 | rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, |
| 540 | bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, | 472 | NULL, 0); |
| 541 | NULL, 0); | 473 | if (rc) |
| 542 | if (!resp) { | 474 | return rc; |
| 543 | dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR send failed"); | 475 | |
| 544 | return -EINVAL; | ||
| 545 | } | ||
| 546 | if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { | ||
| 547 | /* Cmd timed out */ | ||
| 548 | dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR timed out"); | ||
| 549 | return -ETIMEDOUT; | ||
| 550 | } | ||
| 551 | if (resp->status || | ||
| 552 | le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { | ||
| 553 | dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR failed "); | ||
| 554 | dev_err(&res->pdev->dev, | ||
| 555 | "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", | ||
| 556 | resp->status, le16_to_cpu(req.cookie), | ||
| 557 | le16_to_cpu(resp->cookie)); | ||
| 558 | return -EINVAL; | ||
| 559 | } | ||
| 560 | /* Free the qplib's MRW memory */ | 476 | /* Free the qplib's MRW memory */ |
| 561 | if (mrw->hwq.max_elements) | 477 | if (mrw->hwq.max_elements) |
| 562 | bnxt_qplib_free_hwq(res->pdev, &mrw->hwq); | 478 | bnxt_qplib_free_hwq(res->pdev, &mrw->hwq); |
| @@ -568,9 +484,10 @@ int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw) | |||
| 568 | { | 484 | { |
| 569 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; | 485 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; |
| 570 | struct cmdq_allocate_mrw req; | 486 | struct cmdq_allocate_mrw req; |
| 571 | struct creq_allocate_mrw_resp *resp; | 487 | struct creq_allocate_mrw_resp resp; |
| 572 | u16 cmd_flags = 0; | 488 | u16 cmd_flags = 0; |
| 573 | unsigned long tmp; | 489 | unsigned long tmp; |
| 490 | int rc; | ||
| 574 | 491 | ||
| 575 | RCFW_CMD_PREP(req, ALLOCATE_MRW, cmd_flags); | 492 | RCFW_CMD_PREP(req, ALLOCATE_MRW, cmd_flags); |
| 576 | 493 | ||
| @@ -584,33 +501,17 @@ int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw) | |||
| 584 | tmp = (unsigned long)mrw; | 501 | tmp = (unsigned long)mrw; |
| 585 | req.mrw_handle = cpu_to_le64(tmp); | 502 | req.mrw_handle = cpu_to_le64(tmp); |
| 586 | 503 | ||
| 587 | resp = (struct creq_allocate_mrw_resp *) | 504 | rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, |
| 588 | bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, | 505 | (void *)&resp, NULL, 0); |
| 589 | NULL, 0); | 506 | if (rc) |
| 590 | if (!resp) { | 507 | return rc; |
| 591 | dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW send failed"); | 508 | |
| 592 | return -EINVAL; | ||
| 593 | } | ||
| 594 | if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { | ||
| 595 | /* Cmd timed out */ | ||
| 596 | dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW timed out"); | ||
| 597 | return -ETIMEDOUT; | ||
| 598 | } | ||
| 599 | if (resp->status || | ||
| 600 | le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { | ||
| 601 | dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW failed "); | ||
| 602 | dev_err(&rcfw->pdev->dev, | ||
| 603 | "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", | ||
| 604 | resp->status, le16_to_cpu(req.cookie), | ||
| 605 | le16_to_cpu(resp->cookie)); | ||
| 606 | return -EINVAL; | ||
| 607 | } | ||
| 608 | if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1) || | 509 | if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1) || |
| 609 | (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) || | 510 | (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) || |
| 610 | (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B)) | 511 | (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B)) |
| 611 | mrw->rkey = le32_to_cpu(resp->xid); | 512 | mrw->rkey = le32_to_cpu(resp.xid); |
| 612 | else | 513 | else |
| 613 | mrw->lkey = le32_to_cpu(resp->xid); | 514 | mrw->lkey = le32_to_cpu(resp.xid); |
| 614 | return 0; | 515 | return 0; |
| 615 | } | 516 | } |
| 616 | 517 | ||
| @@ -619,40 +520,17 @@ int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw, | |||
| 619 | { | 520 | { |
| 620 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; | 521 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; |
| 621 | struct cmdq_deregister_mr req; | 522 | struct cmdq_deregister_mr req; |
| 622 | struct creq_deregister_mr_resp *resp; | 523 | struct creq_deregister_mr_resp resp; |
| 623 | u16 cmd_flags = 0; | 524 | u16 cmd_flags = 0; |
| 624 | int rc; | 525 | int rc; |
| 625 | 526 | ||
| 626 | RCFW_CMD_PREP(req, DEREGISTER_MR, cmd_flags); | 527 | RCFW_CMD_PREP(req, DEREGISTER_MR, cmd_flags); |
| 627 | 528 | ||
| 628 | req.lkey = cpu_to_le32(mrw->lkey); | 529 | req.lkey = cpu_to_le32(mrw->lkey); |
| 629 | resp = (struct creq_deregister_mr_resp *) | 530 | rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, |
| 630 | bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, | 531 | (void *)&resp, NULL, block); |
| 631 | NULL, block); | 532 | if (rc) |
| 632 | if (!resp) { | 533 | return rc; |
| 633 | dev_err(&rcfw->pdev->dev, "QPLIB: SP: DEREG_MR send failed"); | ||
| 634 | return -EINVAL; | ||
| 635 | } | ||
| 636 | if (block) | ||
| 637 | rc = bnxt_qplib_rcfw_block_for_resp(rcfw, | ||
| 638 | le16_to_cpu(req.cookie)); | ||
| 639 | else | ||
| 640 | rc = bnxt_qplib_rcfw_wait_for_resp(rcfw, | ||
| 641 | le16_to_cpu(req.cookie)); | ||
| 642 | if (!rc) { | ||
| 643 | /* Cmd timed out */ | ||
| 644 | dev_err(&res->pdev->dev, "QPLIB: SP: DEREG_MR timed out"); | ||
| 645 | return -ETIMEDOUT; | ||
| 646 | } | ||
| 647 | if (resp->status || | ||
| 648 | le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { | ||
| 649 | dev_err(&rcfw->pdev->dev, "QPLIB: SP: DEREG_MR failed "); | ||
| 650 | dev_err(&rcfw->pdev->dev, | ||
| 651 | "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", | ||
| 652 | resp->status, le16_to_cpu(req.cookie), | ||
| 653 | le16_to_cpu(resp->cookie)); | ||
| 654 | return -EINVAL; | ||
| 655 | } | ||
| 656 | 534 | ||
| 657 | /* Free the qplib's MR memory */ | 535 | /* Free the qplib's MR memory */ |
| 658 | if (mrw->hwq.max_elements) { | 536 | if (mrw->hwq.max_elements) { |
| @@ -669,7 +547,7 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr, | |||
| 669 | { | 547 | { |
| 670 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; | 548 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; |
| 671 | struct cmdq_register_mr req; | 549 | struct cmdq_register_mr req; |
| 672 | struct creq_register_mr_resp *resp; | 550 | struct creq_register_mr_resp resp; |
| 673 | u16 cmd_flags = 0, level; | 551 | u16 cmd_flags = 0, level; |
| 674 | int pg_ptrs, pages, i, rc; | 552 | int pg_ptrs, pages, i, rc; |
| 675 | dma_addr_t **pbl_ptr; | 553 | dma_addr_t **pbl_ptr; |
| @@ -730,36 +608,11 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr, | |||
| 730 | req.key = cpu_to_le32(mr->lkey); | 608 | req.key = cpu_to_le32(mr->lkey); |
| 731 | req.mr_size = cpu_to_le64(mr->total_size); | 609 | req.mr_size = cpu_to_le64(mr->total_size); |
| 732 | 610 | ||
| 733 | resp = (struct creq_register_mr_resp *) | 611 | rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, |
| 734 | bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, | 612 | (void *)&resp, NULL, block); |
| 735 | NULL, block); | 613 | if (rc) |
| 736 | if (!resp) { | ||
| 737 | dev_err(&res->pdev->dev, "SP: REG_MR send failed"); | ||
| 738 | rc = -EINVAL; | ||
| 739 | goto fail; | ||
| 740 | } | ||
| 741 | if (block) | ||
| 742 | rc = bnxt_qplib_rcfw_block_for_resp(rcfw, | ||
| 743 | le16_to_cpu(req.cookie)); | ||
| 744 | else | ||
| 745 | rc = bnxt_qplib_rcfw_wait_for_resp(rcfw, | ||
| 746 | le16_to_cpu(req.cookie)); | ||
| 747 | if (!rc) { | ||
| 748 | /* Cmd timed out */ | ||
| 749 | dev_err(&res->pdev->dev, "SP: REG_MR timed out"); | ||
| 750 | rc = -ETIMEDOUT; | ||
| 751 | goto fail; | ||
| 752 | } | ||
| 753 | if (resp->status || | ||
| 754 | le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { | ||
| 755 | dev_err(&res->pdev->dev, "QPLIB: SP: REG_MR failed "); | ||
| 756 | dev_err(&res->pdev->dev, | ||
| 757 | "QPLIB: SP: with status 0x%x cmdq 0x%x resp 0x%x", | ||
| 758 | resp->status, le16_to_cpu(req.cookie), | ||
| 759 | le16_to_cpu(resp->cookie)); | ||
| 760 | rc = -EINVAL; | ||
| 761 | goto fail; | 614 | goto fail; |
| 762 | } | 615 | |
| 763 | return 0; | 616 | return 0; |
| 764 | 617 | ||
| 765 | fail: | 618 | fail: |
| @@ -804,35 +657,15 @@ int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids) | |||
| 804 | { | 657 | { |
| 805 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; | 658 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; |
| 806 | struct cmdq_map_tc_to_cos req; | 659 | struct cmdq_map_tc_to_cos req; |
| 807 | struct creq_map_tc_to_cos_resp *resp; | 660 | struct creq_map_tc_to_cos_resp resp; |
| 808 | u16 cmd_flags = 0; | 661 | u16 cmd_flags = 0; |
| 809 | int tleft; | 662 | int rc = 0; |
| 810 | 663 | ||
| 811 | RCFW_CMD_PREP(req, MAP_TC_TO_COS, cmd_flags); | 664 | RCFW_CMD_PREP(req, MAP_TC_TO_COS, cmd_flags); |
| 812 | req.cos0 = cpu_to_le16(cids[0]); | 665 | req.cos0 = cpu_to_le16(cids[0]); |
| 813 | req.cos1 = cpu_to_le16(cids[1]); | 666 | req.cos1 = cpu_to_le16(cids[1]); |
| 814 | 667 | ||
| 815 | resp = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, NULL, 0); | 668 | rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, |
| 816 | if (!resp) { | 669 | (void *)&resp, NULL, 0); |
| 817 | dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS send failed"); | ||
| 818 | return -EINVAL; | ||
| 819 | } | ||
| 820 | |||
| 821 | tleft = bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie)); | ||
| 822 | if (!tleft) { | ||
| 823 | dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS timed out"); | ||
| 824 | return -ETIMEDOUT; | ||
| 825 | } | ||
| 826 | |||
| 827 | if (resp->status || | ||
| 828 | le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { | ||
| 829 | dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS failed "); | ||
| 830 | dev_err(&res->pdev->dev, | ||
| 831 | "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", | ||
| 832 | resp->status, le16_to_cpu(req.cookie), | ||
| 833 | le16_to_cpu(resp->cookie)); | ||
| 834 | return -EINVAL; | ||
| 835 | } | ||
| 836 | |||
| 837 | return 0; | 670 | return 0; |
| 838 | } | 671 | } |
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h index 1442a617e968..a543f959098b 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h | |||
| @@ -40,6 +40,8 @@ | |||
| 40 | #ifndef __BNXT_QPLIB_SP_H__ | 40 | #ifndef __BNXT_QPLIB_SP_H__ |
| 41 | #define __BNXT_QPLIB_SP_H__ | 41 | #define __BNXT_QPLIB_SP_H__ |
| 42 | 42 | ||
| 43 | #define BNXT_QPLIB_RESERVED_QP_WRS 128 | ||
| 44 | |||
| 43 | struct bnxt_qplib_dev_attr { | 45 | struct bnxt_qplib_dev_attr { |
| 44 | char fw_ver[32]; | 46 | char fw_ver[32]; |
| 45 | u16 max_sgid; | 47 | u16 max_sgid; |
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index b6fe45924c6e..0910faf3587b 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c | |||
| @@ -488,6 +488,7 @@ static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb) | |||
| 488 | 488 | ||
| 489 | ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))); | 489 | ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))); |
| 490 | release_ep_resources(ep); | 490 | release_ep_resources(ep); |
| 491 | kfree_skb(skb); | ||
| 491 | return 0; | 492 | return 0; |
| 492 | } | 493 | } |
| 493 | 494 | ||
| @@ -498,6 +499,7 @@ static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb) | |||
| 498 | ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))); | 499 | ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))); |
| 499 | c4iw_put_ep(&ep->parent_ep->com); | 500 | c4iw_put_ep(&ep->parent_ep->com); |
| 500 | release_ep_resources(ep); | 501 | release_ep_resources(ep); |
| 502 | kfree_skb(skb); | ||
| 501 | return 0; | 503 | return 0; |
| 502 | } | 504 | } |
| 503 | 505 | ||
| @@ -569,11 +571,13 @@ static void abort_arp_failure(void *handle, struct sk_buff *skb) | |||
| 569 | 571 | ||
| 570 | pr_debug("%s rdev %p\n", __func__, rdev); | 572 | pr_debug("%s rdev %p\n", __func__, rdev); |
| 571 | req->cmd = CPL_ABORT_NO_RST; | 573 | req->cmd = CPL_ABORT_NO_RST; |
| 574 | skb_get(skb); | ||
| 572 | ret = c4iw_ofld_send(rdev, skb); | 575 | ret = c4iw_ofld_send(rdev, skb); |
| 573 | if (ret) { | 576 | if (ret) { |
| 574 | __state_set(&ep->com, DEAD); | 577 | __state_set(&ep->com, DEAD); |
| 575 | queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE); | 578 | queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE); |
| 576 | } | 579 | } else |
| 580 | kfree_skb(skb); | ||
| 577 | } | 581 | } |
| 578 | 582 | ||
| 579 | static int send_flowc(struct c4iw_ep *ep) | 583 | static int send_flowc(struct c4iw_ep *ep) |
| @@ -2517,7 +2521,8 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) | |||
| 2517 | goto reject; | 2521 | goto reject; |
| 2518 | } | 2522 | } |
| 2519 | 2523 | ||
| 2520 | hdrs = sizeof(struct iphdr) + sizeof(struct tcphdr) + | 2524 | hdrs = ((iptype == 4) ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) + |
| 2525 | sizeof(struct tcphdr) + | ||
| 2521 | ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0); | 2526 | ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0); |
| 2522 | if (peer_mss && child_ep->mtu > (peer_mss + hdrs)) | 2527 | if (peer_mss && child_ep->mtu > (peer_mss + hdrs)) |
| 2523 | child_ep->mtu = peer_mss + hdrs; | 2528 | child_ep->mtu = peer_mss + hdrs; |
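Note: the cm.c hunk above stops assuming an IPv4 header when sizing the per-connection header overhead used to clamp the child endpoint MTU. A worked check of that arithmetic as a stand-alone sketch; the MSS/MTU numbers are made up, and 20/40/20/12 correspond to sizeof(struct iphdr)/ipv6hdr/tcphdr plus the TCP timestamp option:

#include <stdio.h>

int main(void)
{
	unsigned int tcp = 20, tstamp = 12;		/* TCP header + timestamp option */
	unsigned int hdrs_v4 = 20 + tcp + tstamp;	/* 52 bytes */
	unsigned int hdrs_v6 = 40 + tcp + tstamp;	/* 72 bytes */
	unsigned int peer_mss = 1460, mtu = 9000;

	/* same clamp as the driver: mtu = min(mtu, peer_mss + hdrs) */
	if (peer_mss && mtu > peer_mss + hdrs_v4)
		mtu = peer_mss + hdrs_v4;

	printf("hdrs v4=%u v6=%u, clamped mtu=%u\n", hdrs_v4, hdrs_v6, mtu);
	return 0;
}

Before the change, an IPv6 connection was sized with the 20-byte IPv4 header and so under-counted the overhead by 20 bytes.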
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index 329fb65e8fb0..ae0b79aeea2e 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c | |||
| @@ -767,7 +767,7 @@ void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev, | |||
| 767 | kfree(entry); | 767 | kfree(entry); |
| 768 | } | 768 | } |
| 769 | 769 | ||
| 770 | list_for_each_safe(pos, nxt, &uctx->qpids) { | 770 | list_for_each_safe(pos, nxt, &uctx->cqids) { |
| 771 | entry = list_entry(pos, struct c4iw_qid_list, entry); | 771 | entry = list_entry(pos, struct c4iw_qid_list, entry); |
| 772 | list_del_init(&entry->entry); | 772 | list_del_init(&entry->entry); |
| 773 | kfree(entry); | 773 | kfree(entry); |
| @@ -880,13 +880,15 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev) | |||
| 880 | rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free"); | 880 | rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free"); |
| 881 | if (!rdev->free_workq) { | 881 | if (!rdev->free_workq) { |
| 882 | err = -ENOMEM; | 882 | err = -ENOMEM; |
| 883 | goto err_free_status_page; | 883 | goto err_free_status_page_and_wr_log; |
| 884 | } | 884 | } |
| 885 | 885 | ||
| 886 | rdev->status_page->db_off = 0; | 886 | rdev->status_page->db_off = 0; |
| 887 | 887 | ||
| 888 | return 0; | 888 | return 0; |
| 889 | err_free_status_page: | 889 | err_free_status_page_and_wr_log: |
| 890 | if (c4iw_wr_log && rdev->wr_log) | ||
| 891 | kfree(rdev->wr_log); | ||
| 890 | free_page((unsigned long)rdev->status_page); | 892 | free_page((unsigned long)rdev->status_page); |
| 891 | destroy_ocqp_pool: | 893 | destroy_ocqp_pool: |
| 892 | c4iw_ocqp_pool_destroy(rdev); | 894 | c4iw_ocqp_pool_destroy(rdev); |
| @@ -903,9 +905,11 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev) | |||
| 903 | { | 905 | { |
| 904 | destroy_workqueue(rdev->free_workq); | 906 | destroy_workqueue(rdev->free_workq); |
| 905 | kfree(rdev->wr_log); | 907 | kfree(rdev->wr_log); |
| 908 | c4iw_release_dev_ucontext(rdev, &rdev->uctx); | ||
| 906 | free_page((unsigned long)rdev->status_page); | 909 | free_page((unsigned long)rdev->status_page); |
| 907 | c4iw_pblpool_destroy(rdev); | 910 | c4iw_pblpool_destroy(rdev); |
| 908 | c4iw_rqtpool_destroy(rdev); | 911 | c4iw_rqtpool_destroy(rdev); |
| 912 | c4iw_ocqp_pool_destroy(rdev); | ||
| 909 | c4iw_destroy_resource(&rdev->resource); | 913 | c4iw_destroy_resource(&rdev->resource); |
| 910 | } | 914 | } |
| 911 | 915 | ||
| @@ -971,7 +975,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) | |||
| 971 | devp->rdev.lldi.sge_egrstatuspagesize); | 975 | devp->rdev.lldi.sge_egrstatuspagesize); |
| 972 | 976 | ||
| 973 | devp->rdev.hw_queue.t4_eq_status_entries = | 977 | devp->rdev.hw_queue.t4_eq_status_entries = |
| 974 | devp->rdev.lldi.sge_ingpadboundary > 64 ? 2 : 1; | 978 | devp->rdev.lldi.sge_egrstatuspagesize / 64; |
| 975 | devp->rdev.hw_queue.t4_max_eq_size = 65520; | 979 | devp->rdev.hw_queue.t4_max_eq_size = 65520; |
| 976 | devp->rdev.hw_queue.t4_max_iq_size = 65520; | 980 | devp->rdev.hw_queue.t4_max_iq_size = 65520; |
| 977 | devp->rdev.hw_queue.t4_max_rq_size = 8192 - | 981 | devp->rdev.hw_queue.t4_max_rq_size = 8192 - |
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 5d6b1eeaa9a0..2ba00b89df6a 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c | |||
| @@ -6312,25 +6312,38 @@ static void handle_8051_request(struct hfi1_pportdata *ppd) | |||
| 6312 | } | 6312 | } |
| 6313 | } | 6313 | } |
| 6314 | 6314 | ||
| 6315 | static void write_global_credit(struct hfi1_devdata *dd, | 6315 | /* |
| 6316 | u8 vau, u16 total, u16 shared) | 6316 | * Set up allocation unit value. |
| 6317 | */ | ||
| 6318 | void set_up_vau(struct hfi1_devdata *dd, u8 vau) | ||
| 6317 | { | 6319 | { |
| 6318 | write_csr(dd, SEND_CM_GLOBAL_CREDIT, | 6320 | u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); |
| 6319 | ((u64)total << | 6321 | |
| 6320 | SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) | | 6322 | /* do not modify other values in the register */ |
| 6321 | ((u64)shared << | 6323 | reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK; |
| 6322 | SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) | | 6324 | reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT; |
| 6323 | ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT)); | 6325 | write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg); |
| 6324 | } | 6326 | } |
| 6325 | 6327 | ||
| 6326 | /* | 6328 | /* |
| 6327 | * Set up initial VL15 credits of the remote. Assumes the rest of | 6329 | * Set up initial VL15 credits of the remote. Assumes the rest of |
| 6328 | * the CM credit registers are zero from a previous global or credit reset . | 6330 | * the CM credit registers are zero from a previous global or credit reset. |
| 6331 | * Shared limit for VL15 will always be 0. | ||
| 6329 | */ | 6332 | */ |
| 6330 | void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf) | 6333 | void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf) |
| 6331 | { | 6334 | { |
| 6332 | /* leave shared count at zero for both global and VL15 */ | 6335 | u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); |
| 6333 | write_global_credit(dd, vau, vl15buf, 0); | 6336 | |
| 6337 | /* set initial values for total and shared credit limit */ | ||
| 6338 | reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK | | ||
| 6339 | SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK); | ||
| 6340 | |||
| 6341 | /* | ||
| 6342 | * Set total limit to be equal to VL15 credits. | ||
| 6343 | * Leave shared limit at 0. | ||
| 6344 | */ | ||
| 6345 | reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT; | ||
| 6346 | write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg); | ||
| 6334 | 6347 | ||
| 6335 | write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf | 6348 | write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf |
| 6336 | << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT); | 6349 | << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT); |
| @@ -6348,9 +6361,11 @@ void reset_link_credits(struct hfi1_devdata *dd) | |||
| 6348 | for (i = 0; i < TXE_NUM_DATA_VL; i++) | 6361 | for (i = 0; i < TXE_NUM_DATA_VL; i++) |
| 6349 | write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0); | 6362 | write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0); |
| 6350 | write_csr(dd, SEND_CM_CREDIT_VL15, 0); | 6363 | write_csr(dd, SEND_CM_CREDIT_VL15, 0); |
| 6351 | write_global_credit(dd, 0, 0, 0); | 6364 | write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0); |
| 6352 | /* reset the CM block */ | 6365 | /* reset the CM block */ |
| 6353 | pio_send_control(dd, PSC_CM_RESET); | 6366 | pio_send_control(dd, PSC_CM_RESET); |
| 6367 | /* reset cached value */ | ||
| 6368 | dd->vl15buf_cached = 0; | ||
| 6354 | } | 6369 | } |
| 6355 | 6370 | ||
| 6356 | /* convert a vCU to a CU */ | 6371 | /* convert a vCU to a CU */ |
| @@ -6839,24 +6854,35 @@ void handle_link_up(struct work_struct *work) | |||
| 6839 | { | 6854 | { |
| 6840 | struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, | 6855 | struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, |
| 6841 | link_up_work); | 6856 | link_up_work); |
| 6857 | struct hfi1_devdata *dd = ppd->dd; | ||
| 6858 | |||
| 6842 | set_link_state(ppd, HLS_UP_INIT); | 6859 | set_link_state(ppd, HLS_UP_INIT); |
| 6843 | 6860 | ||
| 6844 | /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */ | 6861 | /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */ |
| 6845 | read_ltp_rtt(ppd->dd); | 6862 | read_ltp_rtt(dd); |
| 6846 | /* | 6863 | /* |
| 6847 | * OPA specifies that certain counters are cleared on a transition | 6864 | * OPA specifies that certain counters are cleared on a transition |
| 6848 | * to link up, so do that. | 6865 | * to link up, so do that. |
| 6849 | */ | 6866 | */ |
| 6850 | clear_linkup_counters(ppd->dd); | 6867 | clear_linkup_counters(dd); |
| 6851 | /* | 6868 | /* |
| 6852 | * And (re)set link up default values. | 6869 | * And (re)set link up default values. |
| 6853 | */ | 6870 | */ |
| 6854 | set_linkup_defaults(ppd); | 6871 | set_linkup_defaults(ppd); |
| 6855 | 6872 | ||
| 6873 | /* | ||
| 6874 | * Set VL15 credits. Use cached value from verify cap interrupt. | ||
| 6875 | * In case of quick linkup or simulator, vl15 value will be set by | ||
| 6876 | * handle_linkup_change. VerifyCap interrupt handler will not be | ||
| 6877 | * called in those scenarios. | ||
| 6878 | */ | ||
| 6879 | if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) | ||
| 6880 | set_up_vl15(dd, dd->vl15buf_cached); | ||
| 6881 | |||
| 6856 | /* enforce link speed enabled */ | 6882 | /* enforce link speed enabled */ |
| 6857 | if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) { | 6883 | if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) { |
| 6858 | /* oops - current speed is not enabled, bounce */ | 6884 | /* oops - current speed is not enabled, bounce */ |
| 6859 | dd_dev_err(ppd->dd, | 6885 | dd_dev_err(dd, |
| 6860 | "Link speed active 0x%x is outside enabled 0x%x, downing link\n", | 6886 | "Link speed active 0x%x is outside enabled 0x%x, downing link\n", |
| 6861 | ppd->link_speed_active, ppd->link_speed_enabled); | 6887 | ppd->link_speed_active, ppd->link_speed_enabled); |
| 6862 | set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0, | 6888 | set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0, |
| @@ -7357,7 +7383,14 @@ void handle_verify_cap(struct work_struct *work) | |||
| 7357 | */ | 7383 | */ |
| 7358 | if (vau == 0) | 7384 | if (vau == 0) |
| 7359 | vau = 1; | 7385 | vau = 1; |
| 7360 | set_up_vl15(dd, vau, vl15buf); | 7386 | set_up_vau(dd, vau); |
| 7387 | |||
| 7388 | /* | ||
| 7389 | * Set VL15 credits to 0 in global credit register. Cache remote VL15 | ||
| 7390 | * credits value and wait for link-up interrupt to set it. | ||
| 7391 | */ | ||
| 7392 | set_up_vl15(dd, 0); | ||
| 7393 | dd->vl15buf_cached = vl15buf; | ||
| 7361 | 7394 | ||
| 7362 | /* set up the LCB CRC mode */ | 7395 | /* set up the LCB CRC mode */ |
| 7363 | crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc; | 7396 | crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc; |
diff --git a/drivers/infiniband/hw/hfi1/chip_registers.h b/drivers/infiniband/hw/hfi1/chip_registers.h index 5bfa839d1c48..793514f1d15f 100644 --- a/drivers/infiniband/hw/hfi1/chip_registers.h +++ b/drivers/infiniband/hw/hfi1/chip_registers.h | |||
| @@ -839,7 +839,9 @@ | |||
| 839 | #define SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK 0x8ull | 839 | #define SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK 0x8ull |
| 840 | #define SEND_CM_CTRL_RESETCSR 0x0000000000000020ull | 840 | #define SEND_CM_CTRL_RESETCSR 0x0000000000000020ull |
| 841 | #define SEND_CM_GLOBAL_CREDIT (TXE + 0x000000000508) | 841 | #define SEND_CM_GLOBAL_CREDIT (TXE + 0x000000000508) |
| 842 | #define SEND_CM_GLOBAL_CREDIT_AU_MASK 0x7ull | ||
| 842 | #define SEND_CM_GLOBAL_CREDIT_AU_SHIFT 16 | 843 | #define SEND_CM_GLOBAL_CREDIT_AU_SHIFT 16 |
| 844 | #define SEND_CM_GLOBAL_CREDIT_AU_SMASK 0x70000ull | ||
| 843 | #define SEND_CM_GLOBAL_CREDIT_RESETCSR 0x0000094000030000ull | 845 | #define SEND_CM_GLOBAL_CREDIT_RESETCSR 0x0000094000030000ull |
| 844 | #define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK 0xFFFFull | 846 | #define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK 0xFFFFull |
| 845 | #define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT 0 | 847 | #define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT 0 |
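Note: the new SEND_CM_GLOBAL_CREDIT_AU_MASK/_SMASK definitions above follow the file's usual pattern, where SMASK is the field mask pre-shifted to its register position; that is what the read-modify-write in set_up_vau()/set_up_vl15() in the chip.c hunk relies on. A small stand-alone check of the relationship, using local macro copies and the register's documented RESETCSR value:

#include <assert.h>
#include <stdint.h>

#define AU_MASK   0x7ull	/* local copy of SEND_CM_GLOBAL_CREDIT_AU_MASK */
#define AU_SHIFT  16		/* ..._AU_SHIFT */
#define AU_SMASK  0x70000ull	/* ..._AU_SMASK == AU_MASK << AU_SHIFT */

int main(void)
{
	uint64_t reg = 0x0000094000030000ull;	/* SEND_CM_GLOBAL_CREDIT_RESETCSR */
	uint8_t vau = 1;

	assert((AU_MASK << AU_SHIFT) == AU_SMASK);

	/* the read-modify-write pattern used by set_up_vau() */
	reg &= ~AU_SMASK;
	reg |= (uint64_t)vau << AU_SHIFT;
	assert(((reg & AU_SMASK) >> AU_SHIFT) == vau);
	return 0;
}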
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h index da322e6668cc..414a04a481c2 100644 --- a/drivers/infiniband/hw/hfi1/hfi.h +++ b/drivers/infiniband/hw/hfi1/hfi.h | |||
| @@ -1045,6 +1045,14 @@ struct hfi1_devdata { | |||
| 1045 | /* initial vl15 credits to use */ | 1045 | /* initial vl15 credits to use */ |
| 1046 | u16 vl15_init; | 1046 | u16 vl15_init; |
| 1047 | 1047 | ||
| 1048 | /* | ||
| 1049 | * Cached value for vl15buf, read during verify cap interrupt. VL15 | ||
| 1050 | * credits are to be kept at 0 and set when handling the link-up | ||
| 1051 | * interrupt. This removes the possibility of receiving VL15 MAD | ||
| 1052 | * packets before this HFI is ready. | ||
| 1053 | */ | ||
| 1054 | u16 vl15buf_cached; | ||
| 1055 | |||
| 1048 | /* Misc small ints */ | 1056 | /* Misc small ints */ |
| 1049 | u8 n_krcv_queues; | 1057 | u8 n_krcv_queues; |
| 1050 | u8 qos_shift; | 1058 | u8 qos_shift; |
| @@ -1598,7 +1606,8 @@ int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encode); | |||
| 1598 | int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t); | 1606 | int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t); |
| 1599 | int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t); | 1607 | int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t); |
| 1600 | 1608 | ||
| 1601 | void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf); | 1609 | void set_up_vau(struct hfi1_devdata *dd, u8 vau); |
| 1610 | void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf); | ||
| 1602 | void reset_link_credits(struct hfi1_devdata *dd); | 1611 | void reset_link_credits(struct hfi1_devdata *dd); |
| 1603 | void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu); | 1612 | void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu); |
| 1604 | 1613 | ||
diff --git a/drivers/infiniband/hw/hfi1/intr.c b/drivers/infiniband/hw/hfi1/intr.c index ba265d0ae93b..04a5082d5ac5 100644 --- a/drivers/infiniband/hw/hfi1/intr.c +++ b/drivers/infiniband/hw/hfi1/intr.c | |||
| @@ -130,7 +130,8 @@ void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup) | |||
| 130 | * the remote values. Both sides must be using the values. | 130 | * the remote values. Both sides must be using the values. |
| 131 | */ | 131 | */ |
| 132 | if (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR) { | 132 | if (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR) { |
| 133 | set_up_vl15(dd, dd->vau, dd->vl15_init); | 133 | set_up_vau(dd, dd->vau); |
| 134 | set_up_vl15(dd, dd->vl15_init); | ||
| 134 | assign_remote_cm_au_table(dd, dd->vcu); | 135 | assign_remote_cm_au_table(dd, dd->vcu); |
| 135 | } | 136 | } |
| 136 | 137 | ||
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c index 93faf86d54b6..6a9f6f9819e1 100644 --- a/drivers/infiniband/hw/hfi1/pcie.c +++ b/drivers/infiniband/hw/hfi1/pcie.c | |||
| @@ -207,8 +207,8 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev) | |||
| 207 | /* | 207 | /* |
| 208 | * Save BARs and command to rewrite after device reset. | 208 | * Save BARs and command to rewrite after device reset. |
| 209 | */ | 209 | */ |
| 210 | dd->pcibar0 = addr; | 210 | pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0, &dd->pcibar0); |
| 211 | dd->pcibar1 = addr >> 32; | 211 | pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1, &dd->pcibar1); |
| 212 | pci_read_config_dword(dd->pcidev, PCI_ROM_ADDRESS, &dd->pci_rom); | 212 | pci_read_config_dword(dd->pcidev, PCI_ROM_ADDRESS, &dd->pci_rom); |
| 213 | pci_read_config_word(dd->pcidev, PCI_COMMAND, &dd->pci_command); | 213 | pci_read_config_word(dd->pcidev, PCI_COMMAND, &dd->pci_command); |
| 214 | pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &dd->pcie_devctl); | 214 | pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &dd->pcie_devctl); |
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c index 069bdaf061ab..1080778a1f7c 100644 --- a/drivers/infiniband/hw/hfi1/rc.c +++ b/drivers/infiniband/hw/hfi1/rc.c | |||
| @@ -2159,8 +2159,11 @@ send_last: | |||
| 2159 | ret = hfi1_rvt_get_rwqe(qp, 1); | 2159 | ret = hfi1_rvt_get_rwqe(qp, 1); |
| 2160 | if (ret < 0) | 2160 | if (ret < 0) |
| 2161 | goto nack_op_err; | 2161 | goto nack_op_err; |
| 2162 | if (!ret) | 2162 | if (!ret) { |
| 2163 | /* peer will send again */ | ||
| 2164 | rvt_put_ss(&qp->r_sge); | ||
| 2163 | goto rnr_nak; | 2165 | goto rnr_nak; |
| 2166 | } | ||
| 2164 | wc.ex.imm_data = ohdr->u.rc.imm_data; | 2167 | wc.ex.imm_data = ohdr->u.rc.imm_data; |
| 2165 | wc.wc_flags = IB_WC_WITH_IMM; | 2168 | wc.wc_flags = IB_WC_WITH_IMM; |
| 2166 | goto send_last; | 2169 | goto send_last; |
diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c index 50d140d25e38..2f3bbcac1e34 100644 --- a/drivers/infiniband/hw/hfi1/sysfs.c +++ b/drivers/infiniband/hw/hfi1/sysfs.c | |||
| @@ -196,7 +196,8 @@ static const struct sysfs_ops port_cc_sysfs_ops = { | |||
| 196 | }; | 196 | }; |
| 197 | 197 | ||
| 198 | static struct attribute *port_cc_default_attributes[] = { | 198 | static struct attribute *port_cc_default_attributes[] = { |
| 199 | &cc_prescan_attr.attr | 199 | &cc_prescan_attr.attr, |
| 200 | NULL | ||
| 200 | }; | 201 | }; |
| 201 | 202 | ||
| 202 | static struct kobj_type port_cc_ktype = { | 203 | static struct kobj_type port_cc_ktype = { |
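Note: the sysfs.c fix above adds the NULL sentinel that was missing from port_cc_default_attributes[]; the kobject core walks a ktype's default attribute array until it finds NULL, so an unterminated array runs off the end. A user-space mock of that walk (struct attribute here is a stand-in, not the kernel's type):

#include <stdio.h>

struct attribute { const char *name; };

static struct attribute cc_prescan = { "cc_prescan" };

static struct attribute *port_cc_attrs[] = {
	&cc_prescan,
	NULL,			/* sentinel the walker stops on */
};

int main(void)
{
	for (struct attribute **a = port_cc_attrs; *a; a++)
		printf("creating sysfs file: %s\n", (*a)->name);
	return 0;
}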
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c index f3bc01bce483..6ae98aa7f74e 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_cm.c +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c | |||
| @@ -784,7 +784,6 @@ static void i40iw_build_mpa_v2(struct i40iw_cm_node *cm_node, | |||
| 784 | } | 784 | } |
| 785 | 785 | ||
| 786 | ctrl_ird |= IETF_PEER_TO_PEER; | 786 | ctrl_ird |= IETF_PEER_TO_PEER; |
| 787 | ctrl_ird |= IETF_FLPDU_ZERO_LEN; | ||
| 788 | 787 | ||
| 789 | switch (mpa_key) { | 788 | switch (mpa_key) { |
| 790 | case MPA_KEY_REQUEST: | 789 | case MPA_KEY_REQUEST: |
| @@ -2446,8 +2445,8 @@ static void i40iw_handle_rcv_mpa(struct i40iw_cm_node *cm_node, | |||
| 2446 | } else { | 2445 | } else { |
| 2447 | type = I40IW_CM_EVENT_CONNECTED; | 2446 | type = I40IW_CM_EVENT_CONNECTED; |
| 2448 | cm_node->state = I40IW_CM_STATE_OFFLOADED; | 2447 | cm_node->state = I40IW_CM_STATE_OFFLOADED; |
| 2449 | i40iw_send_ack(cm_node); | ||
| 2450 | } | 2448 | } |
| 2449 | i40iw_send_ack(cm_node); | ||
| 2451 | break; | 2450 | break; |
| 2452 | default: | 2451 | default: |
| 2453 | pr_err("%s wrong cm_node state =%d\n", __func__, cm_node->state); | 2452 | pr_err("%s wrong cm_node state =%d\n", __func__, cm_node->state); |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c index f82483b3d1e7..a027e2072477 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c +++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c | |||
| @@ -285,28 +285,20 @@ void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2pa | |||
| 285 | struct i40iw_sc_dev *dev = vsi->dev; | 285 | struct i40iw_sc_dev *dev = vsi->dev; |
| 286 | struct i40iw_sc_qp *qp = NULL; | 286 | struct i40iw_sc_qp *qp = NULL; |
| 287 | bool qs_handle_change = false; | 287 | bool qs_handle_change = false; |
| 288 | bool mss_change = false; | ||
| 289 | unsigned long flags; | 288 | unsigned long flags; |
| 290 | u16 qs_handle; | 289 | u16 qs_handle; |
| 291 | int i; | 290 | int i; |
| 292 | 291 | ||
| 293 | if (vsi->mss != l2params->mss) { | 292 | vsi->mss = l2params->mss; |
| 294 | mss_change = true; | ||
| 295 | vsi->mss = l2params->mss; | ||
| 296 | } | ||
| 297 | 293 | ||
| 298 | i40iw_fill_qos_list(l2params->qs_handle_list); | 294 | i40iw_fill_qos_list(l2params->qs_handle_list); |
| 299 | for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) { | 295 | for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) { |
| 300 | qs_handle = l2params->qs_handle_list[i]; | 296 | qs_handle = l2params->qs_handle_list[i]; |
| 301 | if (vsi->qos[i].qs_handle != qs_handle) | 297 | if (vsi->qos[i].qs_handle != qs_handle) |
| 302 | qs_handle_change = true; | 298 | qs_handle_change = true; |
| 303 | else if (!mss_change) | ||
| 304 | continue; /* no MSS nor qs handle change */ | ||
| 305 | spin_lock_irqsave(&vsi->qos[i].lock, flags); | 299 | spin_lock_irqsave(&vsi->qos[i].lock, flags); |
| 306 | qp = i40iw_get_qp(&vsi->qos[i].qplist, qp); | 300 | qp = i40iw_get_qp(&vsi->qos[i].qplist, qp); |
| 307 | while (qp) { | 301 | while (qp) { |
| 308 | if (mss_change) | ||
| 309 | i40iw_qp_mss_modify(dev, qp); | ||
| 310 | if (qs_handle_change) { | 302 | if (qs_handle_change) { |
| 311 | qp->qs_handle = qs_handle; | 303 | qp->qs_handle = qs_handle; |
| 312 | /* issue cqp suspend command */ | 304 | /* issue cqp suspend command */ |
| @@ -2395,7 +2387,6 @@ static enum i40iw_status_code i40iw_sc_qp_modify( | |||
| 2395 | 2387 | ||
| 2396 | set_64bit_val(wqe, | 2388 | set_64bit_val(wqe, |
| 2397 | 8, | 2389 | 8, |
| 2398 | LS_64(info->new_mss, I40IW_CQPSQ_QP_NEWMSS) | | ||
| 2399 | LS_64(term_len, I40IW_CQPSQ_QP_TERMLEN)); | 2390 | LS_64(term_len, I40IW_CQPSQ_QP_TERMLEN)); |
| 2400 | 2391 | ||
| 2401 | set_64bit_val(wqe, 16, qp->hw_host_ctx_pa); | 2392 | set_64bit_val(wqe, 16, qp->hw_host_ctx_pa); |
| @@ -2410,7 +2401,6 @@ static enum i40iw_status_code i40iw_sc_qp_modify( | |||
| 2410 | LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) | | 2401 | LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) | |
| 2411 | LS_64(info->force_loopback, I40IW_CQPSQ_QP_FORCELOOPBACK) | | 2402 | LS_64(info->force_loopback, I40IW_CQPSQ_QP_FORCELOOPBACK) | |
| 2412 | LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) | | 2403 | LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) | |
| 2413 | LS_64(info->mss_change, I40IW_CQPSQ_QP_MSSCHANGE) | | ||
| 2414 | LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) | | 2404 | LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) | |
| 2415 | LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) | | 2405 | LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) | |
| 2416 | LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) | | 2406 | LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) | |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c index 2728af3103ce..a3f18a22f5ed 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_main.c +++ b/drivers/infiniband/hw/i40iw/i40iw_main.c | |||
| @@ -1319,13 +1319,13 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev, | |||
| 1319 | status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_QUERY_FPM_BUF_SIZE, | 1319 | status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_QUERY_FPM_BUF_SIZE, |
| 1320 | I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK); | 1320 | I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK); |
| 1321 | if (status) | 1321 | if (status) |
| 1322 | goto exit; | 1322 | goto error; |
| 1323 | info.fpm_query_buf_pa = mem.pa; | 1323 | info.fpm_query_buf_pa = mem.pa; |
| 1324 | info.fpm_query_buf = mem.va; | 1324 | info.fpm_query_buf = mem.va; |
| 1325 | status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_COMMIT_FPM_BUF_SIZE, | 1325 | status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_COMMIT_FPM_BUF_SIZE, |
| 1326 | I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK); | 1326 | I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK); |
| 1327 | if (status) | 1327 | if (status) |
| 1328 | goto exit; | 1328 | goto error; |
| 1329 | info.fpm_commit_buf_pa = mem.pa; | 1329 | info.fpm_commit_buf_pa = mem.pa; |
| 1330 | info.fpm_commit_buf = mem.va; | 1330 | info.fpm_commit_buf = mem.va; |
| 1331 | info.hmc_fn_id = ldev->fid; | 1331 | info.hmc_fn_id = ldev->fid; |
| @@ -1347,11 +1347,9 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev, | |||
| 1347 | info.exception_lan_queue = 1; | 1347 | info.exception_lan_queue = 1; |
| 1348 | info.vchnl_send = i40iw_virtchnl_send; | 1348 | info.vchnl_send = i40iw_virtchnl_send; |
| 1349 | status = i40iw_device_init(&iwdev->sc_dev, &info); | 1349 | status = i40iw_device_init(&iwdev->sc_dev, &info); |
| 1350 | exit: | 1350 | |
| 1351 | if (status) { | 1351 | if (status) |
| 1352 | kfree(iwdev->hmc_info_mem); | 1352 | goto error; |
| 1353 | iwdev->hmc_info_mem = NULL; | ||
| 1354 | } | ||
| 1355 | memset(&vsi_info, 0, sizeof(vsi_info)); | 1353 | memset(&vsi_info, 0, sizeof(vsi_info)); |
| 1356 | vsi_info.dev = &iwdev->sc_dev; | 1354 | vsi_info.dev = &iwdev->sc_dev; |
| 1357 | vsi_info.back_vsi = (void *)iwdev; | 1355 | vsi_info.back_vsi = (void *)iwdev; |
| @@ -1362,11 +1360,19 @@ exit: | |||
| 1362 | memset(&stats_info, 0, sizeof(stats_info)); | 1360 | memset(&stats_info, 0, sizeof(stats_info)); |
| 1363 | stats_info.fcn_id = ldev->fid; | 1361 | stats_info.fcn_id = ldev->fid; |
| 1364 | stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL); | 1362 | stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL); |
| 1363 | if (!stats_info.pestat) { | ||
| 1364 | status = I40IW_ERR_NO_MEMORY; | ||
| 1365 | goto error; | ||
| 1366 | } | ||
| 1365 | stats_info.stats_initialize = true; | 1367 | stats_info.stats_initialize = true; |
| 1366 | if (stats_info.pestat) | 1368 | if (stats_info.pestat) |
| 1367 | i40iw_vsi_stats_init(&iwdev->vsi, &stats_info); | 1369 | i40iw_vsi_stats_init(&iwdev->vsi, &stats_info); |
| 1368 | } | 1370 | } |
| 1369 | return status; | 1371 | return status; |
| 1372 | error: | ||
| 1373 | kfree(iwdev->hmc_info_mem); | ||
| 1374 | iwdev->hmc_info_mem = NULL; | ||
| 1375 | return status; | ||
| 1370 | } | 1376 | } |
| 1371 | 1377 | ||
| 1372 | /** | 1378 | /** |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_osdep.h b/drivers/infiniband/hw/i40iw/i40iw_osdep.h index aa66c1c63dfa..f27be3e7830b 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_osdep.h +++ b/drivers/infiniband/hw/i40iw/i40iw_osdep.h | |||
| @@ -199,7 +199,6 @@ void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev, | |||
| 199 | struct i40iw_virtchnl_work_info *work_info, u32 iw_vf_idx); | 199 | struct i40iw_virtchnl_work_info *work_info, u32 iw_vf_idx); |
| 200 | void *i40iw_remove_head(struct list_head *list); | 200 | void *i40iw_remove_head(struct list_head *list); |
| 201 | void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend); | 201 | void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend); |
| 202 | void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp); | ||
| 203 | 202 | ||
| 204 | void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len); | 203 | void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len); |
| 205 | void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred); | 204 | void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred); |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h index 7b76259752b0..959ec81fba99 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_type.h +++ b/drivers/infiniband/hw/i40iw/i40iw_type.h | |||
| @@ -541,7 +541,6 @@ struct i40iw_create_qp_info { | |||
| 541 | struct i40iw_modify_qp_info { | 541 | struct i40iw_modify_qp_info { |
| 542 | u64 rx_win0; | 542 | u64 rx_win0; |
| 543 | u64 rx_win1; | 543 | u64 rx_win1; |
| 544 | u16 new_mss; | ||
| 545 | u8 next_iwarp_state; | 544 | u8 next_iwarp_state; |
| 546 | u8 termlen; | 545 | u8 termlen; |
| 547 | bool ord_valid; | 546 | bool ord_valid; |
| @@ -554,7 +553,6 @@ struct i40iw_modify_qp_info { | |||
| 554 | bool dont_send_term; | 553 | bool dont_send_term; |
| 555 | bool dont_send_fin; | 554 | bool dont_send_fin; |
| 556 | bool cached_var_valid; | 555 | bool cached_var_valid; |
| 557 | bool mss_change; | ||
| 558 | bool force_loopback; | 556 | bool force_loopback; |
| 559 | }; | 557 | }; |
| 560 | 558 | ||
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c index 409a3781e735..56d986924a4c 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_utils.c +++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c | |||
| @@ -757,23 +757,6 @@ void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, b | |||
| 757 | } | 757 | } |
| 758 | 758 | ||
| 759 | /** | 759 | /** |
| 760 | * i40iw_qp_mss_modify - modify mss for qp | ||
| 761 | * @dev: hardware control device structure | ||
| 762 | * @qp: hardware control qp | ||
| 763 | */ | ||
| 764 | void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp) | ||
| 765 | { | ||
| 766 | struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev; | ||
| 767 | struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp; | ||
| 768 | struct i40iw_modify_qp_info info; | ||
| 769 | |||
| 770 | memset(&info, 0, sizeof(info)); | ||
| 771 | info.mss_change = true; | ||
| 772 | info.new_mss = qp->vsi->mss; | ||
| 773 | i40iw_hw_modify_qp(iwdev, iwqp, &info, false); | ||
| 774 | } | ||
| 775 | |||
| 776 | /** | ||
| 777 | * i40iw_term_modify_qp - modify qp for term message | 760 | * i40iw_term_modify_qp - modify qp for term message |
| 778 | * @qp: hardware control qp | 761 | * @qp: hardware control qp |
| 779 | * @next_state: qp's next state | 762 | * @next_state: qp's next state |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c index f4d13683a403..48fd327f876b 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c +++ b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c | |||
| @@ -443,10 +443,7 @@ enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev, | |||
| 443 | if (!dev->vchnl_up) | 443 | if (!dev->vchnl_up) |
| 444 | return I40IW_ERR_NOT_READY; | 444 | return I40IW_ERR_NOT_READY; |
| 445 | if (vchnl_msg->iw_op_code == I40IW_VCHNL_OP_GET_VER) { | 445 | if (vchnl_msg->iw_op_code == I40IW_VCHNL_OP_GET_VER) { |
| 446 | if (vchnl_msg->iw_op_ver != I40IW_VCHNL_OP_GET_VER_V0) | 446 | vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg); |
| 447 | vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg); | ||
| 448 | else | ||
| 449 | vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg); | ||
| 450 | return I40IW_SUCCESS; | 447 | return I40IW_SUCCESS; |
| 451 | } | 448 | } |
| 452 | for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) { | 449 | for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) { |
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index b4694717f6f3..21d31cb1325f 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c | |||
| @@ -1578,6 +1578,7 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc | |||
| 1578 | if (port < 0) | 1578 | if (port < 0) |
| 1579 | return; | 1579 | return; |
| 1580 | ah.av.ib.port_pd = cpu_to_be32(port << 24 | (be32_to_cpu(ah.av.ib.port_pd) & 0xffffff)); | 1580 | ah.av.ib.port_pd = cpu_to_be32(port << 24 | (be32_to_cpu(ah.av.ib.port_pd) & 0xffffff)); |
| 1581 | ah.ibah.type = rdma_ah_find_type(&dev->ib_dev, port); | ||
| 1581 | 1582 | ||
| 1582 | mlx4_ib_query_ah(&ah.ibah, &ah_attr); | 1583 | mlx4_ib_query_ah(&ah.ibah, &ah_attr); |
| 1583 | if (rdma_ah_get_ah_flags(&ah_attr) & IB_AH_GRH) | 1584 | if (rdma_ah_get_ah_flags(&ah_attr) & IB_AH_GRH) |
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index d45772da0963..9ecc089d4529 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c | |||
| @@ -2979,6 +2979,18 @@ error_0: | |||
| 2979 | return ret; | 2979 | return ret; |
| 2980 | } | 2980 | } |
| 2981 | 2981 | ||
| 2982 | static u8 mlx5_get_umr_fence(u8 umr_fence_cap) | ||
| 2983 | { | ||
| 2984 | switch (umr_fence_cap) { | ||
| 2985 | case MLX5_CAP_UMR_FENCE_NONE: | ||
| 2986 | return MLX5_FENCE_MODE_NONE; | ||
| 2987 | case MLX5_CAP_UMR_FENCE_SMALL: | ||
| 2988 | return MLX5_FENCE_MODE_INITIATOR_SMALL; | ||
| 2989 | default: | ||
| 2990 | return MLX5_FENCE_MODE_STRONG_ORDERING; | ||
| 2991 | } | ||
| 2992 | } | ||
| 2993 | |||
| 2982 | static int create_dev_resources(struct mlx5_ib_resources *devr) | 2994 | static int create_dev_resources(struct mlx5_ib_resources *devr) |
| 2983 | { | 2995 | { |
| 2984 | struct ib_srq_init_attr attr; | 2996 | struct ib_srq_init_attr attr; |
| @@ -3680,8 +3692,10 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) | |||
| 3680 | dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status; | 3692 | dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status; |
| 3681 | dev->ib_dev.get_port_immutable = mlx5_port_immutable; | 3693 | dev->ib_dev.get_port_immutable = mlx5_port_immutable; |
| 3682 | dev->ib_dev.get_dev_fw_str = get_dev_fw_str; | 3694 | dev->ib_dev.get_dev_fw_str = get_dev_fw_str; |
| 3683 | dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev; | 3695 | if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) { |
| 3684 | dev->ib_dev.free_rdma_netdev = mlx5_ib_free_rdma_netdev; | 3696 | dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev; |
| 3697 | dev->ib_dev.free_rdma_netdev = mlx5_ib_free_rdma_netdev; | ||
| 3698 | } | ||
| 3685 | if (mlx5_core_is_pf(mdev)) { | 3699 | if (mlx5_core_is_pf(mdev)) { |
| 3686 | dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config; | 3700 | dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config; |
| 3687 | dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state; | 3701 | dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state; |
| @@ -3693,6 +3707,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) | |||
| 3693 | 3707 | ||
| 3694 | mlx5_ib_internal_fill_odp_caps(dev); | 3708 | mlx5_ib_internal_fill_odp_caps(dev); |
| 3695 | 3709 | ||
| 3710 | dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence)); | ||
| 3711 | |||
| 3696 | if (MLX5_CAP_GEN(mdev, imaicl)) { | 3712 | if (MLX5_CAP_GEN(mdev, imaicl)) { |
| 3697 | dev->ib_dev.alloc_mw = mlx5_ib_alloc_mw; | 3713 | dev->ib_dev.alloc_mw = mlx5_ib_alloc_mw; |
| 3698 | dev->ib_dev.dealloc_mw = mlx5_ib_dealloc_mw; | 3714 | dev->ib_dev.dealloc_mw = mlx5_ib_dealloc_mw; |
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 38c877bc45e5..bdcf25410c99 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h | |||
| @@ -349,7 +349,7 @@ struct mlx5_ib_qp { | |||
| 349 | struct mlx5_ib_wq rq; | 349 | struct mlx5_ib_wq rq; |
| 350 | 350 | ||
| 351 | u8 sq_signal_bits; | 351 | u8 sq_signal_bits; |
| 352 | u8 fm_cache; | 352 | u8 next_fence; |
| 353 | struct mlx5_ib_wq sq; | 353 | struct mlx5_ib_wq sq; |
| 354 | 354 | ||
| 355 | /* serialize qp state modifications | 355 | /* serialize qp state modifications |
| @@ -654,6 +654,7 @@ struct mlx5_ib_dev { | |||
| 654 | struct mlx5_ib_port *port; | 654 | struct mlx5_ib_port *port; |
| 655 | struct mlx5_sq_bfreg bfreg; | 655 | struct mlx5_sq_bfreg bfreg; |
| 656 | struct mlx5_sq_bfreg fp_bfreg; | 656 | struct mlx5_sq_bfreg fp_bfreg; |
| 657 | u8 umr_fence; | ||
| 657 | }; | 658 | }; |
| 658 | 659 | ||
| 659 | static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq) | 660 | static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq) |
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 93959e1e43a3..ebb6768684de 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c | |||
| @@ -3738,24 +3738,6 @@ static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16) | |||
| 3738 | } | 3738 | } |
| 3739 | } | 3739 | } |
| 3740 | 3740 | ||
| 3741 | static u8 get_fence(u8 fence, struct ib_send_wr *wr) | ||
| 3742 | { | ||
| 3743 | if (unlikely(wr->opcode == IB_WR_LOCAL_INV && | ||
| 3744 | wr->send_flags & IB_SEND_FENCE)) | ||
| 3745 | return MLX5_FENCE_MODE_STRONG_ORDERING; | ||
| 3746 | |||
| 3747 | if (unlikely(fence)) { | ||
| 3748 | if (wr->send_flags & IB_SEND_FENCE) | ||
| 3749 | return MLX5_FENCE_MODE_SMALL_AND_FENCE; | ||
| 3750 | else | ||
| 3751 | return fence; | ||
| 3752 | } else if (unlikely(wr->send_flags & IB_SEND_FENCE)) { | ||
| 3753 | return MLX5_FENCE_MODE_FENCE; | ||
| 3754 | } | ||
| 3755 | |||
| 3756 | return 0; | ||
| 3757 | } | ||
| 3758 | |||
| 3759 | static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, | 3741 | static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, |
| 3760 | struct mlx5_wqe_ctrl_seg **ctrl, | 3742 | struct mlx5_wqe_ctrl_seg **ctrl, |
| 3761 | struct ib_send_wr *wr, unsigned *idx, | 3743 | struct ib_send_wr *wr, unsigned *idx, |
| @@ -3784,8 +3766,7 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, | |||
| 3784 | static void finish_wqe(struct mlx5_ib_qp *qp, | 3766 | static void finish_wqe(struct mlx5_ib_qp *qp, |
| 3785 | struct mlx5_wqe_ctrl_seg *ctrl, | 3767 | struct mlx5_wqe_ctrl_seg *ctrl, |
| 3786 | u8 size, unsigned idx, u64 wr_id, | 3768 | u8 size, unsigned idx, u64 wr_id, |
| 3787 | int nreq, u8 fence, u8 next_fence, | 3769 | int nreq, u8 fence, u32 mlx5_opcode) |
| 3788 | u32 mlx5_opcode) | ||
| 3789 | { | 3770 | { |
| 3790 | u8 opmod = 0; | 3771 | u8 opmod = 0; |
| 3791 | 3772 | ||
| @@ -3793,7 +3774,6 @@ static void finish_wqe(struct mlx5_ib_qp *qp, | |||
| 3793 | mlx5_opcode | ((u32)opmod << 24)); | 3774 | mlx5_opcode | ((u32)opmod << 24)); |
| 3794 | ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8)); | 3775 | ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8)); |
| 3795 | ctrl->fm_ce_se |= fence; | 3776 | ctrl->fm_ce_se |= fence; |
| 3796 | qp->fm_cache = next_fence; | ||
| 3797 | if (unlikely(qp->wq_sig)) | 3777 | if (unlikely(qp->wq_sig)) |
| 3798 | ctrl->signature = wq_sig(ctrl); | 3778 | ctrl->signature = wq_sig(ctrl); |
| 3799 | 3779 | ||
| @@ -3853,7 +3833,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
| 3853 | goto out; | 3833 | goto out; |
| 3854 | } | 3834 | } |
| 3855 | 3835 | ||
| 3856 | fence = qp->fm_cache; | ||
| 3857 | num_sge = wr->num_sge; | 3836 | num_sge = wr->num_sge; |
| 3858 | if (unlikely(num_sge > qp->sq.max_gs)) { | 3837 | if (unlikely(num_sge > qp->sq.max_gs)) { |
| 3859 | mlx5_ib_warn(dev, "\n"); | 3838 | mlx5_ib_warn(dev, "\n"); |
| @@ -3870,6 +3849,19 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
| 3870 | goto out; | 3849 | goto out; |
| 3871 | } | 3850 | } |
| 3872 | 3851 | ||
| 3852 | if (wr->opcode == IB_WR_LOCAL_INV || | ||
| 3853 | wr->opcode == IB_WR_REG_MR) { | ||
| 3854 | fence = dev->umr_fence; | ||
| 3855 | next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; | ||
| 3856 | } else if (wr->send_flags & IB_SEND_FENCE) { | ||
| 3857 | if (qp->next_fence) | ||
| 3858 | fence = MLX5_FENCE_MODE_SMALL_AND_FENCE; | ||
| 3859 | else | ||
| 3860 | fence = MLX5_FENCE_MODE_FENCE; | ||
| 3861 | } else { | ||
| 3862 | fence = qp->next_fence; | ||
| 3863 | } | ||
| 3864 | |||
| 3873 | switch (ibqp->qp_type) { | 3865 | switch (ibqp->qp_type) { |
| 3874 | case IB_QPT_XRC_INI: | 3866 | case IB_QPT_XRC_INI: |
| 3875 | xrc = seg; | 3867 | xrc = seg; |
| @@ -3896,7 +3888,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
| 3896 | goto out; | 3888 | goto out; |
| 3897 | 3889 | ||
| 3898 | case IB_WR_LOCAL_INV: | 3890 | case IB_WR_LOCAL_INV: |
| 3899 | next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; | ||
| 3900 | qp->sq.wr_data[idx] = IB_WR_LOCAL_INV; | 3891 | qp->sq.wr_data[idx] = IB_WR_LOCAL_INV; |
| 3901 | ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey); | 3892 | ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey); |
| 3902 | set_linv_wr(qp, &seg, &size); | 3893 | set_linv_wr(qp, &seg, &size); |
| @@ -3904,7 +3895,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
| 3904 | break; | 3895 | break; |
| 3905 | 3896 | ||
| 3906 | case IB_WR_REG_MR: | 3897 | case IB_WR_REG_MR: |
| 3907 | next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; | ||
| 3908 | qp->sq.wr_data[idx] = IB_WR_REG_MR; | 3898 | qp->sq.wr_data[idx] = IB_WR_REG_MR; |
| 3909 | ctrl->imm = cpu_to_be32(reg_wr(wr)->key); | 3899 | ctrl->imm = cpu_to_be32(reg_wr(wr)->key); |
| 3910 | err = set_reg_wr(qp, reg_wr(wr), &seg, &size); | 3900 | err = set_reg_wr(qp, reg_wr(wr), &seg, &size); |
| @@ -3927,9 +3917,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
| 3927 | goto out; | 3917 | goto out; |
| 3928 | } | 3918 | } |
| 3929 | 3919 | ||
| 3930 | finish_wqe(qp, ctrl, size, idx, wr->wr_id, | 3920 | finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, |
| 3931 | nreq, get_fence(fence, wr), | 3921 | fence, MLX5_OPCODE_UMR); |
| 3932 | next_fence, MLX5_OPCODE_UMR); | ||
| 3933 | /* | 3922 | /* |
| 3934 | * SET_PSV WQEs are not signaled and solicited | 3923 | * SET_PSV WQEs are not signaled and solicited |
| 3935 | * on error | 3924 | * on error |
| @@ -3954,9 +3943,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
| 3954 | goto out; | 3943 | goto out; |
| 3955 | } | 3944 | } |
| 3956 | 3945 | ||
| 3957 | finish_wqe(qp, ctrl, size, idx, wr->wr_id, | 3946 | finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, |
| 3958 | nreq, get_fence(fence, wr), | 3947 | fence, MLX5_OPCODE_SET_PSV); |
| 3959 | next_fence, MLX5_OPCODE_SET_PSV); | ||
| 3960 | err = begin_wqe(qp, &seg, &ctrl, wr, | 3948 | err = begin_wqe(qp, &seg, &ctrl, wr, |
| 3961 | &idx, &size, nreq); | 3949 | &idx, &size, nreq); |
| 3962 | if (err) { | 3950 | if (err) { |
| @@ -3966,7 +3954,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
| 3966 | goto out; | 3954 | goto out; |
| 3967 | } | 3955 | } |
| 3968 | 3956 | ||
| 3969 | next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; | ||
| 3970 | err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire, | 3957 | err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire, |
| 3971 | mr->sig->psv_wire.psv_idx, &seg, | 3958 | mr->sig->psv_wire.psv_idx, &seg, |
| 3972 | &size); | 3959 | &size); |
| @@ -3976,9 +3963,9 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
| 3976 | goto out; | 3963 | goto out; |
| 3977 | } | 3964 | } |
| 3978 | 3965 | ||
| 3979 | finish_wqe(qp, ctrl, size, idx, wr->wr_id, | 3966 | finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, |
| 3980 | nreq, get_fence(fence, wr), | 3967 | fence, MLX5_OPCODE_SET_PSV); |
| 3981 | next_fence, MLX5_OPCODE_SET_PSV); | 3968 | qp->next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; |
| 3982 | num_sge = 0; | 3969 | num_sge = 0; |
| 3983 | goto skip_psv; | 3970 | goto skip_psv; |
| 3984 | 3971 | ||
| @@ -4089,8 +4076,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
| 4089 | } | 4076 | } |
| 4090 | } | 4077 | } |
| 4091 | 4078 | ||
| 4092 | finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, | 4079 | qp->next_fence = next_fence; |
| 4093 | get_fence(fence, wr), next_fence, | 4080 | finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, fence, |
| 4094 | mlx5_ib_opcode[wr->opcode]); | 4081 | mlx5_ib_opcode[wr->opcode]); |
| 4095 | skip_psv: | 4082 | skip_psv: |
| 4096 | if (0) | 4083 | if (0) |
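Taken together, the mlx5 hunks above replace the per-work-request get_fence() helper with a cached qp->next_fence plus a dev->umr_fence derived from the umr_fence capability: LOCAL_INV and REG_MR use the device UMR fence and force the following WQE to a small initiator fence, IB_SEND_FENCE escalates to small-and-fence when a fence is already pending, and everything else inherits the cached value. Below is a standalone sketch of that selection, modelling one work request per post call; the enum names and values are illustrative stand-ins, not the kernel's definitions.

/*
 * Standalone model of the fence selection above, posting one work
 * request per call. Enum values are placeholders; only the decision
 * logic mirrors the diff.
 */
#include <stdio.h>

enum fence_mode {
	FENCE_NONE,
	FENCE_INITIATOR_SMALL,
	FENCE_FENCE,
	FENCE_SMALL_AND_FENCE,
	FENCE_STRONG_ORDERING,
};

enum wr_opcode { WR_SEND, WR_LOCAL_INV, WR_REG_MR };

#define SEND_FLAG_FENCE 0x1u

struct model_qp {
	unsigned char next_fence;	/* replaces the old fm_cache */
	unsigned char umr_fence;	/* from the device's umr_fence capability */
};

/* Returns the fence bits for this WQE and updates qp->next_fence. */
static unsigned char post_one(struct model_qp *qp, enum wr_opcode op,
			      unsigned int send_flags)
{
	unsigned char fence, next_fence = FENCE_NONE;

	if (op == WR_LOCAL_INV || op == WR_REG_MR) {
		/* UMR-class work: use the device UMR fence, fence the next WQE */
		fence = qp->umr_fence;
		next_fence = FENCE_INITIATOR_SMALL;
	} else if (send_flags & SEND_FLAG_FENCE) {
		fence = qp->next_fence ? FENCE_SMALL_AND_FENCE : FENCE_FENCE;
	} else {
		fence = qp->next_fence;
	}

	qp->next_fence = next_fence;
	return fence;
}

int main(void)
{
	struct model_qp qp = {
		.next_fence = FENCE_NONE,
		.umr_fence  = FENCE_STRONG_ORDERING,
	};

	printf("REG_MR:      fence=%d next=%d\n",
	       post_one(&qp, WR_REG_MR, 0), qp.next_fence);
	printf("plain SEND:  fence=%d next=%d\n",
	       post_one(&qp, WR_SEND, 0), qp.next_fence);
	printf("fenced SEND: fence=%d next=%d\n",
	       post_one(&qp, WR_SEND, SEND_FLAG_FENCE), qp.next_fence);
	return 0;
}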
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index fb983df7c157..30b256a2c54e 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c | |||
| @@ -610,7 +610,6 @@ static void build_mpa_v2(struct nes_cm_node *cm_node, | |||
| 610 | ctrl_ord = cm_node->ord_size & IETF_NO_IRD_ORD; | 610 | ctrl_ord = cm_node->ord_size & IETF_NO_IRD_ORD; |
| 611 | } | 611 | } |
| 612 | ctrl_ird |= IETF_PEER_TO_PEER; | 612 | ctrl_ird |= IETF_PEER_TO_PEER; |
| 613 | ctrl_ird |= IETF_FLPDU_ZERO_LEN; | ||
| 614 | 613 | ||
| 615 | switch (mpa_key) { | 614 | switch (mpa_key) { |
| 616 | case MPA_KEY_REQUEST: | 615 | case MPA_KEY_REQUEST: |
| @@ -1826,7 +1825,7 @@ static void handle_rcv_mpa(struct nes_cm_node *cm_node, struct sk_buff *skb) | |||
| 1826 | type = NES_CM_EVENT_CONNECTED; | 1825 | type = NES_CM_EVENT_CONNECTED; |
| 1827 | cm_node->state = NES_CM_STATE_TSA; | 1826 | cm_node->state = NES_CM_STATE_TSA; |
| 1828 | } | 1827 | } |
| 1829 | 1828 | send_ack(cm_node, NULL); | |
| 1830 | break; | 1829 | break; |
| 1831 | default: | 1830 | default: |
| 1832 | WARN_ON(1); | 1831 | WARN_ON(1); |
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h index aa08c76a4245..d961f79b317c 100644 --- a/drivers/infiniband/hw/qedr/qedr.h +++ b/drivers/infiniband/hw/qedr/qedr.h | |||
| @@ -58,7 +58,10 @@ | |||
| 58 | #define QEDR_MSG_QP " QP" | 58 | #define QEDR_MSG_QP " QP" |
| 59 | #define QEDR_MSG_GSI " GSI" | 59 | #define QEDR_MSG_GSI " GSI" |
| 60 | 60 | ||
| 61 | #define QEDR_CQ_MAGIC_NUMBER (0x11223344) | 61 | #define QEDR_CQ_MAGIC_NUMBER (0x11223344) |
| 62 | |||
| 63 | #define FW_PAGE_SIZE (RDMA_RING_PAGE_SIZE) | ||
| 64 | #define FW_PAGE_SHIFT (12) | ||
| 62 | 65 | ||
| 63 | struct qedr_dev; | 66 | struct qedr_dev; |
| 64 | 67 | ||
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c index 3d7705cec770..d86dbe814d98 100644 --- a/drivers/infiniband/hw/qedr/qedr_cm.c +++ b/drivers/infiniband/hw/qedr/qedr_cm.c | |||
| @@ -270,11 +270,13 @@ static inline int qedr_gsi_build_header(struct qedr_dev *dev, | |||
| 270 | return rc; | 270 | return rc; |
| 271 | } | 271 | } |
| 272 | 272 | ||
| 273 | vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev); | 273 | if (sgid_attr.ndev) { |
| 274 | if (vlan_id < VLAN_CFI_MASK) | 274 | vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev); |
| 275 | has_vlan = true; | 275 | if (vlan_id < VLAN_CFI_MASK) |
| 276 | if (sgid_attr.ndev) | 276 | has_vlan = true; |
| 277 | |||
| 277 | dev_put(sgid_attr.ndev); | 278 | dev_put(sgid_attr.ndev); |
| 279 | } | ||
| 278 | 280 | ||
| 279 | if (!memcmp(&sgid, &zgid, sizeof(sgid))) { | 281 | if (!memcmp(&sgid, &zgid, sizeof(sgid))) { |
| 280 | DP_ERR(dev, "gsi post send: GID not found GID index %d\n", | 282 | DP_ERR(dev, "gsi post send: GID not found GID index %d\n", |
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 17685cfea6a2..d6723c365c7f 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c | |||
| @@ -653,14 +653,15 @@ static int qedr_prepare_pbl_tbl(struct qedr_dev *dev, | |||
| 653 | 653 | ||
| 654 | static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem, | 654 | static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem, |
| 655 | struct qedr_pbl *pbl, | 655 | struct qedr_pbl *pbl, |
| 656 | struct qedr_pbl_info *pbl_info) | 656 | struct qedr_pbl_info *pbl_info, u32 pg_shift) |
| 657 | { | 657 | { |
| 658 | int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0; | 658 | int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0; |
| 659 | u32 fw_pg_cnt, fw_pg_per_umem_pg; | ||
| 659 | struct qedr_pbl *pbl_tbl; | 660 | struct qedr_pbl *pbl_tbl; |
| 660 | struct scatterlist *sg; | 661 | struct scatterlist *sg; |
| 661 | struct regpair *pbe; | 662 | struct regpair *pbe; |
| 663 | u64 pg_addr; | ||
| 662 | int entry; | 664 | int entry; |
| 663 | u32 addr; | ||
| 664 | 665 | ||
| 665 | if (!pbl_info->num_pbes) | 666 | if (!pbl_info->num_pbes) |
| 666 | return; | 667 | return; |
| @@ -683,29 +684,35 @@ static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem, | |||
| 683 | 684 | ||
| 684 | shift = umem->page_shift; | 685 | shift = umem->page_shift; |
| 685 | 686 | ||
| 687 | fw_pg_per_umem_pg = BIT(umem->page_shift - pg_shift); | ||
| 688 | |||
| 686 | for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { | 689 | for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { |
| 687 | pages = sg_dma_len(sg) >> shift; | 690 | pages = sg_dma_len(sg) >> shift; |
| 691 | pg_addr = sg_dma_address(sg); | ||
| 688 | for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) { | 692 | for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) { |
| 689 | /* store the page address in pbe */ | 693 | for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) { |
| 690 | pbe->lo = cpu_to_le32(sg_dma_address(sg) + | 694 | pbe->lo = cpu_to_le32(pg_addr); |
| 691 | (pg_cnt << shift)); | 695 | pbe->hi = cpu_to_le32(upper_32_bits(pg_addr)); |
| 692 | addr = upper_32_bits(sg_dma_address(sg) + | 696 | |
| 693 | (pg_cnt << shift)); | 697 | pg_addr += BIT(pg_shift); |
| 694 | pbe->hi = cpu_to_le32(addr); | 698 | pbe_cnt++; |
| 695 | pbe_cnt++; | 699 | total_num_pbes++; |
| 696 | total_num_pbes++; | 700 | pbe++; |
| 697 | pbe++; | 701 | |
| 698 | 702 | if (total_num_pbes == pbl_info->num_pbes) | |
| 699 | if (total_num_pbes == pbl_info->num_pbes) | 703 | return; |
| 700 | return; | 704 | |
| 701 | 705 | /* If the given pbl is full storing the pbes, | |
| 702 | /* If the given pbl is full storing the pbes, | 706 | * move to next pbl. |
| 703 | * move to next pbl. | 707 | */ |
| 704 | */ | 708 | if (pbe_cnt == |
| 705 | if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) { | 709 | (pbl_info->pbl_size / sizeof(u64))) { |
| 706 | pbl_tbl++; | 710 | pbl_tbl++; |
| 707 | pbe = (struct regpair *)pbl_tbl->va; | 711 | pbe = (struct regpair *)pbl_tbl->va; |
| 708 | pbe_cnt = 0; | 712 | pbe_cnt = 0; |
| 713 | } | ||
| 714 | |||
| 715 | fw_pg_cnt++; | ||
| 709 | } | 716 | } |
| 710 | } | 717 | } |
| 711 | } | 718 | } |
| @@ -754,7 +761,7 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx, | |||
| 754 | u64 buf_addr, size_t buf_len, | 761 | u64 buf_addr, size_t buf_len, |
| 755 | int access, int dmasync) | 762 | int access, int dmasync) |
| 756 | { | 763 | { |
| 757 | int page_cnt; | 764 | u32 fw_pages; |
| 758 | int rc; | 765 | int rc; |
| 759 | 766 | ||
| 760 | q->buf_addr = buf_addr; | 767 | q->buf_addr = buf_addr; |
| @@ -766,8 +773,10 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx, | |||
| 766 | return PTR_ERR(q->umem); | 773 | return PTR_ERR(q->umem); |
| 767 | } | 774 | } |
| 768 | 775 | ||
| 769 | page_cnt = ib_umem_page_count(q->umem); | 776 | fw_pages = ib_umem_page_count(q->umem) << |
| 770 | rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt, 0); | 777 | (q->umem->page_shift - FW_PAGE_SHIFT); |
| 778 | |||
| 779 | rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0); | ||
| 771 | if (rc) | 780 | if (rc) |
| 772 | goto err0; | 781 | goto err0; |
| 773 | 782 | ||
| @@ -777,7 +786,8 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx, | |||
| 777 | goto err0; | 786 | goto err0; |
| 778 | } | 787 | } |
| 779 | 788 | ||
| 780 | qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info); | 789 | qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info, |
| 790 | FW_PAGE_SHIFT); | ||
| 781 | 791 | ||
| 782 | return 0; | 792 | return 0; |
| 783 | 793 | ||
| @@ -2226,7 +2236,7 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len, | |||
| 2226 | goto err1; | 2236 | goto err1; |
| 2227 | 2237 | ||
| 2228 | qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table, | 2238 | qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table, |
| 2229 | &mr->info.pbl_info); | 2239 | &mr->info.pbl_info, mr->umem->page_shift); |
| 2230 | 2240 | ||
| 2231 | rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid); | 2241 | rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid); |
| 2232 | if (rc) { | 2242 | if (rc) { |
| @@ -3209,6 +3219,10 @@ static int process_req(struct qedr_dev *dev, struct qedr_qp *qp, | |||
| 3209 | case IB_WC_REG_MR: | 3219 | case IB_WC_REG_MR: |
| 3210 | qp->wqe_wr_id[qp->sq.cons].mr->info.completed++; | 3220 | qp->wqe_wr_id[qp->sq.cons].mr->info.completed++; |
| 3211 | break; | 3221 | break; |
| 3222 | case IB_WC_RDMA_READ: | ||
| 3223 | case IB_WC_SEND: | ||
| 3224 | wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len; | ||
| 3225 | break; | ||
| 3212 | default: | 3226 | default: |
| 3213 | break; | 3227 | break; |
| 3214 | } | 3228 | } |
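The qedr changes above stop assuming the firmware page size equals the umem page size: the PBL table is now sized in firmware pages (ib_umem_page_count() shifted up by page_shift - FW_PAGE_SHIFT), and qedr_populate_pbls emits BIT(umem page_shift - pg_shift) firmware-sized entries per umem page. A small standalone sketch of that address walk follows, assuming 4 KiB firmware pages; the names and the print-out are illustrative, where the kernel writes regpair entries instead.

/*
 * Standalone model of splitting umem pages into firmware-sized PBL
 * entries, as in the qedr hunk above.
 */
#include <stdint.h>
#include <stdio.h>

#define FW_PAGE_SHIFT 12	/* 4 KiB firmware pages (assumed) */

static void emit_pbes(uint64_t dma_addr, uint64_t dma_len,
		      unsigned int umem_page_shift)
{
	unsigned int fw_pg_per_umem_pg = 1u << (umem_page_shift - FW_PAGE_SHIFT);
	uint64_t pages = dma_len >> umem_page_shift;
	uint64_t pg_addr = dma_addr;

	for (uint64_t pg = 0; pg < pages; pg++) {
		for (unsigned int fw = 0; fw < fw_pg_per_umem_pg; fw++) {
			/* low/high halves would go into pbe->lo / pbe->hi */
			printf("pbe: lo=0x%08x hi=0x%08x\n",
			       (unsigned int)(pg_addr & 0xffffffffu),
			       (unsigned int)(pg_addr >> 32));
			pg_addr += 1ull << FW_PAGE_SHIFT;
		}
	}
}

int main(void)
{
	/* One 64 KiB umem page yields sixteen 4 KiB firmware entries. */
	emit_pbes(0x100000000ull, 1ull << 16, 16);
	return 0;
}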
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c index fc8b88514da5..4ddbcac5eabe 100644 --- a/drivers/infiniband/hw/qib/qib_rc.c +++ b/drivers/infiniband/hw/qib/qib_rc.c | |||
| @@ -1956,8 +1956,10 @@ send_last: | |||
| 1956 | ret = qib_get_rwqe(qp, 1); | 1956 | ret = qib_get_rwqe(qp, 1); |
| 1957 | if (ret < 0) | 1957 | if (ret < 0) |
| 1958 | goto nack_op_err; | 1958 | goto nack_op_err; |
| 1959 | if (!ret) | 1959 | if (!ret) { |
| 1960 | rvt_put_ss(&qp->r_sge); | ||
| 1960 | goto rnr_nak; | 1961 | goto rnr_nak; |
| 1962 | } | ||
| 1961 | wc.ex.imm_data = ohdr->u.rc.imm_data; | 1963 | wc.ex.imm_data = ohdr->u.rc.imm_data; |
| 1962 | hdrsize += 4; | 1964 | hdrsize += 4; |
| 1963 | wc.wc_flags = IB_WC_WITH_IMM; | 1965 | wc.wc_flags = IB_WC_WITH_IMM; |
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h index ecdba2fce083..1ac5b8551a4d 100644 --- a/drivers/infiniband/sw/rxe/rxe.h +++ b/drivers/infiniband/sw/rxe/rxe.h | |||
| @@ -68,6 +68,7 @@ | |||
| 68 | static inline u32 rxe_crc32(struct rxe_dev *rxe, | 68 | static inline u32 rxe_crc32(struct rxe_dev *rxe, |
| 69 | u32 crc, void *next, size_t len) | 69 | u32 crc, void *next, size_t len) |
| 70 | { | 70 | { |
| 71 | u32 retval; | ||
| 71 | int err; | 72 | int err; |
| 72 | 73 | ||
| 73 | SHASH_DESC_ON_STACK(shash, rxe->tfm); | 74 | SHASH_DESC_ON_STACK(shash, rxe->tfm); |
| @@ -81,7 +82,9 @@ static inline u32 rxe_crc32(struct rxe_dev *rxe, | |||
| 81 | return crc32_le(crc, next, len); | 82 | return crc32_le(crc, next, len); |
| 82 | } | 83 | } |
| 83 | 84 | ||
| 84 | return *(u32 *)shash_desc_ctx(shash); | 85 | retval = *(u32 *)shash_desc_ctx(shash); |
| 86 | barrier_data(shash_desc_ctx(shash)); | ||
| 87 | return retval; | ||
| 85 | } | 88 | } |
| 86 | 89 | ||
| 87 | int rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu); | 90 | int rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu); |
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index 83d709e74dfb..073e66783f1d 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c | |||
| @@ -740,13 +740,8 @@ static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr, | |||
| 740 | 740 | ||
| 741 | sge = ibwr->sg_list; | 741 | sge = ibwr->sg_list; |
| 742 | for (i = 0; i < num_sge; i++, sge++) { | 742 | for (i = 0; i < num_sge; i++, sge++) { |
| 743 | if (qp->is_user && copy_from_user(p, (__user void *) | 743 | memcpy(p, (void *)(uintptr_t)sge->addr, |
| 744 | (uintptr_t)sge->addr, sge->length)) | 744 | sge->length); |
| 745 | return -EFAULT; | ||
| 746 | |||
| 747 | else if (!qp->is_user) | ||
| 748 | memcpy(p, (void *)(uintptr_t)sge->addr, | ||
| 749 | sge->length); | ||
| 750 | 745 | ||
| 751 | p += sge->length; | 746 | p += sge->length; |
| 752 | } | 747 | } |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c index 874b24366e4d..7871379342f4 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c | |||
| @@ -178,7 +178,7 @@ static inline int ib_speed_enum_to_int(int speed) | |||
| 178 | static int ipoib_get_link_ksettings(struct net_device *netdev, | 178 | static int ipoib_get_link_ksettings(struct net_device *netdev, |
| 179 | struct ethtool_link_ksettings *cmd) | 179 | struct ethtool_link_ksettings *cmd) |
| 180 | { | 180 | { |
| 181 | struct ipoib_dev_priv *priv = netdev_priv(netdev); | 181 | struct ipoib_dev_priv *priv = ipoib_priv(netdev); |
| 182 | struct ib_port_attr attr; | 182 | struct ib_port_attr attr; |
| 183 | int ret, speed, width; | 183 | int ret, speed, width; |
| 184 | 184 | ||
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 0060b2f9f659..efe7402f4885 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c | |||
| @@ -863,7 +863,6 @@ dev_stop: | |||
| 863 | set_bit(IPOIB_STOP_REAPER, &priv->flags); | 863 | set_bit(IPOIB_STOP_REAPER, &priv->flags); |
| 864 | cancel_delayed_work(&priv->ah_reap_task); | 864 | cancel_delayed_work(&priv->ah_reap_task); |
| 865 | set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags); | 865 | set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags); |
| 866 | napi_enable(&priv->napi); | ||
| 867 | ipoib_ib_dev_stop(dev); | 866 | ipoib_ib_dev_stop(dev); |
| 868 | return -1; | 867 | return -1; |
| 869 | } | 868 | } |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 2869d1adb1de..1015a63de6ae 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
| @@ -1590,12 +1590,14 @@ static void ipoib_neigh_hash_uninit(struct net_device *dev) | |||
| 1590 | wait_for_completion(&priv->ntbl.deleted); | 1590 | wait_for_completion(&priv->ntbl.deleted); |
| 1591 | } | 1591 | } |
| 1592 | 1592 | ||
| 1593 | void ipoib_dev_uninit_default(struct net_device *dev) | 1593 | static void ipoib_dev_uninit_default(struct net_device *dev) |
| 1594 | { | 1594 | { |
| 1595 | struct ipoib_dev_priv *priv = ipoib_priv(dev); | 1595 | struct ipoib_dev_priv *priv = ipoib_priv(dev); |
| 1596 | 1596 | ||
| 1597 | ipoib_transport_dev_cleanup(dev); | 1597 | ipoib_transport_dev_cleanup(dev); |
| 1598 | 1598 | ||
| 1599 | netif_napi_del(&priv->napi); | ||
| 1600 | |||
| 1599 | ipoib_cm_dev_cleanup(dev); | 1601 | ipoib_cm_dev_cleanup(dev); |
| 1600 | 1602 | ||
| 1601 | kfree(priv->rx_ring); | 1603 | kfree(priv->rx_ring); |
| @@ -1649,6 +1651,7 @@ out_rx_ring_cleanup: | |||
| 1649 | kfree(priv->rx_ring); | 1651 | kfree(priv->rx_ring); |
| 1650 | 1652 | ||
| 1651 | out: | 1653 | out: |
| 1654 | netif_napi_del(&priv->napi); | ||
| 1652 | return -ENOMEM; | 1655 | return -ENOMEM; |
| 1653 | } | 1656 | } |
| 1654 | 1657 | ||
| @@ -2237,6 +2240,7 @@ event_failed: | |||
| 2237 | 2240 | ||
| 2238 | device_init_failed: | 2241 | device_init_failed: |
| 2239 | free_netdev(priv->dev); | 2242 | free_netdev(priv->dev); |
| 2243 | kfree(priv); | ||
| 2240 | 2244 | ||
| 2241 | alloc_mem_failed: | 2245 | alloc_mem_failed: |
| 2242 | return ERR_PTR(result); | 2246 | return ERR_PTR(result); |
| @@ -2277,7 +2281,7 @@ static void ipoib_add_one(struct ib_device *device) | |||
| 2277 | 2281 | ||
| 2278 | static void ipoib_remove_one(struct ib_device *device, void *client_data) | 2282 | static void ipoib_remove_one(struct ib_device *device, void *client_data) |
| 2279 | { | 2283 | { |
| 2280 | struct ipoib_dev_priv *priv, *tmp; | 2284 | struct ipoib_dev_priv *priv, *tmp, *cpriv, *tcpriv; |
| 2281 | struct list_head *dev_list = client_data; | 2285 | struct list_head *dev_list = client_data; |
| 2282 | 2286 | ||
| 2283 | if (!dev_list) | 2287 | if (!dev_list) |
| @@ -2300,7 +2304,14 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data) | |||
| 2300 | flush_workqueue(priv->wq); | 2304 | flush_workqueue(priv->wq); |
| 2301 | 2305 | ||
| 2302 | unregister_netdev(priv->dev); | 2306 | unregister_netdev(priv->dev); |
| 2303 | free_netdev(priv->dev); | 2307 | if (device->free_rdma_netdev) |
| 2308 | device->free_rdma_netdev(priv->dev); | ||
| 2309 | else | ||
| 2310 | free_netdev(priv->dev); | ||
| 2311 | |||
| 2312 | list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) | ||
| 2313 | kfree(cpriv); | ||
| 2314 | |||
| 2304 | kfree(priv); | 2315 | kfree(priv); |
| 2305 | } | 2316 | } |
| 2306 | 2317 | ||
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c index 36dc4fcaa3cd..081b33deff1b 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c | |||
| @@ -133,13 +133,13 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) | |||
| 133 | snprintf(intf_name, sizeof intf_name, "%s.%04x", | 133 | snprintf(intf_name, sizeof intf_name, "%s.%04x", |
| 134 | ppriv->dev->name, pkey); | 134 | ppriv->dev->name, pkey); |
| 135 | 135 | ||
| 136 | if (!rtnl_trylock()) | ||
| 137 | return restart_syscall(); | ||
| 138 | |||
| 136 | priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name); | 139 | priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name); |
| 137 | if (!priv) | 140 | if (!priv) |
| 138 | return -ENOMEM; | 141 | return -ENOMEM; |
| 139 | 142 | ||
| 140 | if (!rtnl_trylock()) | ||
| 141 | return restart_syscall(); | ||
| 142 | |||
| 143 | down_write(&ppriv->vlan_rwsem); | 143 | down_write(&ppriv->vlan_rwsem); |
| 144 | 144 | ||
| 145 | /* | 145 | /* |
| @@ -167,8 +167,10 @@ out: | |||
| 167 | 167 | ||
| 168 | rtnl_unlock(); | 168 | rtnl_unlock(); |
| 169 | 169 | ||
| 170 | if (result) | 170 | if (result) { |
| 171 | free_netdev(priv->dev); | 171 | free_netdev(priv->dev); |
| 172 | kfree(priv); | ||
| 173 | } | ||
| 172 | 174 | ||
| 173 | return result; | 175 | return result; |
| 174 | } | 176 | } |
| @@ -209,6 +211,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey) | |||
| 209 | 211 | ||
| 210 | if (dev) { | 212 | if (dev) { |
| 211 | free_netdev(dev); | 213 | free_netdev(dev); |
| 214 | kfree(priv); | ||
| 212 | return 0; | 215 | return 0; |
| 213 | } | 216 | } |
| 214 | 217 | ||
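The ipoib_vlan.c hunk takes the rtnl lock before allocating the child interface, so the restart_syscall() path no longer runs after an allocation it would otherwise have to unwind, and the error paths now pair free_netdev() with kfree(priv) since priv is allocated separately from the netdev. Below is a minimal model of that ordering, with invented names and the kernel calls stubbed out; it is a sketch of the pattern, not the driver's code.

/*
 * Minimal model of the ordering fix above: take the lock that can
 * bail out (forcing a syscall restart) before allocating, so the
 * bail-out path has nothing to free. All names are illustrative.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define RESTART_SYS 512		/* placeholder for restart_syscall() */

static bool try_lock(void)		{ return true; }	/* stands in for rtnl_trylock() */
static void unlock(void)		{ }
static void *intf_alloc(void)		{ return malloc(64); }
static int register_child(void *priv)	{ (void)priv; return 0; }

static int vlan_add(void)
{
	void *priv;
	int result;

	if (!try_lock())
		return -RESTART_SYS;	/* nothing allocated yet, nothing leaks */

	priv = intf_alloc();
	if (!priv) {
		unlock();
		return -ENOMEM;
	}

	result = register_child(priv);
	unlock();

	if (result)
		free(priv);		/* error path frees what it allocated */

	return result;
}

int main(void)
{
	printf("vlan_add() -> %d\n", vlan_add());
	return 0;
}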
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index def723a5df29..2354c742caa1 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c | |||
| @@ -320,7 +320,7 @@ static int srp_new_cm_id(struct srp_rdma_ch *ch) | |||
| 320 | ch->path.sgid = target->sgid; | 320 | ch->path.sgid = target->sgid; |
| 321 | ch->path.dgid = target->orig_dgid; | 321 | ch->path.dgid = target->orig_dgid; |
| 322 | ch->path.pkey = target->pkey; | 322 | ch->path.pkey = target->pkey; |
| 323 | sa_path_set_service_id(&ch->path, target->service_id); | 323 | ch->path.service_id = target->service_id; |
| 324 | 324 | ||
| 325 | return 0; | 325 | return 0; |
| 326 | } | 326 | } |
| @@ -575,7 +575,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch) | |||
| 575 | return 0; | 575 | return 0; |
| 576 | 576 | ||
| 577 | err_qp: | 577 | err_qp: |
| 578 | srp_destroy_qp(ch, qp); | 578 | ib_destroy_qp(qp); |
| 579 | 579 | ||
| 580 | err_send_cq: | 580 | err_send_cq: |
| 581 | ib_free_cq(send_cq); | 581 | ib_free_cq(send_cq); |
diff --git a/drivers/input/input.c b/drivers/input/input.c index 7e6842bd525c..d268fdc23c64 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c | |||
| @@ -1398,7 +1398,7 @@ static struct attribute *input_dev_attrs[] = { | |||
| 1398 | NULL | 1398 | NULL |
| 1399 | }; | 1399 | }; |
| 1400 | 1400 | ||
| 1401 | static struct attribute_group input_dev_attr_group = { | 1401 | static const struct attribute_group input_dev_attr_group = { |
| 1402 | .attrs = input_dev_attrs, | 1402 | .attrs = input_dev_attrs, |
| 1403 | }; | 1403 | }; |
| 1404 | 1404 | ||
| @@ -1425,7 +1425,7 @@ static struct attribute *input_dev_id_attrs[] = { | |||
| 1425 | NULL | 1425 | NULL |
| 1426 | }; | 1426 | }; |
| 1427 | 1427 | ||
| 1428 | static struct attribute_group input_dev_id_attr_group = { | 1428 | static const struct attribute_group input_dev_id_attr_group = { |
| 1429 | .name = "id", | 1429 | .name = "id", |
| 1430 | .attrs = input_dev_id_attrs, | 1430 | .attrs = input_dev_id_attrs, |
| 1431 | }; | 1431 | }; |
| @@ -1495,7 +1495,7 @@ static struct attribute *input_dev_caps_attrs[] = { | |||
| 1495 | NULL | 1495 | NULL |
| 1496 | }; | 1496 | }; |
| 1497 | 1497 | ||
| 1498 | static struct attribute_group input_dev_caps_attr_group = { | 1498 | static const struct attribute_group input_dev_caps_attr_group = { |
| 1499 | .name = "capabilities", | 1499 | .name = "capabilities", |
| 1500 | .attrs = input_dev_caps_attrs, | 1500 | .attrs = input_dev_caps_attrs, |
| 1501 | }; | 1501 | }; |
diff --git a/drivers/input/joystick/iforce/iforce-serio.c b/drivers/input/joystick/iforce/iforce-serio.c index 46d5041d2d9d..154e827b559b 100644 --- a/drivers/input/joystick/iforce/iforce-serio.c +++ b/drivers/input/joystick/iforce/iforce-serio.c | |||
| @@ -164,7 +164,7 @@ static void iforce_serio_disconnect(struct serio *serio) | |||
| 164 | kfree(iforce); | 164 | kfree(iforce); |
| 165 | } | 165 | } |
| 166 | 166 | ||
| 167 | static struct serio_device_id iforce_serio_ids[] = { | 167 | static const struct serio_device_id iforce_serio_ids[] = { |
| 168 | { | 168 | { |
| 169 | .type = SERIO_RS232, | 169 | .type = SERIO_RS232, |
| 170 | .proto = SERIO_IFORCE, | 170 | .proto = SERIO_IFORCE, |
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c index db64adfbe1af..e8724f1a4a25 100644 --- a/drivers/input/joystick/iforce/iforce-usb.c +++ b/drivers/input/joystick/iforce/iforce-usb.c | |||
| @@ -209,7 +209,7 @@ static void iforce_usb_disconnect(struct usb_interface *intf) | |||
| 209 | kfree(iforce); | 209 | kfree(iforce); |
| 210 | } | 210 | } |
| 211 | 211 | ||
| 212 | static struct usb_device_id iforce_usb_ids [] = { | 212 | static const struct usb_device_id iforce_usb_ids[] = { |
| 213 | { USB_DEVICE(0x044f, 0xa01c) }, /* Thrustmaster Motor Sport GT */ | 213 | { USB_DEVICE(0x044f, 0xa01c) }, /* Thrustmaster Motor Sport GT */ |
| 214 | { USB_DEVICE(0x046d, 0xc281) }, /* Logitech WingMan Force */ | 214 | { USB_DEVICE(0x046d, 0xc281) }, /* Logitech WingMan Force */ |
| 215 | { USB_DEVICE(0x046d, 0xc291) }, /* Logitech WingMan Formula Force */ | 215 | { USB_DEVICE(0x046d, 0xc291) }, /* Logitech WingMan Formula Force */ |
diff --git a/drivers/input/joystick/magellan.c b/drivers/input/joystick/magellan.c index c5358ba1f571..a9d0e3edca94 100644 --- a/drivers/input/joystick/magellan.c +++ b/drivers/input/joystick/magellan.c | |||
| @@ -198,7 +198,7 @@ static int magellan_connect(struct serio *serio, struct serio_driver *drv) | |||
| 198 | * The serio driver structure. | 198 | * The serio driver structure. |
| 199 | */ | 199 | */ |
| 200 | 200 | ||
| 201 | static struct serio_device_id magellan_serio_ids[] = { | 201 | static const struct serio_device_id magellan_serio_ids[] = { |
| 202 | { | 202 | { |
| 203 | .type = SERIO_RS232, | 203 | .type = SERIO_RS232, |
| 204 | .proto = SERIO_MAGELLAN, | 204 | .proto = SERIO_MAGELLAN, |
diff --git a/drivers/input/joystick/spaceball.c b/drivers/input/joystick/spaceball.c index f4445a4e8d6a..e9712a1b7cad 100644 --- a/drivers/input/joystick/spaceball.c +++ b/drivers/input/joystick/spaceball.c | |||
| @@ -272,7 +272,7 @@ static int spaceball_connect(struct serio *serio, struct serio_driver *drv) | |||
| 272 | * The serio driver structure. | 272 | * The serio driver structure. |
| 273 | */ | 273 | */ |
| 274 | 274 | ||
| 275 | static struct serio_device_id spaceball_serio_ids[] = { | 275 | static const struct serio_device_id spaceball_serio_ids[] = { |
| 276 | { | 276 | { |
| 277 | .type = SERIO_RS232, | 277 | .type = SERIO_RS232, |
| 278 | .proto = SERIO_SPACEBALL, | 278 | .proto = SERIO_SPACEBALL, |
diff --git a/drivers/input/joystick/spaceorb.c b/drivers/input/joystick/spaceorb.c index f2667820e8c5..05da0ed514e2 100644 --- a/drivers/input/joystick/spaceorb.c +++ b/drivers/input/joystick/spaceorb.c | |||
| @@ -213,7 +213,7 @@ static int spaceorb_connect(struct serio *serio, struct serio_driver *drv) | |||
| 213 | * The serio driver structure. | 213 | * The serio driver structure. |
| 214 | */ | 214 | */ |
| 215 | 215 | ||
| 216 | static struct serio_device_id spaceorb_serio_ids[] = { | 216 | static const struct serio_device_id spaceorb_serio_ids[] = { |
| 217 | { | 217 | { |
| 218 | .type = SERIO_RS232, | 218 | .type = SERIO_RS232, |
| 219 | .proto = SERIO_SPACEORB, | 219 | .proto = SERIO_SPACEORB, |
diff --git a/drivers/input/joystick/stinger.c b/drivers/input/joystick/stinger.c index 099c6d7b5e08..cb10e7b097ae 100644 --- a/drivers/input/joystick/stinger.c +++ b/drivers/input/joystick/stinger.c | |||
| @@ -184,7 +184,7 @@ static int stinger_connect(struct serio *serio, struct serio_driver *drv) | |||
| 184 | * The serio driver structure. | 184 | * The serio driver structure. |
| 185 | */ | 185 | */ |
| 186 | 186 | ||
| 187 | static struct serio_device_id stinger_serio_ids[] = { | 187 | static const struct serio_device_id stinger_serio_ids[] = { |
| 188 | { | 188 | { |
| 189 | .type = SERIO_RS232, | 189 | .type = SERIO_RS232, |
| 190 | .proto = SERIO_STINGER, | 190 | .proto = SERIO_STINGER, |
diff --git a/drivers/input/joystick/twidjoy.c b/drivers/input/joystick/twidjoy.c index 7f7e5ab3f9e3..e60cb004cb8c 100644 --- a/drivers/input/joystick/twidjoy.c +++ b/drivers/input/joystick/twidjoy.c | |||
| @@ -233,7 +233,7 @@ static int twidjoy_connect(struct serio *serio, struct serio_driver *drv) | |||
| 233 | * The serio driver structure. | 233 | * The serio driver structure. |
| 234 | */ | 234 | */ |
| 235 | 235 | ||
| 236 | static struct serio_device_id twidjoy_serio_ids[] = { | 236 | static const struct serio_device_id twidjoy_serio_ids[] = { |
| 237 | { | 237 | { |
| 238 | .type = SERIO_RS232, | 238 | .type = SERIO_RS232, |
| 239 | .proto = SERIO_TWIDJOY, | 239 | .proto = SERIO_TWIDJOY, |
diff --git a/drivers/input/joystick/warrior.c b/drivers/input/joystick/warrior.c index e13a9144a25d..ef5391ba4470 100644 --- a/drivers/input/joystick/warrior.c +++ b/drivers/input/joystick/warrior.c | |||
| @@ -193,7 +193,7 @@ static int warrior_connect(struct serio *serio, struct serio_driver *drv) | |||
| 193 | * The serio driver structure. | 193 | * The serio driver structure. |
| 194 | */ | 194 | */ |
| 195 | 195 | ||
| 196 | static struct serio_device_id warrior_serio_ids[] = { | 196 | static const struct serio_device_id warrior_serio_ids[] = { |
| 197 | { | 197 | { |
| 198 | .type = SERIO_RS232, | 198 | .type = SERIO_RS232, |
| 199 | .proto = SERIO_WARRIOR, | 199 | .proto = SERIO_WARRIOR, |
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index ca0e19ae7a90..f8e34ef643c7 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c | |||
| @@ -408,7 +408,7 @@ static const signed short xpad_abs_triggers[] = { | |||
| 408 | #define XPAD_XBOXONE_VENDOR(vend) \ | 408 | #define XPAD_XBOXONE_VENDOR(vend) \ |
| 409 | { XPAD_XBOXONE_VENDOR_PROTOCOL(vend, 208) } | 409 | { XPAD_XBOXONE_VENDOR_PROTOCOL(vend, 208) } |
| 410 | 410 | ||
| 411 | static struct usb_device_id xpad_table[] = { | 411 | static const struct usb_device_id xpad_table[] = { |
| 412 | { USB_INTERFACE_INFO('X', 'B', 0) }, /* X-Box USB-IF not approved class */ | 412 | { USB_INTERFACE_INFO('X', 'B', 0) }, /* X-Box USB-IF not approved class */ |
| 413 | XPAD_XBOX360_VENDOR(0x044f), /* Thrustmaster X-Box 360 controllers */ | 413 | XPAD_XBOX360_VENDOR(0x044f), /* Thrustmaster X-Box 360 controllers */ |
| 414 | XPAD_XBOX360_VENDOR(0x045e), /* Microsoft X-Box 360 controllers */ | 414 | XPAD_XBOX360_VENDOR(0x045e), /* Microsoft X-Box 360 controllers */ |
diff --git a/drivers/input/joystick/zhenhua.c b/drivers/input/joystick/zhenhua.c index 4a8258bf13fd..5c6d5de743f1 100644 --- a/drivers/input/joystick/zhenhua.c +++ b/drivers/input/joystick/zhenhua.c | |||
| @@ -192,7 +192,7 @@ static int zhenhua_connect(struct serio *serio, struct serio_driver *drv) | |||
| 192 | * The serio driver structure. | 192 | * The serio driver structure. |
| 193 | */ | 193 | */ |
| 194 | 194 | ||
| 195 | static struct serio_device_id zhenhua_serio_ids[] = { | 195 | static const struct serio_device_id zhenhua_serio_ids[] = { |
| 196 | { | 196 | { |
| 197 | .type = SERIO_RS232, | 197 | .type = SERIO_RS232, |
| 198 | .proto = SERIO_ZHENHUA, | 198 | .proto = SERIO_ZHENHUA, |
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c index ec876b5b1382..7e75835e220f 100644 --- a/drivers/input/keyboard/atkbd.c +++ b/drivers/input/keyboard/atkbd.c | |||
| @@ -1270,7 +1270,7 @@ static int atkbd_reconnect(struct serio *serio) | |||
| 1270 | return retval; | 1270 | return retval; |
| 1271 | } | 1271 | } |
| 1272 | 1272 | ||
| 1273 | static struct serio_device_id atkbd_serio_ids[] = { | 1273 | static const struct serio_device_id atkbd_serio_ids[] = { |
| 1274 | { | 1274 | { |
| 1275 | .type = SERIO_8042, | 1275 | .type = SERIO_8042, |
| 1276 | .proto = SERIO_ANY, | 1276 | .proto = SERIO_ANY, |
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c index a047b9af8369..e9f0ebf3267a 100644 --- a/drivers/input/keyboard/gpio_keys.c +++ b/drivers/input/keyboard/gpio_keys.c | |||
| @@ -353,7 +353,7 @@ static struct attribute *gpio_keys_attrs[] = { | |||
| 353 | NULL, | 353 | NULL, |
| 354 | }; | 354 | }; |
| 355 | 355 | ||
| 356 | static struct attribute_group gpio_keys_attr_group = { | 356 | static const struct attribute_group gpio_keys_attr_group = { |
| 357 | .attrs = gpio_keys_attrs, | 357 | .attrs = gpio_keys_attrs, |
| 358 | }; | 358 | }; |
| 359 | 359 | ||
| @@ -827,7 +827,7 @@ static int gpio_keys_probe(struct platform_device *pdev) | |||
| 827 | 827 | ||
| 828 | fwnode_handle_put(child); | 828 | fwnode_handle_put(child); |
| 829 | 829 | ||
| 830 | error = sysfs_create_group(&dev->kobj, &gpio_keys_attr_group); | 830 | error = devm_device_add_group(dev, &gpio_keys_attr_group); |
| 831 | if (error) { | 831 | if (error) { |
| 832 | dev_err(dev, "Unable to export keys/switches, error: %d\n", | 832 | dev_err(dev, "Unable to export keys/switches, error: %d\n", |
| 833 | error); | 833 | error); |
| @@ -838,23 +838,12 @@ static int gpio_keys_probe(struct platform_device *pdev) | |||
| 838 | if (error) { | 838 | if (error) { |
| 839 | dev_err(dev, "Unable to register input device, error: %d\n", | 839 | dev_err(dev, "Unable to register input device, error: %d\n", |
| 840 | error); | 840 | error); |
| 841 | goto err_remove_group; | 841 | return error; |
| 842 | } | 842 | } |
| 843 | 843 | ||
| 844 | device_init_wakeup(dev, wakeup); | 844 | device_init_wakeup(dev, wakeup); |
| 845 | 845 | ||
| 846 | return 0; | 846 | return 0; |
| 847 | |||
| 848 | err_remove_group: | ||
| 849 | sysfs_remove_group(&dev->kobj, &gpio_keys_attr_group); | ||
| 850 | return error; | ||
| 851 | } | ||
| 852 | |||
| 853 | static int gpio_keys_remove(struct platform_device *pdev) | ||
| 854 | { | ||
| 855 | sysfs_remove_group(&pdev->dev.kobj, &gpio_keys_attr_group); | ||
| 856 | |||
| 857 | return 0; | ||
| 858 | } | 847 | } |
| 859 | 848 | ||
| 860 | static int __maybe_unused gpio_keys_suspend(struct device *dev) | 849 | static int __maybe_unused gpio_keys_suspend(struct device *dev) |
| @@ -912,7 +901,6 @@ static SIMPLE_DEV_PM_OPS(gpio_keys_pm_ops, gpio_keys_suspend, gpio_keys_resume); | |||
| 912 | 901 | ||
| 913 | static struct platform_driver gpio_keys_device_driver = { | 902 | static struct platform_driver gpio_keys_device_driver = { |
| 914 | .probe = gpio_keys_probe, | 903 | .probe = gpio_keys_probe, |
| 915 | .remove = gpio_keys_remove, | ||
| 916 | .driver = { | 904 | .driver = { |
| 917 | .name = "gpio-keys", | 905 | .name = "gpio-keys", |
| 918 | .pm = &gpio_keys_pm_ops, | 906 | .pm = &gpio_keys_pm_ops, |
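The gpio-keys hunk swaps sysfs_create_group() for devm_device_add_group(), which lets the driver drop both the err_remove_group unwind label and its .remove callback: the attribute group is torn down automatically when the device goes away. The toy sketch below models the device-managed idea only; every name in it is invented for illustration.

/*
 * Toy model of device-managed (devm) cleanup: resources register a
 * release callback against the device, and everything is unwound in
 * one place when the device is released.
 */
#include <stdio.h>
#include <stdlib.h>

struct devres {
	void (*release)(void *data);
	void *data;
	struct devres *next;
};

struct toy_device {
	struct devres *res;
};

static int toy_devm_add(struct toy_device *dev, void (*release)(void *),
			void *data)
{
	struct devres *dr = malloc(sizeof(*dr));

	if (!dr)
		return -1;
	dr->release = release;
	dr->data = data;
	dr->next = dev->res;
	dev->res = dr;
	return 0;
}

static void toy_device_release(struct toy_device *dev)
{
	while (dev->res) {
		struct devres *dr = dev->res;

		dev->res = dr->next;
		dr->release(dr->data);	/* runs in reverse registration order */
		free(dr);
	}
}

static void remove_sysfs_group(void *data)
{
	printf("removing sysfs group %s\n", (const char *)data);
}

int main(void)
{
	struct toy_device dev = { .res = NULL };

	/* probe(): no explicit unwind label or .remove callback needed */
	toy_devm_add(&dev, remove_sysfs_group, "keys");

	/* device teardown releases everything that probe registered */
	toy_device_release(&dev);
	return 0;
}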
diff --git a/drivers/input/keyboard/hil_kbd.c b/drivers/input/keyboard/hil_kbd.c index 5b152f25a8e1..bb29a7c9a1c0 100644 --- a/drivers/input/keyboard/hil_kbd.c +++ b/drivers/input/keyboard/hil_kbd.c | |||
| @@ -559,7 +559,7 @@ static int hil_dev_connect(struct serio *serio, struct serio_driver *drv) | |||
| 559 | return error; | 559 | return error; |
| 560 | } | 560 | } |
| 561 | 561 | ||
| 562 | static struct serio_device_id hil_dev_ids[] = { | 562 | static const struct serio_device_id hil_dev_ids[] = { |
| 563 | { | 563 | { |
| 564 | .type = SERIO_HIL_MLC, | 564 | .type = SERIO_HIL_MLC, |
| 565 | .proto = SERIO_HIL, | 565 | .proto = SERIO_HIL, |
diff --git a/drivers/input/keyboard/lkkbd.c b/drivers/input/keyboard/lkkbd.c index 9fcd9f1d5dc8..471d53815c6d 100644 --- a/drivers/input/keyboard/lkkbd.c +++ b/drivers/input/keyboard/lkkbd.c | |||
| @@ -707,7 +707,7 @@ static void lkkbd_disconnect(struct serio *serio) | |||
| 707 | kfree(lk); | 707 | kfree(lk); |
| 708 | } | 708 | } |
| 709 | 709 | ||
| 710 | static struct serio_device_id lkkbd_serio_ids[] = { | 710 | static const struct serio_device_id lkkbd_serio_ids[] = { |
| 711 | { | 711 | { |
| 712 | .type = SERIO_RS232, | 712 | .type = SERIO_RS232, |
| 713 | .proto = SERIO_LKKBD, | 713 | .proto = SERIO_LKKBD, |
diff --git a/drivers/input/keyboard/newtonkbd.c b/drivers/input/keyboard/newtonkbd.c index 20f044377990..fb9b8e23ab93 100644 --- a/drivers/input/keyboard/newtonkbd.c +++ b/drivers/input/keyboard/newtonkbd.c | |||
| @@ -142,7 +142,7 @@ static void nkbd_disconnect(struct serio *serio) | |||
| 142 | kfree(nkbd); | 142 | kfree(nkbd); |
| 143 | } | 143 | } |
| 144 | 144 | ||
| 145 | static struct serio_device_id nkbd_serio_ids[] = { | 145 | static const struct serio_device_id nkbd_serio_ids[] = { |
| 146 | { | 146 | { |
| 147 | .type = SERIO_RS232, | 147 | .type = SERIO_RS232, |
| 148 | .proto = SERIO_NEWTON, | 148 | .proto = SERIO_NEWTON, |
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c index 3841fa30db33..d0bdaeadf86d 100644 --- a/drivers/input/keyboard/pxa27x_keypad.c +++ b/drivers/input/keyboard/pxa27x_keypad.c | |||
| @@ -644,9 +644,12 @@ static void pxa27x_keypad_config(struct pxa27x_keypad *keypad) | |||
| 644 | static int pxa27x_keypad_open(struct input_dev *dev) | 644 | static int pxa27x_keypad_open(struct input_dev *dev) |
| 645 | { | 645 | { |
| 646 | struct pxa27x_keypad *keypad = input_get_drvdata(dev); | 646 | struct pxa27x_keypad *keypad = input_get_drvdata(dev); |
| 647 | 647 | int ret; | |
| 648 | /* Enable unit clock */ | 648 | /* Enable unit clock */ |
| 649 | clk_prepare_enable(keypad->clk); | 649 | ret = clk_prepare_enable(keypad->clk); |
| 650 | if (ret) | ||
| 651 | return ret; | ||
| 652 | |||
| 650 | pxa27x_keypad_config(keypad); | 653 | pxa27x_keypad_config(keypad); |
| 651 | 654 | ||
| 652 | return 0; | 655 | return 0; |
| @@ -683,6 +686,7 @@ static int pxa27x_keypad_resume(struct device *dev) | |||
| 683 | struct platform_device *pdev = to_platform_device(dev); | 686 | struct platform_device *pdev = to_platform_device(dev); |
| 684 | struct pxa27x_keypad *keypad = platform_get_drvdata(pdev); | 687 | struct pxa27x_keypad *keypad = platform_get_drvdata(pdev); |
| 685 | struct input_dev *input_dev = keypad->input_dev; | 688 | struct input_dev *input_dev = keypad->input_dev; |
| 689 | int ret = 0; | ||
| 686 | 690 | ||
| 687 | /* | 691 | /* |
| 688 | * If the keypad is used as wake up source, the clock is not turned | 692 | * If the keypad is used as wake up source, the clock is not turned |
| @@ -695,14 +699,15 @@ static int pxa27x_keypad_resume(struct device *dev) | |||
| 695 | 699 | ||
| 696 | if (input_dev->users) { | 700 | if (input_dev->users) { |
| 697 | /* Enable unit clock */ | 701 | /* Enable unit clock */ |
| 698 | clk_prepare_enable(keypad->clk); | 702 | ret = clk_prepare_enable(keypad->clk); |
| 699 | pxa27x_keypad_config(keypad); | 703 | if (!ret) |
| 704 | pxa27x_keypad_config(keypad); | ||
| 700 | } | 705 | } |
| 701 | 706 | ||
| 702 | mutex_unlock(&input_dev->mutex); | 707 | mutex_unlock(&input_dev->mutex); |
| 703 | } | 708 | } |
| 704 | 709 | ||
| 705 | return 0; | 710 | return ret; |
| 706 | } | 711 | } |
| 707 | #endif | 712 | #endif |
| 708 | 713 | ||
diff --git a/drivers/input/keyboard/stowaway.c b/drivers/input/keyboard/stowaway.c index a6e0d565e306..8b6de9a692dc 100644 --- a/drivers/input/keyboard/stowaway.c +++ b/drivers/input/keyboard/stowaway.c | |||
| @@ -146,7 +146,7 @@ static void skbd_disconnect(struct serio *serio) | |||
| 146 | kfree(skbd); | 146 | kfree(skbd); |
| 147 | } | 147 | } |
| 148 | 148 | ||
| 149 | static struct serio_device_id skbd_serio_ids[] = { | 149 | static const struct serio_device_id skbd_serio_ids[] = { |
| 150 | { | 150 | { |
| 151 | .type = SERIO_RS232, | 151 | .type = SERIO_RS232, |
| 152 | .proto = SERIO_STOWAWAY, | 152 | .proto = SERIO_STOWAWAY, |
diff --git a/drivers/input/keyboard/sunkbd.c b/drivers/input/keyboard/sunkbd.c index dc6bb9d5b4f0..c95707ea2656 100644 --- a/drivers/input/keyboard/sunkbd.c +++ b/drivers/input/keyboard/sunkbd.c | |||
| @@ -339,7 +339,7 @@ static void sunkbd_disconnect(struct serio *serio) | |||
| 339 | kfree(sunkbd); | 339 | kfree(sunkbd); |
| 340 | } | 340 | } |
| 341 | 341 | ||
| 342 | static struct serio_device_id sunkbd_serio_ids[] = { | 342 | static const struct serio_device_id sunkbd_serio_ids[] = { |
| 343 | { | 343 | { |
| 344 | .type = SERIO_RS232, | 344 | .type = SERIO_RS232, |
| 345 | .proto = SERIO_SUNKBD, | 345 | .proto = SERIO_SUNKBD, |
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c index 0c07e1023a46..edc1385ca00b 100644 --- a/drivers/input/keyboard/tegra-kbc.c +++ b/drivers/input/keyboard/tegra-kbc.c | |||
| @@ -370,8 +370,11 @@ static int tegra_kbc_start(struct tegra_kbc *kbc) | |||
| 370 | { | 370 | { |
| 371 | unsigned int debounce_cnt; | 371 | unsigned int debounce_cnt; |
| 372 | u32 val = 0; | 372 | u32 val = 0; |
| 373 | int ret; | ||
| 373 | 374 | ||
| 374 | clk_prepare_enable(kbc->clk); | 375 | ret = clk_prepare_enable(kbc->clk); |
| 376 | if (ret) | ||
| 377 | return ret; | ||
| 375 | 378 | ||
| 376 | /* Reset the KBC controller to clear all previous status.*/ | 379 | /* Reset the KBC controller to clear all previous status.*/ |
| 377 | reset_control_assert(kbc->rst); | 380 | reset_control_assert(kbc->rst); |
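The pxa27x-keypad and tegra-kbc hunks above share one fix: clk_prepare_enable() can fail, so its return value is now checked and propagated before the controller is configured instead of being ignored. A minimal stand-alone illustration of the pattern follows; the clock API is stubbed out and the names are illustrative.

/*
 * Minimal illustration of checking clk_prepare_enable() before
 * configuring the hardware, as in the keypad hunks above. The clock
 * API is a stub; only the error-propagation pattern matters here.
 */
#include <errno.h>
#include <stdio.h>

struct clk { int broken; };

static int clk_prepare_enable(struct clk *clk)
{
	return clk->broken ? -EIO : 0;
}

static void keypad_config(void)
{
	printf("keypad configured\n");
}

static int keypad_open(struct clk *clk)
{
	int ret;

	ret = clk_prepare_enable(clk);	/* may fail; do not ignore it */
	if (ret)
		return ret;

	keypad_config();		/* only touch the block with its clock on */
	return 0;
}

int main(void)
{
	struct clk good = { .broken = 0 }, bad = { .broken = 1 };

	printf("good clock: %d\n", keypad_open(&good));
	printf("bad clock:  %d\n", keypad_open(&bad));
	return 0;
}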
diff --git a/drivers/input/keyboard/xtkbd.c b/drivers/input/keyboard/xtkbd.c index 7c2325bd7408..8f64b9ded8d0 100644 --- a/drivers/input/keyboard/xtkbd.c +++ b/drivers/input/keyboard/xtkbd.c | |||
| @@ -145,7 +145,7 @@ static void xtkbd_disconnect(struct serio *serio) | |||
| 145 | kfree(xtkbd); | 145 | kfree(xtkbd); |
| 146 | } | 146 | } |
| 147 | 147 | ||
| 148 | static struct serio_device_id xtkbd_serio_ids[] = { | 148 | static const struct serio_device_id xtkbd_serio_ids[] = { |
| 149 | { | 149 | { |
| 150 | .type = SERIO_XT, | 150 | .type = SERIO_XT, |
| 151 | .proto = SERIO_ANY, | 151 | .proto = SERIO_ANY, |
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index 3872488c3fd7..f47e836eaa0f 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig | |||
| @@ -581,6 +581,17 @@ config INPUT_PWM_BEEPER | |||
| 581 | To compile this driver as a module, choose M here: the module will be | 581 | To compile this driver as a module, choose M here: the module will be |
| 582 | called pwm-beeper. | 582 | called pwm-beeper. |
| 583 | 583 | ||
| 584 | config INPUT_RK805_PWRKEY | ||
| 585 | tristate "Rockchip RK805 PMIC power key support" | ||
| 586 | depends on MFD_RK808 | ||
| 587 | help | ||
| 588 | Select this option to enable power key driver for RK805. | ||
| 589 | |||
| 590 | If unsure, say N. | ||
| 591 | |||
| 592 | To compile this driver as a module, choose M here: the module will be | ||
| 593 | called rk805_pwrkey. | ||
| 594 | |||
| 584 | config INPUT_GPIO_ROTARY_ENCODER | 595 | config INPUT_GPIO_ROTARY_ENCODER |
| 585 | tristate "Rotary encoders connected to GPIO pins" | 596 | tristate "Rotary encoders connected to GPIO pins" |
| 586 | depends on GPIOLIB || COMPILE_TEST | 597 | depends on GPIOLIB || COMPILE_TEST |
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile index b923a9828c88..1072e0760c19 100644 --- a/drivers/input/misc/Makefile +++ b/drivers/input/misc/Makefile | |||
| @@ -64,6 +64,7 @@ obj-$(CONFIG_INPUT_REGULATOR_HAPTIC) += regulator-haptic.o | |||
| 64 | obj-$(CONFIG_INPUT_RETU_PWRBUTTON) += retu-pwrbutton.o | 64 | obj-$(CONFIG_INPUT_RETU_PWRBUTTON) += retu-pwrbutton.o |
| 65 | obj-$(CONFIG_INPUT_AXP20X_PEK) += axp20x-pek.o | 65 | obj-$(CONFIG_INPUT_AXP20X_PEK) += axp20x-pek.o |
| 66 | obj-$(CONFIG_INPUT_GPIO_ROTARY_ENCODER) += rotary_encoder.o | 66 | obj-$(CONFIG_INPUT_GPIO_ROTARY_ENCODER) += rotary_encoder.o |
| 67 | obj-$(CONFIG_INPUT_RK805_PWRKEY) += rk805-pwrkey.o | ||
| 67 | obj-$(CONFIG_INPUT_SGI_BTNS) += sgi_btns.o | 68 | obj-$(CONFIG_INPUT_SGI_BTNS) += sgi_btns.o |
| 68 | obj-$(CONFIG_INPUT_SIRFSOC_ONKEY) += sirfsoc-onkey.o | 69 | obj-$(CONFIG_INPUT_SIRFSOC_ONKEY) += sirfsoc-onkey.o |
| 69 | obj-$(CONFIG_INPUT_SOC_BUTTON_ARRAY) += soc_button_array.o | 70 | obj-$(CONFIG_INPUT_SOC_BUTTON_ARRAY) += soc_button_array.o |
diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c index 1c5914cae853..ebf4448b31b9 100644 --- a/drivers/input/misc/ati_remote2.c +++ b/drivers/input/misc/ati_remote2.c | |||
| @@ -110,7 +110,7 @@ static const struct kernel_param_ops param_ops_mode_mask = { | |||
| 110 | module_param(mode_mask, mode_mask, 0644); | 110 | module_param(mode_mask, mode_mask, 0644); |
| 111 | MODULE_PARM_DESC(mode_mask, "Bitmask of modes to accept <4:PC><3:AUX4><2:AUX3><1:AUX2><0:AUX1>"); | 111 | MODULE_PARM_DESC(mode_mask, "Bitmask of modes to accept <4:PC><3:AUX4><2:AUX3><1:AUX2><0:AUX1>"); |
| 112 | 112 | ||
| 113 | static struct usb_device_id ati_remote2_id_table[] = { | 113 | static const struct usb_device_id ati_remote2_id_table[] = { |
| 114 | { USB_DEVICE(0x0471, 0x0602) }, /* ATI Remote Wonder II */ | 114 | { USB_DEVICE(0x0471, 0x0602) }, /* ATI Remote Wonder II */ |
| 115 | { } | 115 | { } |
| 116 | }; | 116 | }; |
diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c index 38c79ebff033..6cee5adc3b5c 100644 --- a/drivers/input/misc/axp20x-pek.c +++ b/drivers/input/misc/axp20x-pek.c | |||
| @@ -29,9 +29,17 @@ | |||
| 29 | #define AXP20X_PEK_STARTUP_MASK (0xc0) | 29 | #define AXP20X_PEK_STARTUP_MASK (0xc0) |
| 30 | #define AXP20X_PEK_SHUTDOWN_MASK (0x03) | 30 | #define AXP20X_PEK_SHUTDOWN_MASK (0x03) |
| 31 | 31 | ||
| 32 | struct axp20x_info { | ||
| 33 | const struct axp20x_time *startup_time; | ||
| 34 | unsigned int startup_mask; | ||
| 35 | const struct axp20x_time *shutdown_time; | ||
| 36 | unsigned int shutdown_mask; | ||
| 37 | }; | ||
| 38 | |||
| 32 | struct axp20x_pek { | 39 | struct axp20x_pek { |
| 33 | struct axp20x_dev *axp20x; | 40 | struct axp20x_dev *axp20x; |
| 34 | struct input_dev *input; | 41 | struct input_dev *input; |
| 42 | struct axp20x_info *info; | ||
| 35 | int irq_dbr; | 43 | int irq_dbr; |
| 36 | int irq_dbf; | 44 | int irq_dbf; |
| 37 | }; | 45 | }; |
| @@ -48,6 +56,13 @@ static const struct axp20x_time startup_time[] = { | |||
| 48 | { .time = 2000, .idx = 3 }, | 56 | { .time = 2000, .idx = 3 }, |
| 49 | }; | 57 | }; |
| 50 | 58 | ||
| 59 | static const struct axp20x_time axp221_startup_time[] = { | ||
| 60 | { .time = 128, .idx = 0 }, | ||
| 61 | { .time = 1000, .idx = 1 }, | ||
| 62 | { .time = 2000, .idx = 2 }, | ||
| 63 | { .time = 3000, .idx = 3 }, | ||
| 64 | }; | ||
| 65 | |||
| 51 | static const struct axp20x_time shutdown_time[] = { | 66 | static const struct axp20x_time shutdown_time[] = { |
| 52 | { .time = 4000, .idx = 0 }, | 67 | { .time = 4000, .idx = 0 }, |
| 53 | { .time = 6000, .idx = 1 }, | 68 | { .time = 6000, .idx = 1 }, |
| @@ -55,31 +70,25 @@ static const struct axp20x_time shutdown_time[] = { | |||
| 55 | { .time = 10000, .idx = 3 }, | 70 | { .time = 10000, .idx = 3 }, |
| 56 | }; | 71 | }; |
| 57 | 72 | ||
| 58 | struct axp20x_pek_ext_attr { | 73 | static const struct axp20x_info axp20x_info = { |
| 59 | const struct axp20x_time *p_time; | 74 | .startup_time = startup_time, |
| 60 | unsigned int mask; | 75 | .startup_mask = AXP20X_PEK_STARTUP_MASK, |
| 61 | }; | 76 | .shutdown_time = shutdown_time, |
| 62 | 77 | .shutdown_mask = AXP20X_PEK_SHUTDOWN_MASK, | |
| 63 | static struct axp20x_pek_ext_attr axp20x_pek_startup_ext_attr = { | ||
| 64 | .p_time = startup_time, | ||
| 65 | .mask = AXP20X_PEK_STARTUP_MASK, | ||
| 66 | }; | 78 | }; |
| 67 | 79 | ||
| 68 | static struct axp20x_pek_ext_attr axp20x_pek_shutdown_ext_attr = { | 80 | static const struct axp20x_info axp221_info = { |
| 69 | .p_time = shutdown_time, | 81 | .startup_time = axp221_startup_time, |
| 70 | .mask = AXP20X_PEK_SHUTDOWN_MASK, | 82 | .startup_mask = AXP20X_PEK_STARTUP_MASK, |
| 83 | .shutdown_time = shutdown_time, | ||
| 84 | .shutdown_mask = AXP20X_PEK_SHUTDOWN_MASK, | ||
| 71 | }; | 85 | }; |
| 72 | 86 | ||
| 73 | static struct axp20x_pek_ext_attr *get_axp_ext_attr(struct device_attribute *attr) | 87 | static ssize_t axp20x_show_attr(struct device *dev, |
| 74 | { | 88 | const struct axp20x_time *time, |
| 75 | return container_of(attr, struct dev_ext_attribute, attr)->var; | 89 | unsigned int mask, char *buf) |
| 76 | } | ||
| 77 | |||
| 78 | static ssize_t axp20x_show_ext_attr(struct device *dev, | ||
| 79 | struct device_attribute *attr, char *buf) | ||
| 80 | { | 90 | { |
| 81 | struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev); | 91 | struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev); |
| 82 | struct axp20x_pek_ext_attr *axp20x_ea = get_axp_ext_attr(attr); | ||
| 83 | unsigned int val; | 92 | unsigned int val; |
| 84 | int ret, i; | 93 | int ret, i; |
| 85 | 94 | ||
| @@ -87,22 +96,42 @@ static ssize_t axp20x_show_ext_attr(struct device *dev, | |||
| 87 | if (ret != 0) | 96 | if (ret != 0) |
| 88 | return ret; | 97 | return ret; |
| 89 | 98 | ||
| 90 | val &= axp20x_ea->mask; | 99 | val &= mask; |
| 91 | val >>= ffs(axp20x_ea->mask) - 1; | 100 | val >>= ffs(mask) - 1; |
| 92 | 101 | ||
| 93 | for (i = 0; i < 4; i++) | 102 | for (i = 0; i < 4; i++) |
| 94 | if (val == axp20x_ea->p_time[i].idx) | 103 | if (val == time[i].idx) |
| 95 | val = axp20x_ea->p_time[i].time; | 104 | val = time[i].time; |
| 96 | 105 | ||
| 97 | return sprintf(buf, "%u\n", val); | 106 | return sprintf(buf, "%u\n", val); |
| 98 | } | 107 | } |
| 99 | 108 | ||
| 100 | static ssize_t axp20x_store_ext_attr(struct device *dev, | 109 | static ssize_t axp20x_show_attr_startup(struct device *dev, |
| 101 | struct device_attribute *attr, | 110 | struct device_attribute *attr, |
| 102 | const char *buf, size_t count) | 111 | char *buf) |
| 112 | { | ||
| 113 | struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev); | ||
| 114 | |||
| 115 | return axp20x_show_attr(dev, axp20x_pek->info->startup_time, | ||
| 116 | axp20x_pek->info->startup_mask, buf); | ||
| 117 | } | ||
| 118 | |||
| 119 | static ssize_t axp20x_show_attr_shutdown(struct device *dev, | ||
| 120 | struct device_attribute *attr, | ||
| 121 | char *buf) | ||
| 122 | { | ||
| 123 | struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev); | ||
| 124 | |||
| 125 | return axp20x_show_attr(dev, axp20x_pek->info->shutdown_time, | ||
| 126 | axp20x_pek->info->shutdown_mask, buf); | ||
| 127 | } | ||
| 128 | |||
| 129 | static ssize_t axp20x_store_attr(struct device *dev, | ||
| 130 | const struct axp20x_time *time, | ||
| 131 | unsigned int mask, const char *buf, | ||
| 132 | size_t count) | ||
| 103 | { | 133 | { |
| 104 | struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev); | 134 | struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev); |
| 105 | struct axp20x_pek_ext_attr *axp20x_ea = get_axp_ext_attr(attr); | ||
| 106 | char val_str[20]; | 135 | char val_str[20]; |
| 107 | size_t len; | 136 | size_t len; |
| 108 | int ret, i; | 137 | int ret, i; |
| @@ -123,39 +152,52 @@ static ssize_t axp20x_store_ext_attr(struct device *dev, | |||
| 123 | for (i = 3; i >= 0; i--) { | 152 | for (i = 3; i >= 0; i--) { |
| 124 | unsigned int err; | 153 | unsigned int err; |
| 125 | 154 | ||
| 126 | err = abs(axp20x_ea->p_time[i].time - val); | 155 | err = abs(time[i].time - val); |
| 127 | if (err < best_err) { | 156 | if (err < best_err) { |
| 128 | best_err = err; | 157 | best_err = err; |
| 129 | idx = axp20x_ea->p_time[i].idx; | 158 | idx = time[i].idx; |
| 130 | } | 159 | } |
| 131 | 160 | ||
| 132 | if (!err) | 161 | if (!err) |
| 133 | break; | 162 | break; |
| 134 | } | 163 | } |
| 135 | 164 | ||
| 136 | idx <<= ffs(axp20x_ea->mask) - 1; | 165 | idx <<= ffs(mask) - 1; |
| 137 | ret = regmap_update_bits(axp20x_pek->axp20x->regmap, | 166 | ret = regmap_update_bits(axp20x_pek->axp20x->regmap, AXP20X_PEK_KEY, |
| 138 | AXP20X_PEK_KEY, | 167 | mask, idx); |
| 139 | axp20x_ea->mask, idx); | ||
| 140 | if (ret != 0) | 168 | if (ret != 0) |
| 141 | return -EINVAL; | 169 | return -EINVAL; |
| 142 | 170 | ||
| 143 | return count; | 171 | return count; |
| 144 | } | 172 | } |
| 145 | 173 | ||
| 146 | static struct dev_ext_attribute axp20x_dev_attr_startup = { | 174 | static ssize_t axp20x_store_attr_startup(struct device *dev, |
| 147 | .attr = __ATTR(startup, 0644, axp20x_show_ext_attr, axp20x_store_ext_attr), | 175 | struct device_attribute *attr, |
| 148 | .var = &axp20x_pek_startup_ext_attr, | 176 | const char *buf, size_t count) |
| 149 | }; | 177 | { |
| 178 | struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev); | ||
| 150 | 179 | ||
| 151 | static struct dev_ext_attribute axp20x_dev_attr_shutdown = { | 180 | return axp20x_store_attr(dev, axp20x_pek->info->startup_time, |
| 152 | .attr = __ATTR(shutdown, 0644, axp20x_show_ext_attr, axp20x_store_ext_attr), | 181 | axp20x_pek->info->startup_mask, buf, count); |
| 153 | .var = &axp20x_pek_shutdown_ext_attr, | 182 | } |
| 154 | }; | 183 | |
| 184 | static ssize_t axp20x_store_attr_shutdown(struct device *dev, | ||
| 185 | struct device_attribute *attr, | ||
| 186 | const char *buf, size_t count) | ||
| 187 | { | ||
| 188 | struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev); | ||
| 189 | |||
| 190 | return axp20x_store_attr(dev, axp20x_pek->info->shutdown_time, | ||
| 191 | axp20x_pek->info->shutdown_mask, buf, count); | ||
| 192 | } | ||
| 193 | |||
| 194 | static DEVICE_ATTR(startup, 0644, axp20x_show_attr_startup, axp20x_store_attr_startup); | ||
| 195 | static DEVICE_ATTR(shutdown, 0644, axp20x_show_attr_shutdown, | ||
| 196 | axp20x_store_attr_shutdown); | ||
| 155 | 197 | ||
| 156 | static struct attribute *axp20x_attributes[] = { | 198 | static struct attribute *axp20x_attributes[] = { |
| 157 | &axp20x_dev_attr_startup.attr.attr, | 199 | &dev_attr_startup.attr, |
| 158 | &axp20x_dev_attr_shutdown.attr.attr, | 200 | &dev_attr_shutdown.attr, |
| 159 | NULL, | 201 | NULL, |
| 160 | }; | 202 | }; |
| 161 | 203 | ||
| @@ -182,13 +224,6 @@ static irqreturn_t axp20x_pek_irq(int irq, void *pwr) | |||
| 182 | return IRQ_HANDLED; | 224 | return IRQ_HANDLED; |
| 183 | } | 225 | } |
| 184 | 226 | ||
| 185 | static void axp20x_remove_sysfs_group(void *_data) | ||
| 186 | { | ||
| 187 | struct device *dev = _data; | ||
| 188 | |||
| 189 | sysfs_remove_group(&dev->kobj, &axp20x_attribute_group); | ||
| 190 | } | ||
| 191 | |||
| 192 | static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek, | 227 | static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek, |
| 193 | struct platform_device *pdev) | 228 | struct platform_device *pdev) |
| 194 | { | 229 | { |
| @@ -298,8 +333,14 @@ static bool axp20x_pek_should_register_input(struct axp20x_pek *axp20x_pek, | |||
| 298 | static int axp20x_pek_probe(struct platform_device *pdev) | 333 | static int axp20x_pek_probe(struct platform_device *pdev) |
| 299 | { | 334 | { |
| 300 | struct axp20x_pek *axp20x_pek; | 335 | struct axp20x_pek *axp20x_pek; |
| 336 | const struct platform_device_id *match = platform_get_device_id(pdev); | ||
| 301 | int error; | 337 | int error; |
| 302 | 338 | ||
| 339 | if (!match) { | ||
| 340 | dev_err(&pdev->dev, "Failed to get platform_device_id\n"); | ||
| 341 | return -EINVAL; | ||
| 342 | } | ||
| 343 | |||
| 303 | axp20x_pek = devm_kzalloc(&pdev->dev, sizeof(struct axp20x_pek), | 344 | axp20x_pek = devm_kzalloc(&pdev->dev, sizeof(struct axp20x_pek), |
| 304 | GFP_KERNEL); | 345 | GFP_KERNEL); |
| 305 | if (!axp20x_pek) | 346 | if (!axp20x_pek) |
| @@ -313,18 +354,11 @@ static int axp20x_pek_probe(struct platform_device *pdev) | |||
| 313 | return error; | 354 | return error; |
| 314 | } | 355 | } |
| 315 | 356 | ||
| 316 | error = sysfs_create_group(&pdev->dev.kobj, &axp20x_attribute_group); | 357 | axp20x_pek->info = (struct axp20x_info *)match->driver_data; |
| 317 | if (error) { | ||
| 318 | dev_err(&pdev->dev, "Failed to create sysfs attributes: %d\n", | ||
| 319 | error); | ||
| 320 | return error; | ||
| 321 | } | ||
| 322 | 358 | ||
| 323 | error = devm_add_action(&pdev->dev, | 359 | error = devm_device_add_group(&pdev->dev, &axp20x_attribute_group); |
| 324 | axp20x_remove_sysfs_group, &pdev->dev); | ||
| 325 | if (error) { | 360 | if (error) { |
| 326 | axp20x_remove_sysfs_group(&pdev->dev); | 361 | dev_err(&pdev->dev, "Failed to create sysfs attributes: %d\n", |
| 327 | dev_err(&pdev->dev, "Failed to add sysfs cleanup action: %d\n", | ||
| 328 | error); | 362 | error); |
| 329 | return error; | 363 | return error; |
| 330 | } | 364 | } |
| @@ -358,8 +392,21 @@ static const struct dev_pm_ops axp20x_pek_pm_ops = { | |||
| 358 | #endif | 392 | #endif |
| 359 | }; | 393 | }; |
| 360 | 394 | ||
| 395 | static const struct platform_device_id axp_pek_id_match[] = { | ||
| 396 | { | ||
| 397 | .name = "axp20x-pek", | ||
| 398 | .driver_data = (kernel_ulong_t)&axp20x_info, | ||
| 399 | }, | ||
| 400 | { | ||
| 401 | .name = "axp221-pek", | ||
| 402 | .driver_data = (kernel_ulong_t)&axp221_info, | ||
| 403 | }, | ||
| 404 | { /* sentinel */ } | ||
| 405 | }; | ||
| 406 | |||
| 361 | static struct platform_driver axp20x_pek_driver = { | 407 | static struct platform_driver axp20x_pek_driver = { |
| 362 | .probe = axp20x_pek_probe, | 408 | .probe = axp20x_pek_probe, |
| 409 | .id_table = axp_pek_id_match, | ||
| 363 | .driver = { | 410 | .driver = { |
| 364 | .name = "axp20x-pek", | 411 | .name = "axp20x-pek", |
| 365 | .pm = &axp20x_pek_pm_ops, | 412 | .pm = &axp20x_pek_pm_ops, |
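The axp20x-pek hunk above replaces the dev_ext_attribute plumbing with per-variant timing tables selected through platform_device_id driver_data; the sysfs store path then maps a requested time to the nearest supported table entry. A minimal userspace sketch of that nearest-match loop (table values copied from the hunk; closest_idx() and the test value are illustrative, not driver API):

/* Sketch of the nearest-supported-time selection done in
 * axp20x_store_attr(); closest_idx() is an illustrative helper,
 * not part of the driver.  Table values are the AXP221 startup
 * times from the hunk above.
 */
#include <stdio.h>
#include <limits.h>

struct axp20x_time {
	unsigned int time;	/* milliseconds */
	unsigned int idx;	/* register field value */
};

static const struct axp20x_time axp221_startup_time[] = {
	{ .time = 128,  .idx = 0 },
	{ .time = 1000, .idx = 1 },
	{ .time = 2000, .idx = 2 },
	{ .time = 3000, .idx = 3 },
};

static unsigned int closest_idx(const struct axp20x_time *t, int n,
				unsigned int val)
{
	unsigned int best_err = UINT_MAX, idx = 0;
	int i;

	for (i = n - 1; i >= 0; i--) {
		unsigned int err = t[i].time > val ? t[i].time - val
						   : val - t[i].time;

		if (err < best_err) {
			best_err = err;
			idx = t[i].idx;
		}
		if (!err)
			break;
	}
	return idx;
}

int main(void)
{
	/* 900 ms is not a supported value; 1000 ms (idx 1) is closest. */
	printf("idx for 900 ms: %u\n",
	       closest_idx(axp221_startup_time, 4, 900));
	return 0;
}

Because the loop walks the table from the longest time down and only replaces on a strictly smaller error, a tie between two equally close entries resolves to the longer time.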
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c index f4e8fbec6a94..6bf82ea8c918 100644 --- a/drivers/input/misc/ims-pcu.c +++ b/drivers/input/misc/ims-pcu.c | |||
| @@ -1261,7 +1261,7 @@ static umode_t ims_pcu_is_attr_visible(struct kobject *kobj, | |||
| 1261 | return mode; | 1261 | return mode; |
| 1262 | } | 1262 | } |
| 1263 | 1263 | ||
| 1264 | static struct attribute_group ims_pcu_attr_group = { | 1264 | static const struct attribute_group ims_pcu_attr_group = { |
| 1265 | .is_visible = ims_pcu_is_attr_visible, | 1265 | .is_visible = ims_pcu_is_attr_visible, |
| 1266 | .attrs = ims_pcu_attrs, | 1266 | .attrs = ims_pcu_attrs, |
| 1267 | }; | 1267 | }; |
| @@ -1480,7 +1480,7 @@ static struct attribute *ims_pcu_ofn_attrs[] = { | |||
| 1480 | NULL | 1480 | NULL |
| 1481 | }; | 1481 | }; |
| 1482 | 1482 | ||
| 1483 | static struct attribute_group ims_pcu_ofn_attr_group = { | 1483 | static const struct attribute_group ims_pcu_ofn_attr_group = { |
| 1484 | .name = "ofn", | 1484 | .name = "ofn", |
| 1485 | .attrs = ims_pcu_ofn_attrs, | 1485 | .attrs = ims_pcu_ofn_attrs, |
| 1486 | }; | 1486 | }; |
diff --git a/drivers/input/misc/keyspan_remote.c b/drivers/input/misc/keyspan_remote.c index a3fe4a990cc9..77c47d6325fe 100644 --- a/drivers/input/misc/keyspan_remote.c +++ b/drivers/input/misc/keyspan_remote.c | |||
| @@ -85,7 +85,7 @@ static const unsigned short keyspan_key_table[] = { | |||
| 85 | }; | 85 | }; |
| 86 | 86 | ||
| 87 | /* table of devices that work with this driver */ | 87 | /* table of devices that work with this driver */ |
| 88 | static struct usb_device_id keyspan_table[] = { | 88 | static const struct usb_device_id keyspan_table[] = { |
| 89 | { USB_DEVICE(USB_KEYSPAN_VENDOR_ID, USB_KEYSPAN_PRODUCT_UIA11) }, | 89 | { USB_DEVICE(USB_KEYSPAN_VENDOR_ID, USB_KEYSPAN_PRODUCT_UIA11) }, |
| 90 | { } /* Terminating entry */ | 90 | { } /* Terminating entry */ |
| 91 | }; | 91 | }; |
diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c index 72b1fc3ab910..56ddba21de84 100644 --- a/drivers/input/misc/pcspkr.c +++ b/drivers/input/misc/pcspkr.c | |||
| @@ -18,25 +18,30 @@ | |||
| 18 | #include <linux/input.h> | 18 | #include <linux/input.h> |
| 19 | #include <linux/platform_device.h> | 19 | #include <linux/platform_device.h> |
| 20 | #include <linux/timex.h> | 20 | #include <linux/timex.h> |
| 21 | #include <asm/io.h> | 21 | #include <linux/io.h> |
| 22 | 22 | ||
| 23 | MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); | 23 | MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); |
| 24 | MODULE_DESCRIPTION("PC Speaker beeper driver"); | 24 | MODULE_DESCRIPTION("PC Speaker beeper driver"); |
| 25 | MODULE_LICENSE("GPL"); | 25 | MODULE_LICENSE("GPL"); |
| 26 | MODULE_ALIAS("platform:pcspkr"); | 26 | MODULE_ALIAS("platform:pcspkr"); |
| 27 | 27 | ||
| 28 | static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) | 28 | static int pcspkr_event(struct input_dev *dev, unsigned int type, |
| 29 | unsigned int code, int value) | ||
| 29 | { | 30 | { |
| 30 | unsigned int count = 0; | 31 | unsigned int count = 0; |
| 31 | unsigned long flags; | 32 | unsigned long flags; |
| 32 | 33 | ||
| 33 | if (type != EV_SND) | 34 | if (type != EV_SND) |
| 34 | return -1; | 35 | return -EINVAL; |
| 35 | 36 | ||
| 36 | switch (code) { | 37 | switch (code) { |
| 37 | case SND_BELL: if (value) value = 1000; | 38 | case SND_BELL: |
| 38 | case SND_TONE: break; | 39 | if (value) |
| 39 | default: return -1; | 40 | value = 1000; |
| 41 | case SND_TONE: | ||
| 42 | break; | ||
| 43 | default: | ||
| 44 | return -EINVAL; | ||
| 40 | } | 45 | } |
| 41 | 46 | ||
| 42 | if (value > 20 && value < 32767) | 47 | if (value > 20 && value < 32767) |
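For context, the value accepted by the range check above ends up programming PIT channel 2, and the SND_BELL case deliberately falls through as a 1000 Hz tone. A small sketch of that conversion, assuming the usual count = PIT_TICK_RATE / frequency programming that pcspkr performs (pit_count() is an illustrative helper, not the driver's code):

#include <stdio.h>

#define PIT_TICK_RATE	1193182u	/* i8253/i8254 input clock, Hz */

/* Sketch of the tone-to-divisor conversion implied by the range check
 * above; out-of-range frequencies leave the speaker off. */
static unsigned int pit_count(unsigned int hz)
{
	if (hz <= 20 || hz >= 32767)
		return 0;	/* out of range: speaker stays off */
	return PIT_TICK_RATE / hz;
}

int main(void)
{
	printf("1000 Hz -> divisor %u\n", pit_count(1000));	/* 1193 */
	return 0;
}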
diff --git a/drivers/input/misc/powermate.c b/drivers/input/misc/powermate.c index 84909a12ff36..5c8c79623c87 100644 --- a/drivers/input/misc/powermate.c +++ b/drivers/input/misc/powermate.c | |||
| @@ -432,7 +432,7 @@ static void powermate_disconnect(struct usb_interface *intf) | |||
| 432 | } | 432 | } |
| 433 | } | 433 | } |
| 434 | 434 | ||
| 435 | static struct usb_device_id powermate_devices [] = { | 435 | static const struct usb_device_id powermate_devices[] = { |
| 436 | { USB_DEVICE(POWERMATE_VENDOR, POWERMATE_PRODUCT_NEW) }, | 436 | { USB_DEVICE(POWERMATE_VENDOR, POWERMATE_PRODUCT_NEW) }, |
| 437 | { USB_DEVICE(POWERMATE_VENDOR, POWERMATE_PRODUCT_OLD) }, | 437 | { USB_DEVICE(POWERMATE_VENDOR, POWERMATE_PRODUCT_OLD) }, |
| 438 | { USB_DEVICE(CONTOUR_VENDOR, CONTOUR_JOG) }, | 438 | { USB_DEVICE(CONTOUR_VENDOR, CONTOUR_JOG) }, |
diff --git a/drivers/input/misc/rk805-pwrkey.c b/drivers/input/misc/rk805-pwrkey.c new file mode 100644 index 000000000000..921003963a53 --- /dev/null +++ b/drivers/input/misc/rk805-pwrkey.c | |||
| @@ -0,0 +1,111 @@ | |||
| 1 | /* | ||
| 2 | * Rockchip RK805 PMIC Power Key driver | ||
| 3 | * | ||
| 4 | * Copyright (c) 2017, Fuzhou Rockchip Electronics Co., Ltd | ||
| 5 | * | ||
| 6 | * Author: Joseph Chen <chenjh@rock-chips.com> | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms of the GNU General Public License as published by the | ||
| 10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
| 11 | * option) any later version. | ||
| 12 | */ | ||
| 13 | |||
| 14 | #include <linux/errno.h> | ||
| 15 | #include <linux/init.h> | ||
| 16 | #include <linux/input.h> | ||
| 17 | #include <linux/interrupt.h> | ||
| 18 | #include <linux/kernel.h> | ||
| 19 | #include <linux/module.h> | ||
| 20 | #include <linux/platform_device.h> | ||
| 21 | |||
| 22 | static irqreturn_t pwrkey_fall_irq(int irq, void *_pwr) | ||
| 23 | { | ||
| 24 | struct input_dev *pwr = _pwr; | ||
| 25 | |||
| 26 | input_report_key(pwr, KEY_POWER, 1); | ||
| 27 | input_sync(pwr); | ||
| 28 | |||
| 29 | return IRQ_HANDLED; | ||
| 30 | } | ||
| 31 | |||
| 32 | static irqreturn_t pwrkey_rise_irq(int irq, void *_pwr) | ||
| 33 | { | ||
| 34 | struct input_dev *pwr = _pwr; | ||
| 35 | |||
| 36 | input_report_key(pwr, KEY_POWER, 0); | ||
| 37 | input_sync(pwr); | ||
| 38 | |||
| 39 | return IRQ_HANDLED; | ||
| 40 | } | ||
| 41 | |||
| 42 | static int rk805_pwrkey_probe(struct platform_device *pdev) | ||
| 43 | { | ||
| 44 | struct input_dev *pwr; | ||
| 45 | int fall_irq, rise_irq; | ||
| 46 | int err; | ||
| 47 | |||
| 48 | pwr = devm_input_allocate_device(&pdev->dev); | ||
| 49 | if (!pwr) { | ||
| 50 | dev_err(&pdev->dev, "Can't allocate power button\n"); | ||
| 51 | return -ENOMEM; | ||
| 52 | } | ||
| 53 | |||
| 54 | pwr->name = "rk805 pwrkey"; | ||
| 55 | pwr->phys = "rk805_pwrkey/input0"; | ||
| 56 | pwr->id.bustype = BUS_HOST; | ||
| 57 | input_set_capability(pwr, EV_KEY, KEY_POWER); | ||
| 58 | |||
| 59 | fall_irq = platform_get_irq(pdev, 0); | ||
| 60 | if (fall_irq < 0) { | ||
| 61 | dev_err(&pdev->dev, "Can't get fall irq: %d\n", fall_irq); | ||
| 62 | return fall_irq; | ||
| 63 | } | ||
| 64 | |||
| 65 | rise_irq = platform_get_irq(pdev, 1); | ||
| 66 | if (rise_irq < 0) { | ||
| 67 | dev_err(&pdev->dev, "Can't get rise irq: %d\n", rise_irq); | ||
| 68 | return rise_irq; | ||
| 69 | } | ||
| 70 | |||
| 71 | err = devm_request_any_context_irq(&pwr->dev, fall_irq, | ||
| 72 | pwrkey_fall_irq, | ||
| 73 | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, | ||
| 74 | "rk805_pwrkey_fall", pwr); | ||
| 75 | if (err < 0) { | ||
| 76 | dev_err(&pdev->dev, "Can't register fall irq: %d\n", err); | ||
| 77 | return err; | ||
| 78 | } | ||
| 79 | |||
| 80 | err = devm_request_any_context_irq(&pwr->dev, rise_irq, | ||
| 81 | pwrkey_rise_irq, | ||
| 82 | IRQF_TRIGGER_RISING | IRQF_ONESHOT, | ||
| 83 | "rk805_pwrkey_rise", pwr); | ||
| 84 | if (err < 0) { | ||
| 85 | dev_err(&pdev->dev, "Can't register rise irq: %d\n", err); | ||
| 86 | return err; | ||
| 87 | } | ||
| 88 | |||
| 89 | err = input_register_device(pwr); | ||
| 90 | if (err) { | ||
| 91 | dev_err(&pdev->dev, "Can't register power button: %d\n", err); | ||
| 92 | return err; | ||
| 93 | } | ||
| 94 | |||
| 95 | platform_set_drvdata(pdev, pwr); | ||
| 96 | device_init_wakeup(&pdev->dev, true); | ||
| 97 | |||
| 98 | return 0; | ||
| 99 | } | ||
| 100 | |||
| 101 | static struct platform_driver rk805_pwrkey_driver = { | ||
| 102 | .probe = rk805_pwrkey_probe, | ||
| 103 | .driver = { | ||
| 104 | .name = "rk805-pwrkey", | ||
| 105 | }, | ||
| 106 | }; | ||
| 107 | module_platform_driver(rk805_pwrkey_driver); | ||
| 108 | |||
| 109 | MODULE_AUTHOR("Joseph Chen <chenjh@rock-chips.com>"); | ||
| 110 | MODULE_DESCRIPTION("RK805 PMIC Power Key driver"); | ||
| 111 | MODULE_LICENSE("GPL"); | ||
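The new rk805-pwrkey driver forwards the PMIC's falling and rising interrupts as KEY_POWER press and release events on a plain input device. A userspace sketch for observing them (the /dev/input/event0 path is an assumption; match the node against the "rk805 pwrkey" name in /proc/bus/input/devices):

#include <fcntl.h>
#include <linux/input.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct input_event ev;
	int fd = open("/dev/input/event0", O_RDONLY);	/* assumed node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Print every KEY_POWER transition reported by the driver. */
	while (read(fd, &ev, sizeof(ev)) == (ssize_t)sizeof(ev)) {
		if (ev.type == EV_KEY && ev.code == KEY_POWER)
			printf("power key %s\n",
			       ev.value ? "pressed" : "released");
	}

	close(fd);
	return 0;
}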
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c index fa130e7b734c..6bf56bb5f8d9 100644 --- a/drivers/input/misc/xen-kbdfront.c +++ b/drivers/input/misc/xen-kbdfront.c | |||
| @@ -84,17 +84,20 @@ static void xenkbd_handle_key_event(struct xenkbd_info *info, | |||
| 84 | struct xenkbd_key *key) | 84 | struct xenkbd_key *key) |
| 85 | { | 85 | { |
| 86 | struct input_dev *dev; | 86 | struct input_dev *dev; |
| 87 | int value = key->pressed; | ||
| 87 | 88 | ||
| 88 | if (test_bit(key->keycode, info->ptr->keybit)) { | 89 | if (test_bit(key->keycode, info->ptr->keybit)) { |
| 89 | dev = info->ptr; | 90 | dev = info->ptr; |
| 90 | } else if (test_bit(key->keycode, info->kbd->keybit)) { | 91 | } else if (test_bit(key->keycode, info->kbd->keybit)) { |
| 91 | dev = info->kbd; | 92 | dev = info->kbd; |
| 93 | if (key->pressed && test_bit(key->keycode, info->kbd->key)) | ||
| 94 | value = 2; /* Mark as autorepeat */ | ||
| 92 | } else { | 95 | } else { |
| 93 | pr_warn("unhandled keycode 0x%x\n", key->keycode); | 96 | pr_warn("unhandled keycode 0x%x\n", key->keycode); |
| 94 | return; | 97 | return; |
| 95 | } | 98 | } |
| 96 | 99 | ||
| 97 | input_report_key(dev, key->keycode, key->pressed); | 100 | input_event(dev, EV_KEY, key->keycode, value); |
| 98 | input_sync(dev); | 101 | input_sync(dev); |
| 99 | } | 102 | } |
| 100 | 103 | ||
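The xen-kbdfront change reports value 2 when a key that is already down is pressed again, which the input core treats as autorepeat rather than a fresh press. A tiny standalone sketch of that selection (key_event_value() is illustrative; already_down stands in for test_bit(keycode, dev->key)):

#include <stdio.h>

/* EV_KEY value convention used by the hunk above:
 * 0 = release, 1 = press, 2 = autorepeat. */
static int key_event_value(int pressed, int already_down)
{
	if (!pressed)
		return 0;
	return already_down ? 2 : 1;
}

int main(void)
{
	printf("fresh press -> %d\n", key_event_value(1, 0));	/* 1 */
	printf("repeat      -> %d\n", key_event_value(1, 1));	/* 2 */
	printf("release     -> %d\n", key_event_value(0, 1));	/* 0 */
	return 0;
}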
diff --git a/drivers/input/misc/yealink.c b/drivers/input/misc/yealink.c index 6e7ff9561d92..a1e0ff59d2f2 100644 --- a/drivers/input/misc/yealink.c +++ b/drivers/input/misc/yealink.c | |||
| @@ -798,7 +798,7 @@ static struct attribute *yld_attributes[] = { | |||
| 798 | NULL | 798 | NULL |
| 799 | }; | 799 | }; |
| 800 | 800 | ||
| 801 | static struct attribute_group yld_attr_group = { | 801 | static const struct attribute_group yld_attr_group = { |
| 802 | .attrs = yld_attributes | 802 | .attrs = yld_attributes |
| 803 | }; | 803 | }; |
| 804 | 804 | ||
diff --git a/drivers/input/mouse/appletouch.c b/drivers/input/mouse/appletouch.c index ef234c9b2f2f..81a695d0b4e0 100644 --- a/drivers/input/mouse/appletouch.c +++ b/drivers/input/mouse/appletouch.c | |||
| @@ -125,7 +125,7 @@ static const struct atp_info geyser4_info = { | |||
| 125 | * According to Info.plist Geyser IV is the same as Geyser III.) | 125 | * According to Info.plist Geyser IV is the same as Geyser III.) |
| 126 | */ | 126 | */ |
| 127 | 127 | ||
| 128 | static struct usb_device_id atp_table[] = { | 128 | static const struct usb_device_id atp_table[] = { |
| 129 | /* PowerBooks Feb 2005, iBooks G4 */ | 129 | /* PowerBooks Feb 2005, iBooks G4 */ |
| 130 | ATP_DEVICE(0x020e, fountain_info), /* FOUNTAIN ANSI */ | 130 | ATP_DEVICE(0x020e, fountain_info), /* FOUNTAIN ANSI */ |
| 131 | ATP_DEVICE(0x020f, fountain_info), /* FOUNTAIN ISO */ | 131 | ATP_DEVICE(0x020f, fountain_info), /* FOUNTAIN ISO */ |
diff --git a/drivers/input/mouse/byd.c b/drivers/input/mouse/byd.c index b27aa637f877..b64b81599f7e 100644 --- a/drivers/input/mouse/byd.c +++ b/drivers/input/mouse/byd.c | |||
| @@ -344,7 +344,7 @@ static int byd_reset_touchpad(struct psmouse *psmouse) | |||
| 344 | u8 param[4]; | 344 | u8 param[4]; |
| 345 | size_t i; | 345 | size_t i; |
| 346 | 346 | ||
| 347 | const struct { | 347 | static const struct { |
| 348 | u16 command; | 348 | u16 command; |
| 349 | u8 arg; | 349 | u8 arg; |
| 350 | } seq[] = { | 350 | } seq[] = { |
diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h index 61c202436250..599544c1a91c 100644 --- a/drivers/input/mouse/elan_i2c.h +++ b/drivers/input/mouse/elan_i2c.h | |||
| @@ -58,7 +58,7 @@ struct elan_transport_ops { | |||
| 58 | 58 | ||
| 59 | int (*get_version)(struct i2c_client *client, bool iap, u8 *version); | 59 | int (*get_version)(struct i2c_client *client, bool iap, u8 *version); |
| 60 | int (*get_sm_version)(struct i2c_client *client, | 60 | int (*get_sm_version)(struct i2c_client *client, |
| 61 | u16 *ic_type, u8 *version); | 61 | u16 *ic_type, u8 *version, u8 *clickpad); |
| 62 | int (*get_checksum)(struct i2c_client *client, bool iap, u16 *csum); | 62 | int (*get_checksum)(struct i2c_client *client, bool iap, u16 *csum); |
| 63 | int (*get_product_id)(struct i2c_client *client, u16 *id); | 63 | int (*get_product_id)(struct i2c_client *client, u16 *id); |
| 64 | 64 | ||
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index cfbc8ba4c96c..0e761d079dc4 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c | |||
| @@ -95,6 +95,7 @@ struct elan_tp_data { | |||
| 95 | u8 min_baseline; | 95 | u8 min_baseline; |
| 96 | u8 max_baseline; | 96 | u8 max_baseline; |
| 97 | bool baseline_ready; | 97 | bool baseline_ready; |
| 98 | u8 clickpad; | ||
| 98 | }; | 99 | }; |
| 99 | 100 | ||
| 100 | static int elan_get_fwinfo(u16 ic_type, u16 *validpage_count, | 101 | static int elan_get_fwinfo(u16 ic_type, u16 *validpage_count, |
| @@ -213,7 +214,7 @@ static int elan_query_product(struct elan_tp_data *data) | |||
| 213 | return error; | 214 | return error; |
| 214 | 215 | ||
| 215 | error = data->ops->get_sm_version(data->client, &data->ic_type, | 216 | error = data->ops->get_sm_version(data->client, &data->ic_type, |
| 216 | &data->sm_version); | 217 | &data->sm_version, &data->clickpad); |
| 217 | if (error) | 218 | if (error) |
| 218 | return error; | 219 | return error; |
| 219 | 220 | ||
| @@ -923,6 +924,7 @@ static void elan_report_absolute(struct elan_tp_data *data, u8 *packet) | |||
| 923 | } | 924 | } |
| 924 | 925 | ||
| 925 | input_report_key(input, BTN_LEFT, tp_info & 0x01); | 926 | input_report_key(input, BTN_LEFT, tp_info & 0x01); |
| 927 | input_report_key(input, BTN_RIGHT, tp_info & 0x02); | ||
| 926 | input_report_abs(input, ABS_DISTANCE, hover_event != 0); | 928 | input_report_abs(input, ABS_DISTANCE, hover_event != 0); |
| 927 | input_mt_report_pointer_emulation(input, true); | 929 | input_mt_report_pointer_emulation(input, true); |
| 928 | input_sync(input); | 930 | input_sync(input); |
| @@ -991,7 +993,10 @@ static int elan_setup_input_device(struct elan_tp_data *data) | |||
| 991 | 993 | ||
| 992 | __set_bit(EV_ABS, input->evbit); | 994 | __set_bit(EV_ABS, input->evbit); |
| 993 | __set_bit(INPUT_PROP_POINTER, input->propbit); | 995 | __set_bit(INPUT_PROP_POINTER, input->propbit); |
| 994 | __set_bit(INPUT_PROP_BUTTONPAD, input->propbit); | 996 | if (data->clickpad) |
| 997 | __set_bit(INPUT_PROP_BUTTONPAD, input->propbit); | ||
| 998 | else | ||
| 999 | __set_bit(BTN_RIGHT, input->keybit); | ||
| 995 | __set_bit(BTN_LEFT, input->keybit); | 1000 | __set_bit(BTN_LEFT, input->keybit); |
| 996 | 1001 | ||
| 997 | /* Set up ST parameters */ | 1002 | /* Set up ST parameters */ |
diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c index 80172f25974d..15b1330606c1 100644 --- a/drivers/input/mouse/elan_i2c_i2c.c +++ b/drivers/input/mouse/elan_i2c_i2c.c | |||
| @@ -288,7 +288,8 @@ static int elan_i2c_get_version(struct i2c_client *client, | |||
| 288 | } | 288 | } |
| 289 | 289 | ||
| 290 | static int elan_i2c_get_sm_version(struct i2c_client *client, | 290 | static int elan_i2c_get_sm_version(struct i2c_client *client, |
| 291 | u16 *ic_type, u8 *version) | 291 | u16 *ic_type, u8 *version, |
| 292 | u8 *clickpad) | ||
| 292 | { | 293 | { |
| 293 | int error; | 294 | int error; |
| 294 | u8 pattern_ver; | 295 | u8 pattern_ver; |
| @@ -317,6 +318,7 @@ static int elan_i2c_get_sm_version(struct i2c_client *client, | |||
| 317 | return error; | 318 | return error; |
| 318 | } | 319 | } |
| 319 | *version = val[1]; | 320 | *version = val[1]; |
| 321 | *clickpad = val[0] & 0x10; | ||
| 320 | } else { | 322 | } else { |
| 321 | error = elan_i2c_read_cmd(client, ETP_I2C_OSM_VERSION_CMD, val); | 323 | error = elan_i2c_read_cmd(client, ETP_I2C_OSM_VERSION_CMD, val); |
| 322 | if (error) { | 324 | if (error) { |
| @@ -326,6 +328,15 @@ static int elan_i2c_get_sm_version(struct i2c_client *client, | |||
| 326 | } | 328 | } |
| 327 | *version = val[0]; | 329 | *version = val[0]; |
| 328 | *ic_type = val[1]; | 330 | *ic_type = val[1]; |
| 331 | |||
| 332 | error = elan_i2c_read_cmd(client, ETP_I2C_NSM_VERSION_CMD, | ||
| 333 | val); | ||
| 334 | if (error) { | ||
| 335 | dev_err(&client->dev, "failed to get SM version: %d\n", | ||
| 336 | error); | ||
| 337 | return error; | ||
| 338 | } | ||
| 339 | *clickpad = val[0] & 0x10; | ||
| 329 | } | 340 | } |
| 330 | 341 | ||
| 331 | return 0; | 342 | return 0; |
diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c index df7a57ca7331..29f99529b187 100644 --- a/drivers/input/mouse/elan_i2c_smbus.c +++ b/drivers/input/mouse/elan_i2c_smbus.c | |||
| @@ -166,7 +166,8 @@ static int elan_smbus_get_version(struct i2c_client *client, | |||
| 166 | } | 166 | } |
| 167 | 167 | ||
| 168 | static int elan_smbus_get_sm_version(struct i2c_client *client, | 168 | static int elan_smbus_get_sm_version(struct i2c_client *client, |
| 169 | u16 *ic_type, u8 *version) | 169 | u16 *ic_type, u8 *version, |
| 170 | u8 *clickpad) | ||
| 170 | { | 171 | { |
| 171 | int error; | 172 | int error; |
| 172 | u8 val[3]; | 173 | u8 val[3]; |
| @@ -180,6 +181,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client, | |||
| 180 | 181 | ||
| 181 | *version = val[0]; | 182 | *version = val[0]; |
| 182 | *ic_type = val[1]; | 183 | *ic_type = val[1]; |
| 184 | *clickpad = val[0] & 0x10; | ||
| 183 | return 0; | 185 | return 0; |
| 184 | } | 186 | } |
| 185 | 187 | ||
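Across the three elan_i2c hunks, bit 4 of the SM/NSM version byte now distinguishes clickpads from pads with a physical right button, and elan_setup_input_device() sets INPUT_PROP_BUTTONPAD or BTN_RIGHT accordingly. A small sketch of that decision (ETP_CLICKPAD_BIT and pad_kind() are illustrative names, not the driver's):

#include <stdio.h>

#define ETP_CLICKPAD_BIT	0x10	/* bit 4 of the SM/NSM version byte */

/* Clickpad firmware gets INPUT_PROP_BUTTONPAD; everything else keeps
 * a real BTN_RIGHT in addition to BTN_LEFT. */
static const char *pad_kind(unsigned char version_byte)
{
	return (version_byte & ETP_CLICKPAD_BIT) ? "clickpad (BUTTONPAD)"
						 : "touchpad with BTN_RIGHT";
}

int main(void)
{
	printf("0x14 -> %s\n", pad_kind(0x14));
	printf("0x04 -> %s\n", pad_kind(0x04));
	return 0;
}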
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 791993215ea3..6428d6f4d568 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c | |||
| @@ -1377,7 +1377,7 @@ static struct attribute *elantech_attrs[] = { | |||
| 1377 | NULL | 1377 | NULL |
| 1378 | }; | 1378 | }; |
| 1379 | 1379 | ||
| 1380 | static struct attribute_group elantech_attr_group = { | 1380 | static const struct attribute_group elantech_attr_group = { |
| 1381 | .attrs = elantech_attrs, | 1381 | .attrs = elantech_attrs, |
| 1382 | }; | 1382 | }; |
| 1383 | 1383 | ||
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c index f73b47b8c578..6a5649e52eed 100644 --- a/drivers/input/mouse/psmouse-base.c +++ b/drivers/input/mouse/psmouse-base.c | |||
| @@ -101,7 +101,7 @@ static struct attribute *psmouse_attributes[] = { | |||
| 101 | NULL | 101 | NULL |
| 102 | }; | 102 | }; |
| 103 | 103 | ||
| 104 | static struct attribute_group psmouse_attribute_group = { | 104 | static const struct attribute_group psmouse_attribute_group = { |
| 105 | .attrs = psmouse_attributes, | 105 | .attrs = psmouse_attributes, |
| 106 | }; | 106 | }; |
| 107 | 107 | ||
diff --git a/drivers/input/mouse/synaptics_usb.c b/drivers/input/mouse/synaptics_usb.c index 6bcc0189c1c9..cb7d15d826d0 100644 --- a/drivers/input/mouse/synaptics_usb.c +++ b/drivers/input/mouse/synaptics_usb.c | |||
| @@ -525,7 +525,7 @@ static int synusb_reset_resume(struct usb_interface *intf) | |||
| 525 | return synusb_resume(intf); | 525 | return synusb_resume(intf); |
| 526 | } | 526 | } |
| 527 | 527 | ||
| 528 | static struct usb_device_id synusb_idtable[] = { | 528 | static const struct usb_device_id synusb_idtable[] = { |
| 529 | { USB_DEVICE_SYNAPTICS(TP, SYNUSB_TOUCHPAD) }, | 529 | { USB_DEVICE_SYNAPTICS(TP, SYNUSB_TOUCHPAD) }, |
| 530 | { USB_DEVICE_SYNAPTICS(INT_TP, SYNUSB_TOUCHPAD) }, | 530 | { USB_DEVICE_SYNAPTICS(INT_TP, SYNUSB_TOUCHPAD) }, |
| 531 | { USB_DEVICE_SYNAPTICS(CPAD, | 531 | { USB_DEVICE_SYNAPTICS(CPAD, |
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c index 0e0ff84088fd..2d7f691ec71c 100644 --- a/drivers/input/mousedev.c +++ b/drivers/input/mousedev.c | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | #define MOUSEDEV_MINORS 31 | 15 | #define MOUSEDEV_MINORS 31 |
| 16 | #define MOUSEDEV_MIX 63 | 16 | #define MOUSEDEV_MIX 63 |
| 17 | 17 | ||
| 18 | #include <linux/bitops.h> | ||
| 18 | #include <linux/sched.h> | 19 | #include <linux/sched.h> |
| 19 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
| 20 | #include <linux/poll.h> | 21 | #include <linux/poll.h> |
| @@ -103,7 +104,7 @@ struct mousedev_client { | |||
| 103 | spinlock_t packet_lock; | 104 | spinlock_t packet_lock; |
| 104 | int pos_x, pos_y; | 105 | int pos_x, pos_y; |
| 105 | 106 | ||
| 106 | signed char ps2[6]; | 107 | u8 ps2[6]; |
| 107 | unsigned char ready, buffer, bufsiz; | 108 | unsigned char ready, buffer, bufsiz; |
| 108 | unsigned char imexseq, impsseq; | 109 | unsigned char imexseq, impsseq; |
| 109 | enum mousedev_emul mode; | 110 | enum mousedev_emul mode; |
| @@ -291,11 +292,10 @@ static void mousedev_notify_readers(struct mousedev *mousedev, | |||
| 291 | } | 292 | } |
| 292 | 293 | ||
| 293 | client->pos_x += packet->dx; | 294 | client->pos_x += packet->dx; |
| 294 | client->pos_x = client->pos_x < 0 ? | 295 | client->pos_x = clamp_val(client->pos_x, 0, xres); |
| 295 | 0 : (client->pos_x >= xres ? xres : client->pos_x); | 296 | |
| 296 | client->pos_y += packet->dy; | 297 | client->pos_y += packet->dy; |
| 297 | client->pos_y = client->pos_y < 0 ? | 298 | client->pos_y = clamp_val(client->pos_y, 0, yres); |
| 298 | 0 : (client->pos_y >= yres ? yres : client->pos_y); | ||
| 299 | 299 | ||
| 300 | p->dx += packet->dx; | 300 | p->dx += packet->dx; |
| 301 | p->dy += packet->dy; | 301 | p->dy += packet->dy; |
| @@ -571,44 +571,50 @@ static int mousedev_open(struct inode *inode, struct file *file) | |||
| 571 | return error; | 571 | return error; |
| 572 | } | 572 | } |
| 573 | 573 | ||
| 574 | static inline int mousedev_limit_delta(int delta, int limit) | 574 | static void mousedev_packet(struct mousedev_client *client, u8 *ps2_data) |
| 575 | { | ||
| 576 | return delta > limit ? limit : (delta < -limit ? -limit : delta); | ||
| 577 | } | ||
| 578 | |||
| 579 | static void mousedev_packet(struct mousedev_client *client, | ||
| 580 | signed char *ps2_data) | ||
| 581 | { | 575 | { |
| 582 | struct mousedev_motion *p = &client->packets[client->tail]; | 576 | struct mousedev_motion *p = &client->packets[client->tail]; |
| 577 | s8 dx, dy, dz; | ||
| 578 | |||
| 579 | dx = clamp_val(p->dx, -127, 127); | ||
| 580 | p->dx -= dx; | ||
| 581 | |||
| 582 | dy = clamp_val(p->dy, -127, 127); | ||
| 583 | p->dy -= dy; | ||
| 583 | 584 | ||
| 584 | ps2_data[0] = 0x08 | | 585 | ps2_data[0] = BIT(3); |
| 585 | ((p->dx < 0) << 4) | ((p->dy < 0) << 5) | (p->buttons & 0x07); | 586 | ps2_data[0] |= ((dx & BIT(7)) >> 3) | ((dy & BIT(7)) >> 2); |
| 586 | ps2_data[1] = mousedev_limit_delta(p->dx, 127); | 587 | ps2_data[0] |= p->buttons & 0x07; |
| 587 | ps2_data[2] = mousedev_limit_delta(p->dy, 127); | 588 | ps2_data[1] = dx; |
| 588 | p->dx -= ps2_data[1]; | 589 | ps2_data[2] = dy; |
| 589 | p->dy -= ps2_data[2]; | ||
| 590 | 590 | ||
| 591 | switch (client->mode) { | 591 | switch (client->mode) { |
| 592 | case MOUSEDEV_EMUL_EXPS: | 592 | case MOUSEDEV_EMUL_EXPS: |
| 593 | ps2_data[3] = mousedev_limit_delta(p->dz, 7); | 593 | dz = clamp_val(p->dz, -7, 7); |
| 594 | p->dz -= ps2_data[3]; | 594 | p->dz -= dz; |
| 595 | ps2_data[3] = (ps2_data[3] & 0x0f) | ((p->buttons & 0x18) << 1); | 595 | |
| 596 | ps2_data[3] = (dz & 0x0f) | ((p->buttons & 0x18) << 1); | ||
| 596 | client->bufsiz = 4; | 597 | client->bufsiz = 4; |
| 597 | break; | 598 | break; |
| 598 | 599 | ||
| 599 | case MOUSEDEV_EMUL_IMPS: | 600 | case MOUSEDEV_EMUL_IMPS: |
| 600 | ps2_data[0] |= | 601 | dz = clamp_val(p->dz, -127, 127); |
| 601 | ((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1); | 602 | p->dz -= dz; |
| 602 | ps2_data[3] = mousedev_limit_delta(p->dz, 127); | 603 | |
| 603 | p->dz -= ps2_data[3]; | 604 | ps2_data[0] |= ((p->buttons & 0x10) >> 3) | |
| 605 | ((p->buttons & 0x08) >> 1); | ||
| 606 | ps2_data[3] = dz; | ||
| 607 | |||
| 604 | client->bufsiz = 4; | 608 | client->bufsiz = 4; |
| 605 | break; | 609 | break; |
| 606 | 610 | ||
| 607 | case MOUSEDEV_EMUL_PS2: | 611 | case MOUSEDEV_EMUL_PS2: |
| 608 | default: | 612 | default: |
| 609 | ps2_data[0] |= | ||
| 610 | ((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1); | ||
| 611 | p->dz = 0; | 613 | p->dz = 0; |
| 614 | |||
| 615 | ps2_data[0] |= ((p->buttons & 0x10) >> 3) | | ||
| 616 | ((p->buttons & 0x08) >> 1); | ||
| 617 | |||
| 612 | client->bufsiz = 3; | 618 | client->bufsiz = 3; |
| 613 | break; | 619 | break; |
| 614 | } | 620 | } |
| @@ -714,7 +720,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer, | |||
| 714 | { | 720 | { |
| 715 | struct mousedev_client *client = file->private_data; | 721 | struct mousedev_client *client = file->private_data; |
| 716 | struct mousedev *mousedev = client->mousedev; | 722 | struct mousedev *mousedev = client->mousedev; |
| 717 | signed char data[sizeof(client->ps2)]; | 723 | u8 data[sizeof(client->ps2)]; |
| 718 | int retval = 0; | 724 | int retval = 0; |
| 719 | 725 | ||
| 720 | if (!client->ready && !client->buffer && mousedev->exist && | 726 | if (!client->ready && !client->buffer && mousedev->exist && |
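The rewritten mousedev_packet() clamps the accumulated deltas to the signed 8-bit range and packs the sign bits and button state into the first PS/2 byte. A standalone sketch of that header byte (ps2_header() is an illustrative helper; clamp_val() and BIT() mimic the kernel macros used above):

#include <stdio.h>
#include <stdint.h>

#define BIT(n)	(1u << (n))

/* Local stand-in for the kernel's clamp_val(). */
static int clamp_val(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* First PS/2 byte as built in mousedev_packet(): bit 3 always set,
 * bits 4/5 carry the X/Y sign bits, bits 0-2 the button state. */
static uint8_t ps2_header(int dx, int dy, unsigned int buttons)
{
	int8_t cx = clamp_val(dx, -127, 127);
	int8_t cy = clamp_val(dy, -127, 127);
	uint8_t b = BIT(3);

	b |= ((cx & BIT(7)) >> 3) | ((cy & BIT(7)) >> 2);
	b |= buttons & 0x07;
	return b;
}

int main(void)
{
	/* left button held, negative dx and dy: 0x08|0x10|0x20|0x01 = 0x39 */
	printf("header = 0x%02x\n", ps2_header(-200, -5, 0x01));
	return 0;
}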
diff --git a/drivers/input/rmi4/rmi_f01.c b/drivers/input/rmi4/rmi_f01.c index 7f7e9176f7ea..ae966e333a2f 100644 --- a/drivers/input/rmi4/rmi_f01.c +++ b/drivers/input/rmi4/rmi_f01.c | |||
| @@ -334,7 +334,7 @@ static struct attribute *rmi_f01_attrs[] = { | |||
| 334 | NULL | 334 | NULL |
| 335 | }; | 335 | }; |
| 336 | 336 | ||
| 337 | static struct attribute_group rmi_f01_attr_group = { | 337 | static const struct attribute_group rmi_f01_attr_group = { |
| 338 | .attrs = rmi_f01_attrs, | 338 | .attrs = rmi_f01_attrs, |
| 339 | }; | 339 | }; |
| 340 | 340 | ||
| @@ -570,18 +570,14 @@ static int rmi_f01_probe(struct rmi_function *fn) | |||
| 570 | 570 | ||
| 571 | dev_set_drvdata(&fn->dev, f01); | 571 | dev_set_drvdata(&fn->dev, f01); |
| 572 | 572 | ||
| 573 | error = sysfs_create_group(&fn->rmi_dev->dev.kobj, &rmi_f01_attr_group); | 573 | error = devm_device_add_group(&fn->rmi_dev->dev, &rmi_f01_attr_group); |
| 574 | if (error) | 574 | if (error) |
| 575 | dev_warn(&fn->dev, "Failed to create sysfs group: %d\n", error); | 575 | dev_warn(&fn->dev, |
| 576 | "Failed to create attribute group: %d\n", error); | ||
| 576 | 577 | ||
| 577 | return 0; | 578 | return 0; |
| 578 | } | 579 | } |
| 579 | 580 | ||
| 580 | static void rmi_f01_remove(struct rmi_function *fn) | ||
| 581 | { | ||
| 582 | sysfs_remove_group(&fn->rmi_dev->dev.kobj, &rmi_f01_attr_group); | ||
| 583 | } | ||
| 584 | |||
| 585 | static int rmi_f01_config(struct rmi_function *fn) | 581 | static int rmi_f01_config(struct rmi_function *fn) |
| 586 | { | 582 | { |
| 587 | struct f01_data *f01 = dev_get_drvdata(&fn->dev); | 583 | struct f01_data *f01 = dev_get_drvdata(&fn->dev); |
| @@ -721,7 +717,6 @@ struct rmi_function_handler rmi_f01_handler = { | |||
| 721 | }, | 717 | }, |
| 722 | .func = 0x01, | 718 | .func = 0x01, |
| 723 | .probe = rmi_f01_probe, | 719 | .probe = rmi_f01_probe, |
| 724 | .remove = rmi_f01_remove, | ||
| 725 | .config = rmi_f01_config, | 720 | .config = rmi_f01_config, |
| 726 | .attention = rmi_f01_attention, | 721 | .attention = rmi_f01_attention, |
| 727 | .suspend = rmi_f01_suspend, | 722 | .suspend = rmi_f01_suspend, |
diff --git a/drivers/input/rmi4/rmi_f34.c b/drivers/input/rmi4/rmi_f34.c index b8ee78e0d61f..4cfe9703a8e7 100644 --- a/drivers/input/rmi4/rmi_f34.c +++ b/drivers/input/rmi4/rmi_f34.c | |||
| @@ -516,7 +516,7 @@ static struct attribute *rmi_firmware_attrs[] = { | |||
| 516 | NULL | 516 | NULL |
| 517 | }; | 517 | }; |
| 518 | 518 | ||
| 519 | static struct attribute_group rmi_firmware_attr_group = { | 519 | static const struct attribute_group rmi_firmware_attr_group = { |
| 520 | .attrs = rmi_firmware_attrs, | 520 | .attrs = rmi_firmware_attrs, |
| 521 | }; | 521 | }; |
| 522 | 522 | ||
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig index c3d05b4d3118..21488c048fa3 100644 --- a/drivers/input/serio/Kconfig +++ b/drivers/input/serio/Kconfig | |||
| @@ -292,6 +292,17 @@ config SERIO_SUN4I_PS2 | |||
| 292 | To compile this driver as a module, choose M here: the | 292 | To compile this driver as a module, choose M here: the |
| 293 | module will be called sun4i-ps2. | 293 | module will be called sun4i-ps2. |
| 294 | 294 | ||
| 295 | config SERIO_GPIO_PS2 | ||
| 296 | tristate "GPIO PS/2 bit banging driver" | ||
| 297 | depends on GPIOLIB | ||
| 298 | help | ||
| 299 | Say Y here if you want PS/2 bit banging support via GPIO. | ||
| 300 | |||
| 301 | To compile this driver as a module, choose M here: the | ||
| 302 | module will be called ps2-gpio. | ||
| 303 | |||
| 304 | If you are unsure, say N. | ||
| 305 | |||
| 295 | config USERIO | 306 | config USERIO |
| 296 | tristate "User space serio port driver support" | 307 | tristate "User space serio port driver support" |
| 297 | help | 308 | help |
diff --git a/drivers/input/serio/Makefile b/drivers/input/serio/Makefile index 2374ef9b33d7..767bd9b6e1ed 100644 --- a/drivers/input/serio/Makefile +++ b/drivers/input/serio/Makefile | |||
| @@ -30,4 +30,5 @@ obj-$(CONFIG_SERIO_APBPS2) += apbps2.o | |||
| 30 | obj-$(CONFIG_SERIO_OLPC_APSP) += olpc_apsp.o | 30 | obj-$(CONFIG_SERIO_OLPC_APSP) += olpc_apsp.o |
| 31 | obj-$(CONFIG_HYPERV_KEYBOARD) += hyperv-keyboard.o | 31 | obj-$(CONFIG_HYPERV_KEYBOARD) += hyperv-keyboard.o |
| 32 | obj-$(CONFIG_SERIO_SUN4I_PS2) += sun4i-ps2.o | 32 | obj-$(CONFIG_SERIO_SUN4I_PS2) += sun4i-ps2.o |
| 33 | obj-$(CONFIG_SERIO_GPIO_PS2) += ps2-gpio.o | ||
| 33 | obj-$(CONFIG_USERIO) += userio.o | 34 | obj-$(CONFIG_USERIO) += userio.o |
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c index c6606cacb6a7..ff3875cf3da1 100644 --- a/drivers/input/serio/ambakmi.c +++ b/drivers/input/serio/ambakmi.c | |||
| @@ -187,7 +187,7 @@ static int __maybe_unused amba_kmi_resume(struct device *dev) | |||
| 187 | 187 | ||
| 188 | static SIMPLE_DEV_PM_OPS(amba_kmi_dev_pm_ops, NULL, amba_kmi_resume); | 188 | static SIMPLE_DEV_PM_OPS(amba_kmi_dev_pm_ops, NULL, amba_kmi_resume); |
| 189 | 189 | ||
| 190 | static struct amba_id amba_kmi_idtable[] = { | 190 | static const struct amba_id amba_kmi_idtable[] = { |
| 191 | { | 191 | { |
| 192 | .id = 0x00041050, | 192 | .id = 0x00041050, |
| 193 | .mask = 0x000fffff, | 193 | .mask = 0x000fffff, |
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index f932a83b4990..ae81e57e13b9 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h | |||
| @@ -927,7 +927,7 @@ static int i8042_pnp_aux_probe(struct pnp_dev *dev, const struct pnp_device_id * | |||
| 927 | return 0; | 927 | return 0; |
| 928 | } | 928 | } |
| 929 | 929 | ||
| 930 | static struct pnp_device_id pnp_kbd_devids[] = { | 930 | static const struct pnp_device_id pnp_kbd_devids[] = { |
| 931 | { .id = "PNP0300", .driver_data = 0 }, | 931 | { .id = "PNP0300", .driver_data = 0 }, |
| 932 | { .id = "PNP0301", .driver_data = 0 }, | 932 | { .id = "PNP0301", .driver_data = 0 }, |
| 933 | { .id = "PNP0302", .driver_data = 0 }, | 933 | { .id = "PNP0302", .driver_data = 0 }, |
| @@ -957,7 +957,7 @@ static struct pnp_driver i8042_pnp_kbd_driver = { | |||
| 957 | }, | 957 | }, |
| 958 | }; | 958 | }; |
| 959 | 959 | ||
| 960 | static struct pnp_device_id pnp_aux_devids[] = { | 960 | static const struct pnp_device_id pnp_aux_devids[] = { |
| 961 | { .id = "AUI0200", .driver_data = 0 }, | 961 | { .id = "AUI0200", .driver_data = 0 }, |
| 962 | { .id = "FJC6000", .driver_data = 0 }, | 962 | { .id = "FJC6000", .driver_data = 0 }, |
| 963 | { .id = "FJC6001", .driver_data = 0 }, | 963 | { .id = "FJC6001", .driver_data = 0 }, |
diff --git a/drivers/input/serio/ps2-gpio.c b/drivers/input/serio/ps2-gpio.c new file mode 100644 index 000000000000..b50e3817f3c4 --- /dev/null +++ b/drivers/input/serio/ps2-gpio.c | |||
| @@ -0,0 +1,453 @@ | |||
| 1 | /* | ||
| 2 | * GPIO based serio bus driver for bit banging the PS/2 protocol | ||
| 3 | * | ||
| 4 | * Author: Danilo Krummrich <danilokrummrich@dk-develop.de> | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/gpio/consumer.h> | ||
| 12 | #include <linux/interrupt.h> | ||
| 13 | #include <linux/module.h> | ||
| 14 | #include <linux/serio.h> | ||
| 15 | #include <linux/slab.h> | ||
| 16 | #include <linux/platform_device.h> | ||
| 17 | #include <linux/workqueue.h> | ||
| 18 | #include <linux/completion.h> | ||
| 19 | #include <linux/mutex.h> | ||
| 20 | #include <linux/preempt.h> | ||
| 21 | #include <linux/property.h> | ||
| 22 | #include <linux/of.h> | ||
| 23 | #include <linux/jiffies.h> | ||
| 24 | #include <linux/delay.h> | ||
| 25 | |||
| 26 | #define DRIVER_NAME "ps2-gpio" | ||
| 27 | |||
| 28 | #define PS2_MODE_RX 0 | ||
| 29 | #define PS2_MODE_TX 1 | ||
| 30 | |||
| 31 | #define PS2_START_BIT 0 | ||
| 32 | #define PS2_DATA_BIT0 1 | ||
| 33 | #define PS2_DATA_BIT1 2 | ||
| 34 | #define PS2_DATA_BIT2 3 | ||
| 35 | #define PS2_DATA_BIT3 4 | ||
| 36 | #define PS2_DATA_BIT4 5 | ||
| 37 | #define PS2_DATA_BIT5 6 | ||
| 38 | #define PS2_DATA_BIT6 7 | ||
| 39 | #define PS2_DATA_BIT7 8 | ||
| 40 | #define PS2_PARITY_BIT 9 | ||
| 41 | #define PS2_STOP_BIT 10 | ||
| 42 | #define PS2_TX_TIMEOUT 11 | ||
| 43 | #define PS2_ACK_BIT 12 | ||
| 44 | |||
| 45 | #define PS2_DEV_RET_ACK 0xfa | ||
| 46 | #define PS2_DEV_RET_NACK 0xfe | ||
| 47 | |||
| 48 | #define PS2_CMD_RESEND 0xfe | ||
| 49 | |||
| 50 | struct ps2_gpio_data { | ||
| 51 | struct device *dev; | ||
| 52 | struct serio *serio; | ||
| 53 | unsigned char mode; | ||
| 54 | struct gpio_desc *gpio_clk; | ||
| 55 | struct gpio_desc *gpio_data; | ||
| 56 | bool write_enable; | ||
| 57 | int irq; | ||
| 58 | unsigned char rx_cnt; | ||
| 59 | unsigned char rx_byte; | ||
| 60 | unsigned char tx_cnt; | ||
| 61 | unsigned char tx_byte; | ||
| 62 | struct completion tx_done; | ||
| 63 | struct mutex tx_mutex; | ||
| 64 | struct delayed_work tx_work; | ||
| 65 | }; | ||
| 66 | |||
| 67 | static int ps2_gpio_open(struct serio *serio) | ||
| 68 | { | ||
| 69 | struct ps2_gpio_data *drvdata = serio->port_data; | ||
| 70 | |||
| 71 | enable_irq(drvdata->irq); | ||
| 72 | return 0; | ||
| 73 | } | ||
| 74 | |||
| 75 | static void ps2_gpio_close(struct serio *serio) | ||
| 76 | { | ||
| 77 | struct ps2_gpio_data *drvdata = serio->port_data; | ||
| 78 | |||
| 79 | disable_irq(drvdata->irq); | ||
| 80 | } | ||
| 81 | |||
| 82 | static int __ps2_gpio_write(struct serio *serio, unsigned char val) | ||
| 83 | { | ||
| 84 | struct ps2_gpio_data *drvdata = serio->port_data; | ||
| 85 | |||
| 86 | disable_irq_nosync(drvdata->irq); | ||
| 87 | gpiod_direction_output(drvdata->gpio_clk, 0); | ||
| 88 | |||
| 89 | drvdata->mode = PS2_MODE_TX; | ||
| 90 | drvdata->tx_byte = val; | ||
| 91 | |||
| 92 | schedule_delayed_work(&drvdata->tx_work, usecs_to_jiffies(200)); | ||
| 93 | |||
| 94 | return 0; | ||
| 95 | } | ||
| 96 | |||
| 97 | static int ps2_gpio_write(struct serio *serio, unsigned char val) | ||
| 98 | { | ||
| 99 | struct ps2_gpio_data *drvdata = serio->port_data; | ||
| 100 | int ret = 0; | ||
| 101 | |||
| 102 | if (in_task()) { | ||
| 103 | mutex_lock(&drvdata->tx_mutex); | ||
| 104 | __ps2_gpio_write(serio, val); | ||
| 105 | if (!wait_for_completion_timeout(&drvdata->tx_done, | ||
| 106 | msecs_to_jiffies(10000))) | ||
| 107 | ret = SERIO_TIMEOUT; | ||
| 108 | mutex_unlock(&drvdata->tx_mutex); | ||
| 109 | } else { | ||
| 110 | __ps2_gpio_write(serio, val); | ||
| 111 | } | ||
| 112 | |||
| 113 | return ret; | ||
| 114 | } | ||
| 115 | |||
| 116 | static void ps2_gpio_tx_work_fn(struct work_struct *work) | ||
| 117 | { | ||
| 118 | struct delayed_work *dwork = to_delayed_work(work); | ||
| 119 | struct ps2_gpio_data *drvdata = container_of(dwork, | ||
| 120 | struct ps2_gpio_data, | ||
| 121 | tx_work); | ||
| 122 | |||
| 123 | enable_irq(drvdata->irq); | ||
| 124 | gpiod_direction_output(drvdata->gpio_data, 0); | ||
| 125 | gpiod_direction_input(drvdata->gpio_clk); | ||
| 126 | } | ||
| 127 | |||
| 128 | static irqreturn_t ps2_gpio_irq_rx(struct ps2_gpio_data *drvdata) | ||
| 129 | { | ||
| 130 | unsigned char byte, cnt; | ||
| 131 | int data; | ||
| 132 | int rxflags = 0; | ||
| 133 | static unsigned long old_jiffies; | ||
| 134 | |||
| 135 | byte = drvdata->rx_byte; | ||
| 136 | cnt = drvdata->rx_cnt; | ||
| 137 | |||
| 138 | if (old_jiffies == 0) | ||
| 139 | old_jiffies = jiffies; | ||
| 140 | |||
| 141 | if ((jiffies - old_jiffies) > usecs_to_jiffies(100)) { | ||
| 142 | dev_err(drvdata->dev, | ||
| 143 | "RX: timeout, probably we missed an interrupt\n"); | ||
| 144 | goto err; | ||
| 145 | } | ||
| 146 | old_jiffies = jiffies; | ||
| 147 | |||
| 148 | data = gpiod_get_value(drvdata->gpio_data); | ||
| 149 | if (unlikely(data < 0)) { | ||
| 150 | dev_err(drvdata->dev, "RX: failed to get data gpio val: %d\n", | ||
| 151 | data); | ||
| 152 | goto err; | ||
| 153 | } | ||
| 154 | |||
| 155 | switch (cnt) { | ||
| 156 | case PS2_START_BIT: | ||
| 157 | /* start bit should be low */ | ||
| 158 | if (unlikely(data)) { | ||
| 159 | dev_err(drvdata->dev, "RX: start bit should be low\n"); | ||
| 160 | goto err; | ||
| 161 | } | ||
| 162 | break; | ||
| 163 | case PS2_DATA_BIT0: | ||
| 164 | case PS2_DATA_BIT1: | ||
| 165 | case PS2_DATA_BIT2: | ||
| 166 | case PS2_DATA_BIT3: | ||
| 167 | case PS2_DATA_BIT4: | ||
| 168 | case PS2_DATA_BIT5: | ||
| 169 | case PS2_DATA_BIT6: | ||
| 170 | case PS2_DATA_BIT7: | ||
| 171 | /* processing data bits */ | ||
| 172 | if (data) | ||
| 173 | byte |= (data << (cnt - 1)); | ||
| 174 | break; | ||
| 175 | case PS2_PARITY_BIT: | ||
| 176 | /* check odd parity */ | ||
| 177 | if (!((hweight8(byte) & 1) ^ data)) { | ||
| 178 | rxflags |= SERIO_PARITY; | ||
| 179 | dev_warn(drvdata->dev, "RX: parity error\n"); | ||
| 180 | if (!drvdata->write_enable) | ||
| 181 | goto err; | ||
| 182 | } | ||
| 183 | |||
| 184 | /* Do not send spurious ACKs and NACKs when the write fn is | ||
| 185 | * not provided. | ||
| 186 | */ | ||
| 187 | if (!drvdata->write_enable) { | ||
| 188 | if (byte == PS2_DEV_RET_NACK) | ||
| 189 | goto err; | ||
| 190 | else if (byte == PS2_DEV_RET_ACK) | ||
| 191 | break; | ||
| 192 | } | ||
| 193 | |||
| 194 | /* Send the byte on without waiting for the stop bit. We may | ||
| 195 | * miss the stop bit, and there is no way to recover from | ||
| 196 | * that here: a missed parity bit would still be recognized | ||
| 197 | * while processing the stop bit, but when both are missed | ||
| 198 | * the data is lost. | ||
| 199 | */ | ||
| 200 | serio_interrupt(drvdata->serio, byte, rxflags); | ||
| 201 | dev_dbg(drvdata->dev, "RX: sending byte 0x%x\n", byte); | ||
| 202 | break; | ||
| 203 | case PS2_STOP_BIT: | ||
| 204 | /* stop bit should be high */ | ||
| 205 | if (unlikely(!data)) { | ||
| 206 | dev_err(drvdata->dev, "RX: stop bit should be high\n"); | ||
| 207 | goto err; | ||
| 208 | } | ||
| 209 | cnt = byte = 0; | ||
| 210 | old_jiffies = 0; | ||
| 211 | goto end; /* success */ | ||
| 212 | default: | ||
| 213 | dev_err(drvdata->dev, "RX: got out of sync with the device\n"); | ||
| 214 | goto err; | ||
| 215 | } | ||
| 216 | |||
| 217 | cnt++; | ||
| 218 | goto end; /* success */ | ||
| 219 | |||
| 220 | err: | ||
| 221 | cnt = byte = 0; | ||
| 222 | old_jiffies = 0; | ||
| 223 | __ps2_gpio_write(drvdata->serio, PS2_CMD_RESEND); | ||
| 224 | end: | ||
| 225 | drvdata->rx_cnt = cnt; | ||
| 226 | drvdata->rx_byte = byte; | ||
| 227 | return IRQ_HANDLED; | ||
| 228 | } | ||
| 229 | |||
| 230 | static irqreturn_t ps2_gpio_irq_tx(struct ps2_gpio_data *drvdata) | ||
| 231 | { | ||
| 232 | unsigned char byte, cnt; | ||
| 233 | int data; | ||
| 234 | static unsigned long old_jiffies; | ||
| 235 | |||
| 236 | cnt = drvdata->tx_cnt; | ||
| 237 | byte = drvdata->tx_byte; | ||
| 238 | |||
| 239 | if (old_jiffies == 0) | ||
| 240 | old_jiffies = jiffies; | ||
| 241 | |||
| 242 | if ((jiffies - old_jiffies) > usecs_to_jiffies(100)) { | ||
| 243 | dev_err(drvdata->dev, | ||
| 244 | "TX: timeout, probably we missed an interrupt\n"); | ||
| 245 | goto err; | ||
| 246 | } | ||
| 247 | old_jiffies = jiffies; | ||
| 248 | |||
| 249 | switch (cnt) { | ||
| 250 | case PS2_START_BIT: | ||
| 251 | /* should never happen */ | ||
| 252 | dev_err(drvdata->dev, | ||
| 253 | "TX: start bit should have been sent already\n"); | ||
| 254 | goto err; | ||
| 255 | case PS2_DATA_BIT0: | ||
| 256 | case PS2_DATA_BIT1: | ||
| 257 | case PS2_DATA_BIT2: | ||
| 258 | case PS2_DATA_BIT3: | ||
| 259 | case PS2_DATA_BIT4: | ||
| 260 | case PS2_DATA_BIT5: | ||
| 261 | case PS2_DATA_BIT6: | ||
| 262 | case PS2_DATA_BIT7: | ||
| 263 | data = byte & BIT(cnt - 1); | ||
| 264 | gpiod_set_value(drvdata->gpio_data, data); | ||
| 265 | break; | ||
| 266 | case PS2_PARITY_BIT: | ||
| 267 | /* do odd parity */ | ||
| 268 | data = !(hweight8(byte) & 1); | ||
| 269 | gpiod_set_value(drvdata->gpio_data, data); | ||
| 270 | break; | ||
| 271 | case PS2_STOP_BIT: | ||
| 272 | /* release data line to generate stop bit */ | ||
| 273 | gpiod_direction_input(drvdata->gpio_data); | ||
| 274 | break; | ||
| 275 | case PS2_TX_TIMEOUT: | ||
| 276 | /* Devices generate one extra clock pulse before sending the | ||
| 277 | * acknowledgment. | ||
| 278 | */ | ||
| 279 | break; | ||
| 280 | case PS2_ACK_BIT: | ||
| 281 | gpiod_direction_input(drvdata->gpio_data); | ||
| 282 | data = gpiod_get_value(drvdata->gpio_data); | ||
| 283 | if (data) { | ||
| 284 | dev_warn(drvdata->dev, "TX: received NACK, retry\n"); | ||
| 285 | goto err; | ||
| 286 | } | ||
| 287 | |||
| 288 | drvdata->mode = PS2_MODE_RX; | ||
| 289 | complete(&drvdata->tx_done); | ||
| 290 | |||
| 291 | cnt = 1; | ||
| 292 | old_jiffies = 0; | ||
| 293 | goto end; /* success */ | ||
| 294 | default: | ||
| 295 | /* Probably we missed the stop bit. Therefore we release data | ||
| 296 | * line and try again. | ||
| 297 | */ | ||
| 298 | gpiod_direction_input(drvdata->gpio_data); | ||
| 299 | dev_err(drvdata->dev, "TX: got out of sync with the device\n"); | ||
| 300 | goto err; | ||
| 301 | } | ||
| 302 | |||
| 303 | cnt++; | ||
| 304 | goto end; /* success */ | ||
| 305 | |||
| 306 | err: | ||
| 307 | cnt = 1; | ||
| 308 | old_jiffies = 0; | ||
| 309 | gpiod_direction_input(drvdata->gpio_data); | ||
| 310 | __ps2_gpio_write(drvdata->serio, drvdata->tx_byte); | ||
| 311 | end: | ||
| 312 | drvdata->tx_cnt = cnt; | ||
| 313 | return IRQ_HANDLED; | ||
| 314 | } | ||
| 315 | |||
| 316 | static irqreturn_t ps2_gpio_irq(int irq, void *dev_id) | ||
| 317 | { | ||
| 318 | struct ps2_gpio_data *drvdata = dev_id; | ||
| 319 | |||
| 320 | return drvdata->mode ? ps2_gpio_irq_tx(drvdata) : | ||
| 321 | ps2_gpio_irq_rx(drvdata); | ||
| 322 | } | ||
| 323 | |||
| 324 | static int ps2_gpio_get_props(struct device *dev, | ||
| 325 | struct ps2_gpio_data *drvdata) | ||
| 326 | { | ||
| 327 | drvdata->gpio_data = devm_gpiod_get(dev, "data", GPIOD_IN); | ||
| 328 | if (IS_ERR(drvdata->gpio_data)) { | ||
| 329 | dev_err(dev, "failed to request data gpio: %ld", | ||
| 330 | PTR_ERR(drvdata->gpio_data)); | ||
| 331 | return PTR_ERR(drvdata->gpio_data); | ||
| 332 | } | ||
| 333 | |||
| 334 | drvdata->gpio_clk = devm_gpiod_get(dev, "clk", GPIOD_IN); | ||
| 335 | if (IS_ERR(drvdata->gpio_clk)) { | ||
| 336 | dev_err(dev, "failed to request clock gpio: %ld", | ||
| 337 | PTR_ERR(drvdata->gpio_clk)); | ||
| 338 | return PTR_ERR(drvdata->gpio_clk); | ||
| 339 | } | ||
| 340 | |||
| 341 | drvdata->write_enable = device_property_read_bool(dev, | ||
| 342 | "write-enable"); | ||
| 343 | |||
| 344 | return 0; | ||
| 345 | } | ||
| 346 | |||
| 347 | static int ps2_gpio_probe(struct platform_device *pdev) | ||
| 348 | { | ||
| 349 | struct ps2_gpio_data *drvdata; | ||
| 350 | struct serio *serio; | ||
| 351 | struct device *dev = &pdev->dev; | ||
| 352 | int error; | ||
| 353 | |||
| 354 | drvdata = devm_kzalloc(dev, sizeof(struct ps2_gpio_data), GFP_KERNEL); | ||
| 355 | serio = kzalloc(sizeof(struct serio), GFP_KERNEL); | ||
| 356 | if (!drvdata || !serio) { | ||
| 357 | error = -ENOMEM; | ||
| 358 | goto err_free_serio; | ||
| 359 | } | ||
| 360 | |||
| 361 | error = ps2_gpio_get_props(dev, drvdata); | ||
| 362 | if (error) | ||
| 363 | goto err_free_serio; | ||
| 364 | |||
| 365 | if (gpiod_cansleep(drvdata->gpio_data) || | ||
| 366 | gpiod_cansleep(drvdata->gpio_clk)) { | ||
| 367 | dev_err(dev, "GPIO data or clk are connected via slow bus\n"); | ||
| 368 | error = -EINVAL; | ||
| 369 | } | ||
| 370 | |||
| 371 | drvdata->irq = platform_get_irq(pdev, 0); | ||
| 372 | if (drvdata->irq < 0) { | ||
| 373 | dev_err(dev, "failed to get irq from platform resource: %d\n", | ||
| 374 | drvdata->irq); | ||
| 375 | error = drvdata->irq; | ||
| 376 | goto err_free_serio; | ||
| 377 | } | ||
| 378 | |||
| 379 | error = devm_request_irq(dev, drvdata->irq, ps2_gpio_irq, | ||
| 380 | IRQF_NO_THREAD, DRIVER_NAME, drvdata); | ||
| 381 | if (error) { | ||
| 382 | dev_err(dev, "failed to request irq %d: %d\n", | ||
| 383 | drvdata->irq, error); | ||
| 384 | goto err_free_serio; | ||
| 385 | } | ||
| 386 | |||
| 387 | /* Keep irq disabled until serio->open is called. */ | ||
| 388 | disable_irq(drvdata->irq); | ||
| 389 | |||
| 390 | serio->id.type = SERIO_8042; | ||
| 391 | serio->open = ps2_gpio_open; | ||
| 392 | serio->close = ps2_gpio_close; | ||
| 393 | /* Write can be enabled in platform/dt data, but possibly it will not | ||
| 394 | * work because of the tough timings. | ||
| 395 | */ | ||
| 396 | serio->write = drvdata->write_enable ? ps2_gpio_write : NULL; | ||
| 397 | serio->port_data = drvdata; | ||
| 398 | serio->dev.parent = dev; | ||
| 399 | strlcpy(serio->name, dev_name(dev), sizeof(serio->name)); | ||
| 400 | strlcpy(serio->phys, dev_name(dev), sizeof(serio->phys)); | ||
| 401 | |||
| 402 | drvdata->serio = serio; | ||
| 403 | drvdata->dev = dev; | ||
| 404 | drvdata->mode = PS2_MODE_RX; | ||
| 405 | |||
| 406 | /* Tx count always starts at 1, as the start bit is sent implicitly by | ||
| 407 | * host-to-device communication initialization. | ||
| 408 | */ | ||
| 409 | drvdata->tx_cnt = 1; | ||
| 410 | |||
| 411 | INIT_DELAYED_WORK(&drvdata->tx_work, ps2_gpio_tx_work_fn); | ||
| 412 | init_completion(&drvdata->tx_done); | ||
| 413 | mutex_init(&drvdata->tx_mutex); | ||
| 414 | |||
| 415 | serio_register_port(serio); | ||
| 416 | platform_set_drvdata(pdev, drvdata); | ||
| 417 | |||
| 418 | return 0; /* success */ | ||
| 419 | |||
| 420 | err_free_serio: | ||
| 421 | kfree(serio); | ||
| 422 | return error; | ||
| 423 | } | ||
| 424 | |||
| 425 | static int ps2_gpio_remove(struct platform_device *pdev) | ||
| 426 | { | ||
| 427 | struct ps2_gpio_data *drvdata = platform_get_drvdata(pdev); | ||
| 428 | |||
| 429 | serio_unregister_port(drvdata->serio); | ||
| 430 | return 0; | ||
| 431 | } | ||
| 432 | |||
| 433 | #if defined(CONFIG_OF) | ||
| 434 | static const struct of_device_id ps2_gpio_match[] = { | ||
| 435 | { .compatible = "ps2-gpio", }, | ||
| 436 | { }, | ||
| 437 | }; | ||
| 438 | MODULE_DEVICE_TABLE(of, ps2_gpio_match); | ||
| 439 | #endif | ||
| 440 | |||
| 441 | static struct platform_driver ps2_gpio_driver = { | ||
| 442 | .probe = ps2_gpio_probe, | ||
| 443 | .remove = ps2_gpio_remove, | ||
| 444 | .driver = { | ||
| 445 | .name = DRIVER_NAME, | ||
| 446 | .of_match_table = of_match_ptr(ps2_gpio_match), | ||
| 447 | }, | ||
| 448 | }; | ||
| 449 | module_platform_driver(ps2_gpio_driver); | ||
| 450 | |||
| 451 | MODULE_AUTHOR("Danilo Krummrich <danilokrummrich@dk-develop.de>"); | ||
| 452 | MODULE_DESCRIPTION("GPIO PS2 driver"); | ||
| 453 | MODULE_LICENSE("GPL v2"); | ||
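Note on the probe above: it mixes devm_kzalloc() for the driver data with a plain kzalloc() for the serio port. That split is deliberate — serio_register_port() hands ownership of the structure to the serio core, which frees it once the port is unregistered, so kfree() is only valid on the error path before registration and must not be left to devm. A minimal sketch of that ownership pattern, with illustrative names not taken from this patch:

	#include <linux/device.h>
	#include <linux/serio.h>
	#include <linux/slab.h>

	static struct serio *example_port;

	static int example_register_port(struct device *dev)
	{
		struct serio *serio;

		/* plain kzalloc: the serio core owns this memory after registration */
		serio = kzalloc(sizeof(*serio), GFP_KERNEL);
		if (!serio)
			return -ENOMEM;

		serio->id.type = SERIO_8042;
		serio->dev.parent = dev;
		strlcpy(serio->name, dev_name(dev), sizeof(serio->name));
		strlcpy(serio->phys, dev_name(dev), sizeof(serio->phys));

		serio_register_port(serio);	/* ownership passes to the serio core */
		example_port = serio;
		return 0;
	}

	static void example_unregister_port(void)
	{
		serio_unregister_port(example_port);	/* core frees the struct, no kfree() */
	}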
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c index 30d6230d48f7..24a90c8db5b3 100644 --- a/drivers/input/serio/serio.c +++ b/drivers/input/serio/serio.c | |||
| @@ -469,7 +469,7 @@ static struct attribute *serio_device_id_attrs[] = { | |||
| 469 | NULL | 469 | NULL |
| 470 | }; | 470 | }; |
| 471 | 471 | ||
| 472 | static struct attribute_group serio_id_attr_group = { | 472 | static const struct attribute_group serio_id_attr_group = { |
| 473 | .name = "id", | 473 | .name = "id", |
| 474 | .attrs = serio_device_id_attrs, | 474 | .attrs = serio_device_id_attrs, |
| 475 | }; | 475 | }; |
| @@ -489,7 +489,7 @@ static struct attribute *serio_device_attrs[] = { | |||
| 489 | NULL | 489 | NULL |
| 490 | }; | 490 | }; |
| 491 | 491 | ||
| 492 | static struct attribute_group serio_device_attr_group = { | 492 | static const struct attribute_group serio_device_attr_group = { |
| 493 | .attrs = serio_device_attrs, | 493 | .attrs = serio_device_attrs, |
| 494 | }; | 494 | }; |
| 495 | 495 | ||
diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c index 71ef5d65a0c6..516f9fe77a17 100644 --- a/drivers/input/serio/serio_raw.c +++ b/drivers/input/serio/serio_raw.c | |||
| @@ -410,7 +410,7 @@ static void serio_raw_disconnect(struct serio *serio) | |||
| 410 | serio_set_drvdata(serio, NULL); | 410 | serio_set_drvdata(serio, NULL); |
| 411 | } | 411 | } |
| 412 | 412 | ||
| 413 | static struct serio_device_id serio_raw_serio_ids[] = { | 413 | static const struct serio_device_id serio_raw_serio_ids[] = { |
| 414 | { | 414 | { |
| 415 | .type = SERIO_8042, | 415 | .type = SERIO_8042, |
| 416 | .proto = SERIO_ANY, | 416 | .proto = SERIO_ANY, |
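This serio.c/serio_raw.c pair, like most of the hunks that follow, is a mechanical constification: sysfs_create_group() and the bus matching code only ever read attribute groups and device ID tables, so both can be declared const and placed in read-only data. A hedged sketch of the pattern with made-up names:

	#include <linux/module.h>
	#include <linux/serio.h>
	#include <linux/sysfs.h>

	static struct attribute *example_attrs[] = {
		/* &dev_attr_foo.attr, ... */
		NULL
	};

	/* const is fine: sysfs_create_group() takes a const pointer and never writes it */
	static const struct attribute_group example_attr_group = {
		.attrs = example_attrs,
	};

	/* const is fine: the serio bus only reads the table while matching drivers */
	static const struct serio_device_id example_serio_ids[] = {
		{
			.type	= SERIO_RS232,
			.proto	= SERIO_ANY,
			.id	= SERIO_ANY,
			.extra	= SERIO_ANY,
		},
		{ 0 }
	};
	MODULE_DEVICE_TABLE(serio, example_serio_ids);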
diff --git a/drivers/input/serio/xilinx_ps2.c b/drivers/input/serio/xilinx_ps2.c index 14c40892ed82..07de1b49293c 100644 --- a/drivers/input/serio/xilinx_ps2.c +++ b/drivers/input/serio/xilinx_ps2.c | |||
| @@ -45,8 +45,10 @@ | |||
| 45 | #define XPS2_STATUS_RX_FULL 0x00000001 /* Receive Full */ | 45 | #define XPS2_STATUS_RX_FULL 0x00000001 /* Receive Full */ |
| 46 | #define XPS2_STATUS_TX_FULL 0x00000002 /* Transmit Full */ | 46 | #define XPS2_STATUS_TX_FULL 0x00000002 /* Transmit Full */ |
| 47 | 47 | ||
| 48 | /* Bit definitions for ISR/IER registers. Both the registers have the same bit | 48 | /* |
| 49 | * definitions and are only defined once. */ | 49 | * Bit definitions for ISR/IER registers. Both the registers have the same bit |
| 50 | * definitions and are only defined once. | ||
| 51 | */ | ||
| 50 | #define XPS2_IPIXR_WDT_TOUT 0x00000001 /* Watchdog Timeout Interrupt */ | 52 | #define XPS2_IPIXR_WDT_TOUT 0x00000001 /* Watchdog Timeout Interrupt */ |
| 51 | #define XPS2_IPIXR_TX_NOACK 0x00000002 /* Transmit No ACK Interrupt */ | 53 | #define XPS2_IPIXR_TX_NOACK 0x00000002 /* Transmit No ACK Interrupt */ |
| 52 | #define XPS2_IPIXR_TX_ACK 0x00000004 /* Transmit ACK (Data) Interrupt */ | 54 | #define XPS2_IPIXR_TX_ACK 0x00000004 /* Transmit ACK (Data) Interrupt */ |
| @@ -292,8 +294,10 @@ static int xps2_of_probe(struct platform_device *ofdev) | |||
| 292 | /* Disable all the interrupts, just in case */ | 294 | /* Disable all the interrupts, just in case */ |
| 293 | out_be32(drvdata->base_address + XPS2_IPIER_OFFSET, 0); | 295 | out_be32(drvdata->base_address + XPS2_IPIER_OFFSET, 0); |
| 294 | 296 | ||
| 295 | /* Reset the PS2 device and abort any current transaction, to make sure | 297 | /* |
| 296 | * we have the PS2 in a good state */ | 298 | * Reset the PS2 device and abort any current transaction, |
| 299 | * to make sure we have the PS2 in a good state. | ||
| 300 | */ | ||
| 297 | out_be32(drvdata->base_address + XPS2_SRST_OFFSET, XPS2_SRST_RESET); | 301 | out_be32(drvdata->base_address + XPS2_SRST_OFFSET, XPS2_SRST_RESET); |
| 298 | 302 | ||
| 299 | dev_info(dev, "Xilinx PS2 at 0x%08llX mapped to 0x%p, irq=%d\n", | 303 | dev_info(dev, "Xilinx PS2 at 0x%08llX mapped to 0x%p, irq=%d\n", |
diff --git a/drivers/input/tablet/acecad.c b/drivers/input/tablet/acecad.c index e86e377a90f5..aebb3f9090cd 100644 --- a/drivers/input/tablet/acecad.c +++ b/drivers/input/tablet/acecad.c | |||
| @@ -260,7 +260,7 @@ static void usb_acecad_disconnect(struct usb_interface *intf) | |||
| 260 | kfree(acecad); | 260 | kfree(acecad); |
| 261 | } | 261 | } |
| 262 | 262 | ||
| 263 | static struct usb_device_id usb_acecad_id_table [] = { | 263 | static const struct usb_device_id usb_acecad_id_table[] = { |
| 264 | { USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_FLAIR), .driver_info = 0 }, | 264 | { USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_FLAIR), .driver_info = 0 }, |
| 265 | { USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_302), .driver_info = 1 }, | 265 | { USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_302), .driver_info = 1 }, |
| 266 | { } | 266 | { } |
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c index d67547bded3e..0b55e1f375b3 100644 --- a/drivers/input/tablet/aiptek.c +++ b/drivers/input/tablet/aiptek.c | |||
| @@ -1676,7 +1676,7 @@ static struct attribute *aiptek_attributes[] = { | |||
| 1676 | NULL | 1676 | NULL |
| 1677 | }; | 1677 | }; |
| 1678 | 1678 | ||
| 1679 | static struct attribute_group aiptek_attribute_group = { | 1679 | static const struct attribute_group aiptek_attribute_group = { |
| 1680 | .attrs = aiptek_attributes, | 1680 | .attrs = aiptek_attributes, |
| 1681 | }; | 1681 | }; |
| 1682 | 1682 | ||
diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c index 4d9d64908b59..a41c3ff7c9af 100644 --- a/drivers/input/tablet/kbtab.c +++ b/drivers/input/tablet/kbtab.c | |||
| @@ -88,7 +88,7 @@ static void kbtab_irq(struct urb *urb) | |||
| 88 | __func__, retval); | 88 | __func__, retval); |
| 89 | } | 89 | } |
| 90 | 90 | ||
| 91 | static struct usb_device_id kbtab_ids[] = { | 91 | static const struct usb_device_id kbtab_ids[] = { |
| 92 | { USB_DEVICE(USB_VENDOR_ID_KBGEAR, 0x1001), .driver_info = 0 }, | 92 | { USB_DEVICE(USB_VENDOR_ID_KBGEAR, 0x1001), .driver_info = 0 }, |
| 93 | { } | 93 | { } |
| 94 | }; | 94 | }; |
diff --git a/drivers/input/tablet/wacom_serial4.c b/drivers/input/tablet/wacom_serial4.c index 20ab802461e7..38bfaca48eab 100644 --- a/drivers/input/tablet/wacom_serial4.c +++ b/drivers/input/tablet/wacom_serial4.c | |||
| @@ -594,7 +594,7 @@ free_device: | |||
| 594 | return err; | 594 | return err; |
| 595 | } | 595 | } |
| 596 | 596 | ||
| 597 | static struct serio_device_id wacom_serio_ids[] = { | 597 | static const struct serio_device_id wacom_serio_ids[] = { |
| 598 | { | 598 | { |
| 599 | .type = SERIO_RS232, | 599 | .type = SERIO_RS232, |
| 600 | .proto = SERIO_WACOM_IV, | 600 | .proto = SERIO_WACOM_IV, |
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index 735a0be1ad95..a2f45aefce08 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c | |||
| @@ -499,7 +499,7 @@ static struct attribute *ads7846_attributes[] = { | |||
| 499 | NULL, | 499 | NULL, |
| 500 | }; | 500 | }; |
| 501 | 501 | ||
| 502 | static struct attribute_group ads7846_attr_group = { | 502 | static const struct attribute_group ads7846_attr_group = { |
| 503 | .attrs = ads7846_attributes, | 503 | .attrs = ads7846_attributes, |
| 504 | .is_visible = ads7846_is_visible, | 504 | .is_visible = ads7846_is_visible, |
| 505 | }; | 505 | }; |
| @@ -599,7 +599,7 @@ static struct attribute *ads784x_attributes[] = { | |||
| 599 | NULL, | 599 | NULL, |
| 600 | }; | 600 | }; |
| 601 | 601 | ||
| 602 | static struct attribute_group ads784x_attr_group = { | 602 | static const struct attribute_group ads784x_attr_group = { |
| 603 | .attrs = ads784x_attributes, | 603 | .attrs = ads784x_attributes, |
| 604 | }; | 604 | }; |
| 605 | 605 | ||
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c index dd042a9b0aaa..7659bc48f1db 100644 --- a/drivers/input/touchscreen/atmel_mxt_ts.c +++ b/drivers/input/touchscreen/atmel_mxt_ts.c | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <linux/interrupt.h> | 28 | #include <linux/interrupt.h> |
| 29 | #include <linux/of.h> | 29 | #include <linux/of.h> |
| 30 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
| 31 | #include <linux/gpio/consumer.h> | ||
| 31 | #include <asm/unaligned.h> | 32 | #include <asm/unaligned.h> |
| 32 | #include <media/v4l2-device.h> | 33 | #include <media/v4l2-device.h> |
| 33 | #include <media/v4l2-ioctl.h> | 34 | #include <media/v4l2-ioctl.h> |
| @@ -300,6 +301,7 @@ struct mxt_data { | |||
| 300 | u8 multitouch; | 301 | u8 multitouch; |
| 301 | struct t7_config t7_cfg; | 302 | struct t7_config t7_cfg; |
| 302 | struct mxt_dbg dbg; | 303 | struct mxt_dbg dbg; |
| 304 | struct gpio_desc *reset_gpio; | ||
| 303 | 305 | ||
| 304 | /* Cached parameters from object table */ | 306 | /* Cached parameters from object table */ |
| 305 | u16 T5_address; | 307 | u16 T5_address; |
| @@ -3117,11 +3119,9 @@ static int mxt_probe(struct i2c_client *client, const struct i2c_device_id *id) | |||
| 3117 | if (IS_ERR(pdata)) | 3119 | if (IS_ERR(pdata)) |
| 3118 | return PTR_ERR(pdata); | 3120 | return PTR_ERR(pdata); |
| 3119 | 3121 | ||
| 3120 | data = kzalloc(sizeof(struct mxt_data), GFP_KERNEL); | 3122 | data = devm_kzalloc(&client->dev, sizeof(struct mxt_data), GFP_KERNEL); |
| 3121 | if (!data) { | 3123 | if (!data) |
| 3122 | dev_err(&client->dev, "Failed to allocate memory\n"); | ||
| 3123 | return -ENOMEM; | 3124 | return -ENOMEM; |
| 3124 | } | ||
| 3125 | 3125 | ||
| 3126 | snprintf(data->phys, sizeof(data->phys), "i2c-%u-%04x/input0", | 3126 | snprintf(data->phys, sizeof(data->phys), "i2c-%u-%04x/input0", |
| 3127 | client->adapter->nr, client->addr); | 3127 | client->adapter->nr, client->addr); |
| @@ -3135,19 +3135,40 @@ static int mxt_probe(struct i2c_client *client, const struct i2c_device_id *id) | |||
| 3135 | init_completion(&data->reset_completion); | 3135 | init_completion(&data->reset_completion); |
| 3136 | init_completion(&data->crc_completion); | 3136 | init_completion(&data->crc_completion); |
| 3137 | 3137 | ||
| 3138 | error = request_threaded_irq(client->irq, NULL, mxt_interrupt, | 3138 | data->reset_gpio = devm_gpiod_get_optional(&client->dev, |
| 3139 | pdata->irqflags | IRQF_ONESHOT, | 3139 | "reset", GPIOD_OUT_LOW); |
| 3140 | client->name, data); | 3140 | if (IS_ERR(data->reset_gpio)) { |
| 3141 | error = PTR_ERR(data->reset_gpio); | ||
| 3142 | dev_err(&client->dev, "Failed to get reset gpio: %d\n", error); | ||
| 3143 | return error; | ||
| 3144 | } | ||
| 3145 | |||
| 3146 | error = devm_request_threaded_irq(&client->dev, client->irq, | ||
| 3147 | NULL, mxt_interrupt, | ||
| 3148 | pdata->irqflags | IRQF_ONESHOT, | ||
| 3149 | client->name, data); | ||
| 3141 | if (error) { | 3150 | if (error) { |
| 3142 | dev_err(&client->dev, "Failed to register interrupt\n"); | 3151 | dev_err(&client->dev, "Failed to register interrupt\n"); |
| 3143 | goto err_free_mem; | 3152 | return error; |
| 3153 | } | ||
| 3154 | |||
| 3155 | if (data->reset_gpio) { | ||
| 3156 | data->in_bootloader = true; | ||
| 3157 | msleep(MXT_RESET_TIME); | ||
| 3158 | reinit_completion(&data->bl_completion); | ||
| 3159 | gpiod_set_value(data->reset_gpio, 1); | ||
| 3160 | error = mxt_wait_for_completion(data, &data->bl_completion, | ||
| 3161 | MXT_RESET_TIMEOUT); | ||
| 3162 | if (error) | ||
| 3163 | return error; | ||
| 3164 | data->in_bootloader = false; | ||
| 3144 | } | 3165 | } |
| 3145 | 3166 | ||
| 3146 | disable_irq(client->irq); | 3167 | disable_irq(client->irq); |
| 3147 | 3168 | ||
| 3148 | error = mxt_initialize(data); | 3169 | error = mxt_initialize(data); |
| 3149 | if (error) | 3170 | if (error) |
| 3150 | goto err_free_irq; | 3171 | return error; |
| 3151 | 3172 | ||
| 3152 | error = sysfs_create_group(&client->dev.kobj, &mxt_attr_group); | 3173 | error = sysfs_create_group(&client->dev.kobj, &mxt_attr_group); |
| 3153 | if (error) { | 3174 | if (error) { |
| @@ -3161,10 +3182,6 @@ static int mxt_probe(struct i2c_client *client, const struct i2c_device_id *id) | |||
| 3161 | err_free_object: | 3182 | err_free_object: |
| 3162 | mxt_free_input_device(data); | 3183 | mxt_free_input_device(data); |
| 3163 | mxt_free_object_table(data); | 3184 | mxt_free_object_table(data); |
| 3164 | err_free_irq: | ||
| 3165 | free_irq(client->irq, data); | ||
| 3166 | err_free_mem: | ||
| 3167 | kfree(data); | ||
| 3168 | return error; | 3185 | return error; |
| 3169 | } | 3186 | } |
| 3170 | 3187 | ||
| @@ -3172,11 +3189,10 @@ static int mxt_remove(struct i2c_client *client) | |||
| 3172 | { | 3189 | { |
| 3173 | struct mxt_data *data = i2c_get_clientdata(client); | 3190 | struct mxt_data *data = i2c_get_clientdata(client); |
| 3174 | 3191 | ||
| 3192 | disable_irq(data->irq); | ||
| 3175 | sysfs_remove_group(&client->dev.kobj, &mxt_attr_group); | 3193 | sysfs_remove_group(&client->dev.kobj, &mxt_attr_group); |
| 3176 | free_irq(data->irq, data); | ||
| 3177 | mxt_free_input_device(data); | 3194 | mxt_free_input_device(data); |
| 3178 | mxt_free_object_table(data); | 3195 | mxt_free_object_table(data); |
| 3179 | kfree(data); | ||
| 3180 | 3196 | ||
| 3181 | return 0; | 3197 | return 0; |
| 3182 | } | 3198 | } |
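The atmel_mxt_ts hunks above are a textbook devm conversion: once the private data and the threaded IRQ are device-managed, the err_free_irq/err_free_mem unwind labels and the matching free_irq()/kfree() calls in mxt_remove() become redundant, because the resources are released automatically when probe fails or the device is unbound. A minimal sketch of the same pattern for a hypothetical I2C driver (names are illustrative):

	#include <linux/device.h>
	#include <linux/i2c.h>
	#include <linux/interrupt.h>
	#include <linux/slab.h>

	struct example_data {
		struct i2c_client *client;
	};

	static irqreturn_t example_irq(int irq, void *dev_id)
	{
		/* ... handle the interrupt ... */
		return IRQ_HANDLED;
	}

	static int example_probe(struct i2c_client *client,
				 const struct i2c_device_id *id)
	{
		struct example_data *data;
		int error;

		/* released automatically on probe failure or device unbind */
		data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		data->client = client;

		/* released automatically as well: no err_free_irq label needed */
		error = devm_request_threaded_irq(&client->dev, client->irq,
						  NULL, example_irq, IRQF_ONESHOT,
						  dev_name(&client->dev), data);
		if (error)
			return error;

		i2c_set_clientdata(client, data);
		return 0;
	}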
diff --git a/drivers/input/touchscreen/dynapro.c b/drivers/input/touchscreen/dynapro.c index 86237a910876..5b1b66fffbe3 100644 --- a/drivers/input/touchscreen/dynapro.c +++ b/drivers/input/touchscreen/dynapro.c | |||
| @@ -164,7 +164,7 @@ static int dynapro_connect(struct serio *serio, struct serio_driver *drv) | |||
| 164 | * The serio driver structure. | 164 | * The serio driver structure. |
| 165 | */ | 165 | */ |
| 166 | 166 | ||
| 167 | static struct serio_device_id dynapro_serio_ids[] = { | 167 | static const struct serio_device_id dynapro_serio_ids[] = { |
| 168 | { | 168 | { |
| 169 | .type = SERIO_RS232, | 169 | .type = SERIO_RS232, |
| 170 | .proto = SERIO_DYNAPRO, | 170 | .proto = SERIO_DYNAPRO, |
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c index 872750eeca93..0f4cda7282a2 100644 --- a/drivers/input/touchscreen/elants_i2c.c +++ b/drivers/input/touchscreen/elants_i2c.c | |||
| @@ -1066,7 +1066,7 @@ static struct attribute *elants_attributes[] = { | |||
| 1066 | NULL | 1066 | NULL |
| 1067 | }; | 1067 | }; |
| 1068 | 1068 | ||
| 1069 | static struct attribute_group elants_attribute_group = { | 1069 | static const struct attribute_group elants_attribute_group = { |
| 1070 | .attrs = elants_attributes, | 1070 | .attrs = elants_attributes, |
| 1071 | }; | 1071 | }; |
| 1072 | 1072 | ||
diff --git a/drivers/input/touchscreen/elo.c b/drivers/input/touchscreen/elo.c index 8051a4b704ea..83433e8efff7 100644 --- a/drivers/input/touchscreen/elo.c +++ b/drivers/input/touchscreen/elo.c | |||
| @@ -381,7 +381,7 @@ static int elo_connect(struct serio *serio, struct serio_driver *drv) | |||
| 381 | * The serio driver structure. | 381 | * The serio driver structure. |
| 382 | */ | 382 | */ |
| 383 | 383 | ||
| 384 | static struct serio_device_id elo_serio_ids[] = { | 384 | static const struct serio_device_id elo_serio_ids[] = { |
| 385 | { | 385 | { |
| 386 | .type = SERIO_RS232, | 386 | .type = SERIO_RS232, |
| 387 | .proto = SERIO_ELO, | 387 | .proto = SERIO_ELO, |
diff --git a/drivers/input/touchscreen/fujitsu_ts.c b/drivers/input/touchscreen/fujitsu_ts.c index d0e46a7e183b..a0fbb454499d 100644 --- a/drivers/input/touchscreen/fujitsu_ts.c +++ b/drivers/input/touchscreen/fujitsu_ts.c | |||
| @@ -151,7 +151,7 @@ static int fujitsu_connect(struct serio *serio, struct serio_driver *drv) | |||
| 151 | /* | 151 | /* |
| 152 | * The serio driver structure. | 152 | * The serio driver structure. |
| 153 | */ | 153 | */ |
| 154 | static struct serio_device_id fujitsu_serio_ids[] = { | 154 | static const struct serio_device_id fujitsu_serio_ids[] = { |
| 155 | { | 155 | { |
| 156 | .type = SERIO_RS232, | 156 | .type = SERIO_RS232, |
| 157 | .proto = SERIO_FUJITSU, | 157 | .proto = SERIO_FUJITSU, |
diff --git a/drivers/input/touchscreen/gunze.c b/drivers/input/touchscreen/gunze.c index e2ee62615273..481586909d28 100644 --- a/drivers/input/touchscreen/gunze.c +++ b/drivers/input/touchscreen/gunze.c | |||
| @@ -162,7 +162,7 @@ static int gunze_connect(struct serio *serio, struct serio_driver *drv) | |||
| 162 | * The serio driver structure. | 162 | * The serio driver structure. |
| 163 | */ | 163 | */ |
| 164 | 164 | ||
| 165 | static struct serio_device_id gunze_serio_ids[] = { | 165 | static const struct serio_device_id gunze_serio_ids[] = { |
| 166 | { | 166 | { |
| 167 | .type = SERIO_RS232, | 167 | .type = SERIO_RS232, |
| 168 | .proto = SERIO_GUNZE, | 168 | .proto = SERIO_GUNZE, |
diff --git a/drivers/input/touchscreen/hampshire.c b/drivers/input/touchscreen/hampshire.c index ecb1e0e01328..eb052d559e54 100644 --- a/drivers/input/touchscreen/hampshire.c +++ b/drivers/input/touchscreen/hampshire.c | |||
| @@ -163,7 +163,7 @@ static int hampshire_connect(struct serio *serio, struct serio_driver *drv) | |||
| 163 | * The serio driver structure. | 163 | * The serio driver structure. |
| 164 | */ | 164 | */ |
| 165 | 165 | ||
| 166 | static struct serio_device_id hampshire_serio_ids[] = { | 166 | static const struct serio_device_id hampshire_serio_ids[] = { |
| 167 | { | 167 | { |
| 168 | .type = SERIO_RS232, | 168 | .type = SERIO_RS232, |
| 169 | .proto = SERIO_HAMPSHIRE, | 169 | .proto = SERIO_HAMPSHIRE, |
diff --git a/drivers/input/touchscreen/inexio.c b/drivers/input/touchscreen/inexio.c index adb80b65a259..b9bc56233ccc 100644 --- a/drivers/input/touchscreen/inexio.c +++ b/drivers/input/touchscreen/inexio.c | |||
| @@ -165,7 +165,7 @@ static int inexio_connect(struct serio *serio, struct serio_driver *drv) | |||
| 165 | * The serio driver structure. | 165 | * The serio driver structure. |
| 166 | */ | 166 | */ |
| 167 | 167 | ||
| 168 | static struct serio_device_id inexio_serio_ids[] = { | 168 | static const struct serio_device_id inexio_serio_ids[] = { |
| 169 | { | 169 | { |
| 170 | .type = SERIO_RS232, | 170 | .type = SERIO_RS232, |
| 171 | .proto = SERIO_INEXIO, | 171 | .proto = SERIO_INEXIO, |
diff --git a/drivers/input/touchscreen/mtouch.c b/drivers/input/touchscreen/mtouch.c index 9b5552a26169..a3707fad4d1c 100644 --- a/drivers/input/touchscreen/mtouch.c +++ b/drivers/input/touchscreen/mtouch.c | |||
| @@ -178,7 +178,7 @@ static int mtouch_connect(struct serio *serio, struct serio_driver *drv) | |||
| 178 | * The serio driver structure. | 178 | * The serio driver structure. |
| 179 | */ | 179 | */ |
| 180 | 180 | ||
| 181 | static struct serio_device_id mtouch_serio_ids[] = { | 181 | static const struct serio_device_id mtouch_serio_ids[] = { |
| 182 | { | 182 | { |
| 183 | .type = SERIO_RS232, | 183 | .type = SERIO_RS232, |
| 184 | .proto = SERIO_MICROTOUCH, | 184 | .proto = SERIO_MICROTOUCH, |
diff --git a/drivers/input/touchscreen/mxs-lradc-ts.c b/drivers/input/touchscreen/mxs-lradc-ts.c index 58c016cd6809..3707e927f770 100644 --- a/drivers/input/touchscreen/mxs-lradc-ts.c +++ b/drivers/input/touchscreen/mxs-lradc-ts.c | |||
| @@ -30,7 +30,7 @@ | |||
| 30 | #include <linux/of_irq.h> | 30 | #include <linux/of_irq.h> |
| 31 | #include <linux/platform_device.h> | 31 | #include <linux/platform_device.h> |
| 32 | 32 | ||
| 33 | const char *mxs_lradc_ts_irq_names[] = { | 33 | static const char * const mxs_lradc_ts_irq_names[] = { |
| 34 | "mxs-lradc-touchscreen", | 34 | "mxs-lradc-touchscreen", |
| 35 | "mxs-lradc-channel6", | 35 | "mxs-lradc-channel6", |
| 36 | "mxs-lradc-channel7", | 36 | "mxs-lradc-channel7", |
| @@ -630,9 +630,11 @@ static int mxs_lradc_ts_probe(struct platform_device *pdev) | |||
| 630 | spin_lock_init(&ts->lock); | 630 | spin_lock_init(&ts->lock); |
| 631 | 631 | ||
| 632 | iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 632 | iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 633 | if (!iores) | ||
| 634 | return -EINVAL; | ||
| 633 | ts->base = devm_ioremap(dev, iores->start, resource_size(iores)); | 635 | ts->base = devm_ioremap(dev, iores->start, resource_size(iores)); |
| 634 | if (IS_ERR(ts->base)) | 636 | if (!ts->base) |
| 635 | return PTR_ERR(ts->base); | 637 | return -ENOMEM; |
| 636 | 638 | ||
| 637 | ret = of_property_read_u32(node, "fsl,lradc-touchscreen-wires", | 639 | ret = of_property_read_u32(node, "fsl,lradc-touchscreen-wires", |
| 638 | &ts_wires); | 640 | &ts_wires); |
diff --git a/drivers/input/touchscreen/penmount.c b/drivers/input/touchscreen/penmount.c index 417d87379265..6e6d7fd98cd2 100644 --- a/drivers/input/touchscreen/penmount.c +++ b/drivers/input/touchscreen/penmount.c | |||
| @@ -293,7 +293,7 @@ static int pm_connect(struct serio *serio, struct serio_driver *drv) | |||
| 293 | * The serio driver structure. | 293 | * The serio driver structure. |
| 294 | */ | 294 | */ |
| 295 | 295 | ||
| 296 | static struct serio_device_id pm_serio_ids[] = { | 296 | static const struct serio_device_id pm_serio_ids[] = { |
| 297 | { | 297 | { |
| 298 | .type = SERIO_RS232, | 298 | .type = SERIO_RS232, |
| 299 | .proto = SERIO_PENMOUNT, | 299 | .proto = SERIO_PENMOUNT, |
diff --git a/drivers/input/touchscreen/raydium_i2c_ts.c b/drivers/input/touchscreen/raydium_i2c_ts.c index 1252e49ccfa1..4f1d3fd5d412 100644 --- a/drivers/input/touchscreen/raydium_i2c_ts.c +++ b/drivers/input/touchscreen/raydium_i2c_ts.c | |||
| @@ -939,7 +939,7 @@ static struct attribute *raydium_i2c_attributes[] = { | |||
| 939 | NULL | 939 | NULL |
| 940 | }; | 940 | }; |
| 941 | 941 | ||
| 942 | static struct attribute_group raydium_i2c_attribute_group = { | 942 | static const struct attribute_group raydium_i2c_attribute_group = { |
| 943 | .attrs = raydium_i2c_attributes, | 943 | .attrs = raydium_i2c_attributes, |
| 944 | }; | 944 | }; |
| 945 | 945 | ||
diff --git a/drivers/input/touchscreen/sun4i-ts.c b/drivers/input/touchscreen/sun4i-ts.c index d07dd29d4848..d2e14d9e5975 100644 --- a/drivers/input/touchscreen/sun4i-ts.c +++ b/drivers/input/touchscreen/sun4i-ts.c | |||
| @@ -206,7 +206,7 @@ static int sun4i_get_tz_temp(void *data, int *temp) | |||
| 206 | return sun4i_get_temp(data, temp); | 206 | return sun4i_get_temp(data, temp); |
| 207 | } | 207 | } |
| 208 | 208 | ||
| 209 | static struct thermal_zone_of_device_ops sun4i_ts_tz_ops = { | 209 | static const struct thermal_zone_of_device_ops sun4i_ts_tz_ops = { |
| 210 | .get_temp = sun4i_get_tz_temp, | 210 | .get_temp = sun4i_get_tz_temp, |
| 211 | }; | 211 | }; |
| 212 | 212 | ||
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c index 128e5bd74720..f16f8358c70a 100644 --- a/drivers/input/touchscreen/sur40.c +++ b/drivers/input/touchscreen/sur40.c | |||
| @@ -59,7 +59,7 @@ struct sur40_blob { | |||
| 59 | __le16 blob_id; | 59 | __le16 blob_id; |
| 60 | 60 | ||
| 61 | u8 action; /* 0x02 = enter/exit, 0x03 = update (?) */ | 61 | u8 action; /* 0x02 = enter/exit, 0x03 = update (?) */ |
| 62 | u8 unknown; /* always 0x01 or 0x02 (no idea what this is?) */ | 62 | u8 type; /* bitmask (0x01 blob, 0x02 touch, 0x04 tag) */ |
| 63 | 63 | ||
| 64 | __le16 bb_pos_x; /* upper left corner of bounding box */ | 64 | __le16 bb_pos_x; /* upper left corner of bounding box */ |
| 65 | __le16 bb_pos_y; | 65 | __le16 bb_pos_y; |
| @@ -133,12 +133,19 @@ struct sur40_image_header { | |||
| 133 | 133 | ||
| 134 | /* control commands */ | 134 | /* control commands */ |
| 135 | #define SUR40_GET_VERSION 0xb0 /* 12 bytes string */ | 135 | #define SUR40_GET_VERSION 0xb0 /* 12 bytes string */ |
| 136 | #define SUR40_UNKNOWN1 0xb3 /* 5 bytes */ | 136 | #define SUR40_ACCEL_CAPS 0xb3 /* 5 bytes */ |
| 137 | #define SUR40_UNKNOWN2 0xc1 /* 24 bytes */ | 137 | #define SUR40_SENSOR_CAPS 0xc1 /* 24 bytes */ |
| 138 | |||
| 139 | #define SUR40_POKE 0xc5 /* poke register byte */ | ||
| 140 | #define SUR40_PEEK 0xc4 /* 48 bytes registers */ | ||
| 138 | 141 | ||
| 139 | #define SUR40_GET_STATE 0xc5 /* 4 bytes state (?) */ | 142 | #define SUR40_GET_STATE 0xc5 /* 4 bytes state (?) */ |
| 140 | #define SUR40_GET_SENSORS 0xb1 /* 8 bytes sensors */ | 143 | #define SUR40_GET_SENSORS 0xb1 /* 8 bytes sensors */ |
| 141 | 144 | ||
| 145 | #define SUR40_BLOB 0x01 | ||
| 146 | #define SUR40_TOUCH 0x02 | ||
| 147 | #define SUR40_TAG 0x04 | ||
| 148 | |||
| 142 | static const struct v4l2_pix_format sur40_pix_format[] = { | 149 | static const struct v4l2_pix_format sur40_pix_format[] = { |
| 143 | { | 150 | { |
| 144 | .pixelformat = V4L2_TCH_FMT_TU08, | 151 | .pixelformat = V4L2_TCH_FMT_TU08, |
| @@ -238,11 +245,11 @@ static int sur40_init(struct sur40_state *dev) | |||
| 238 | if (result < 0) | 245 | if (result < 0) |
| 239 | goto error; | 246 | goto error; |
| 240 | 247 | ||
| 241 | result = sur40_command(dev, SUR40_UNKNOWN2, 0x00, buffer, 24); | 248 | result = sur40_command(dev, SUR40_SENSOR_CAPS, 0x00, buffer, 24); |
| 242 | if (result < 0) | 249 | if (result < 0) |
| 243 | goto error; | 250 | goto error; |
| 244 | 251 | ||
| 245 | result = sur40_command(dev, SUR40_UNKNOWN1, 0x00, buffer, 5); | 252 | result = sur40_command(dev, SUR40_ACCEL_CAPS, 0x00, buffer, 5); |
| 246 | if (result < 0) | 253 | if (result < 0) |
| 247 | goto error; | 254 | goto error; |
| 248 | 255 | ||
| @@ -289,20 +296,24 @@ static void sur40_close(struct input_polled_dev *polldev) | |||
| 289 | static void sur40_report_blob(struct sur40_blob *blob, struct input_dev *input) | 296 | static void sur40_report_blob(struct sur40_blob *blob, struct input_dev *input) |
| 290 | { | 297 | { |
| 291 | int wide, major, minor; | 298 | int wide, major, minor; |
| 299 | int bb_size_x, bb_size_y, pos_x, pos_y, ctr_x, ctr_y, slotnum; | ||
| 292 | 300 | ||
| 293 | int bb_size_x = le16_to_cpu(blob->bb_size_x); | 301 | if (blob->type != SUR40_TOUCH) |
| 294 | int bb_size_y = le16_to_cpu(blob->bb_size_y); | 302 | return; |
| 295 | |||
| 296 | int pos_x = le16_to_cpu(blob->pos_x); | ||
| 297 | int pos_y = le16_to_cpu(blob->pos_y); | ||
| 298 | |||
| 299 | int ctr_x = le16_to_cpu(blob->ctr_x); | ||
| 300 | int ctr_y = le16_to_cpu(blob->ctr_y); | ||
| 301 | 303 | ||
| 302 | int slotnum = input_mt_get_slot_by_key(input, blob->blob_id); | 304 | slotnum = input_mt_get_slot_by_key(input, blob->blob_id); |
| 303 | if (slotnum < 0 || slotnum >= MAX_CONTACTS) | 305 | if (slotnum < 0 || slotnum >= MAX_CONTACTS) |
| 304 | return; | 306 | return; |
| 305 | 307 | ||
| 308 | bb_size_x = le16_to_cpu(blob->bb_size_x); | ||
| 309 | bb_size_y = le16_to_cpu(blob->bb_size_y); | ||
| 310 | |||
| 311 | pos_x = le16_to_cpu(blob->pos_x); | ||
| 312 | pos_y = le16_to_cpu(blob->pos_y); | ||
| 313 | |||
| 314 | ctr_x = le16_to_cpu(blob->ctr_x); | ||
| 315 | ctr_y = le16_to_cpu(blob->ctr_y); | ||
| 316 | |||
| 306 | input_mt_slot(input, slotnum); | 317 | input_mt_slot(input, slotnum); |
| 307 | input_mt_report_slot_state(input, MT_TOOL_FINGER, 1); | 318 | input_mt_report_slot_state(input, MT_TOOL_FINGER, 1); |
| 308 | wide = (bb_size_x > bb_size_y); | 319 | wide = (bb_size_x > bb_size_y); |
| @@ -367,10 +378,13 @@ static void sur40_poll(struct input_polled_dev *polldev) | |||
| 367 | /* | 378 | /* |
| 368 | * Sanity check. when video data is also being retrieved, the | 379 | * Sanity check. when video data is also being retrieved, the |
| 369 | * packet ID will usually increase in the middle of a series | 380 | * packet ID will usually increase in the middle of a series |
| 370 | * instead of at the end. | 381 | * instead of at the end. However, the data is still consistent, |
| 371 | */ | 382 | * so the packet ID is probably just valid for the first packet |
| 383 | * in a series. | ||
| 384 | |||
| 372 | if (packet_id != le32_to_cpu(header->packet_id)) | 385 | if (packet_id != le32_to_cpu(header->packet_id)) |
| 373 | dev_dbg(sur40->dev, "packet ID mismatch\n"); | 386 | dev_dbg(sur40->dev, "packet ID mismatch\n"); |
| 387 | */ | ||
| 374 | 388 | ||
| 375 | packet_blobs = result / sizeof(struct sur40_blob); | 389 | packet_blobs = result / sizeof(struct sur40_blob); |
| 376 | dev_dbg(sur40->dev, "received %d blobs\n", packet_blobs); | 390 | dev_dbg(sur40->dev, "received %d blobs\n", packet_blobs); |
diff --git a/drivers/input/touchscreen/touchit213.c b/drivers/input/touchscreen/touchit213.c index c27cf8f3d1ca..98a16698be8e 100644 --- a/drivers/input/touchscreen/touchit213.c +++ b/drivers/input/touchscreen/touchit213.c | |||
| @@ -192,7 +192,7 @@ static int touchit213_connect(struct serio *serio, struct serio_driver *drv) | |||
| 192 | * The serio driver structure. | 192 | * The serio driver structure. |
| 193 | */ | 193 | */ |
| 194 | 194 | ||
| 195 | static struct serio_device_id touchit213_serio_ids[] = { | 195 | static const struct serio_device_id touchit213_serio_ids[] = { |
| 196 | { | 196 | { |
| 197 | .type = SERIO_RS232, | 197 | .type = SERIO_RS232, |
| 198 | .proto = SERIO_TOUCHIT213, | 198 | .proto = SERIO_TOUCHIT213, |
diff --git a/drivers/input/touchscreen/touchright.c b/drivers/input/touchscreen/touchright.c index 4000e5205407..45c325c33f21 100644 --- a/drivers/input/touchscreen/touchright.c +++ b/drivers/input/touchscreen/touchright.c | |||
| @@ -152,7 +152,7 @@ static int tr_connect(struct serio *serio, struct serio_driver *drv) | |||
| 152 | * The serio driver structure. | 152 | * The serio driver structure. |
| 153 | */ | 153 | */ |
| 154 | 154 | ||
| 155 | static struct serio_device_id tr_serio_ids[] = { | 155 | static const struct serio_device_id tr_serio_ids[] = { |
| 156 | { | 156 | { |
| 157 | .type = SERIO_RS232, | 157 | .type = SERIO_RS232, |
| 158 | .proto = SERIO_TOUCHRIGHT, | 158 | .proto = SERIO_TOUCHRIGHT, |
diff --git a/drivers/input/touchscreen/touchwin.c b/drivers/input/touchscreen/touchwin.c index ba90f447df75..2ba6b4ca28cb 100644 --- a/drivers/input/touchscreen/touchwin.c +++ b/drivers/input/touchscreen/touchwin.c | |||
| @@ -159,7 +159,7 @@ static int tw_connect(struct serio *serio, struct serio_driver *drv) | |||
| 159 | * The serio driver structure. | 159 | * The serio driver structure. |
| 160 | */ | 160 | */ |
| 161 | 161 | ||
| 162 | static struct serio_device_id tw_serio_ids[] = { | 162 | static const struct serio_device_id tw_serio_ids[] = { |
| 163 | { | 163 | { |
| 164 | .type = SERIO_RS232, | 164 | .type = SERIO_RS232, |
| 165 | .proto = SERIO_TOUCHWIN, | 165 | .proto = SERIO_TOUCHWIN, |
diff --git a/drivers/input/touchscreen/tsc40.c b/drivers/input/touchscreen/tsc40.c index 29687872cb94..d4ae4ba84c1f 100644 --- a/drivers/input/touchscreen/tsc40.c +++ b/drivers/input/touchscreen/tsc40.c | |||
| @@ -141,7 +141,7 @@ static void tsc_disconnect(struct serio *serio) | |||
| 141 | serio_set_drvdata(serio, NULL); | 141 | serio_set_drvdata(serio, NULL); |
| 142 | } | 142 | } |
| 143 | 143 | ||
| 144 | static struct serio_device_id tsc_serio_ids[] = { | 144 | static const struct serio_device_id tsc_serio_ids[] = { |
| 145 | { | 145 | { |
| 146 | .type = SERIO_RS232, | 146 | .type = SERIO_RS232, |
| 147 | .proto = SERIO_TSC40, | 147 | .proto = SERIO_TSC40, |
diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c index 85e95725d0df..3715d1eace92 100644 --- a/drivers/input/touchscreen/wacom_w8001.c +++ b/drivers/input/touchscreen/wacom_w8001.c | |||
| @@ -681,7 +681,7 @@ fail1: | |||
| 681 | return err; | 681 | return err; |
| 682 | } | 682 | } |
| 683 | 683 | ||
| 684 | static struct serio_device_id w8001_serio_ids[] = { | 684 | static const struct serio_device_id w8001_serio_ids[] = { |
| 685 | { | 685 | { |
| 686 | .type = SERIO_RS232, | 686 | .type = SERIO_RS232, |
| 687 | .proto = SERIO_W8001, | 687 | .proto = SERIO_W8001, |
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 63cacf5d6cf2..0f1219fa8561 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c | |||
| @@ -3879,11 +3879,9 @@ static void irte_ga_prepare(void *entry, | |||
| 3879 | u8 vector, u32 dest_apicid, int devid) | 3879 | u8 vector, u32 dest_apicid, int devid) |
| 3880 | { | 3880 | { |
| 3881 | struct irte_ga *irte = (struct irte_ga *) entry; | 3881 | struct irte_ga *irte = (struct irte_ga *) entry; |
| 3882 | struct iommu_dev_data *dev_data = search_dev_data(devid); | ||
| 3883 | 3882 | ||
| 3884 | irte->lo.val = 0; | 3883 | irte->lo.val = 0; |
| 3885 | irte->hi.val = 0; | 3884 | irte->hi.val = 0; |
| 3886 | irte->lo.fields_remap.guest_mode = dev_data ? dev_data->use_vapic : 0; | ||
| 3887 | irte->lo.fields_remap.int_type = delivery_mode; | 3885 | irte->lo.fields_remap.int_type = delivery_mode; |
| 3888 | irte->lo.fields_remap.dm = dest_mode; | 3886 | irte->lo.fields_remap.dm = dest_mode; |
| 3889 | irte->hi.fields.vector = vector; | 3887 | irte->hi.fields.vector = vector; |
| @@ -3939,10 +3937,10 @@ static void irte_ga_set_affinity(void *entry, u16 devid, u16 index, | |||
| 3939 | struct irte_ga *irte = (struct irte_ga *) entry; | 3937 | struct irte_ga *irte = (struct irte_ga *) entry; |
| 3940 | struct iommu_dev_data *dev_data = search_dev_data(devid); | 3938 | struct iommu_dev_data *dev_data = search_dev_data(devid); |
| 3941 | 3939 | ||
| 3942 | if (!dev_data || !dev_data->use_vapic) { | 3940 | if (!dev_data || !dev_data->use_vapic || |
| 3941 | !irte->lo.fields_remap.guest_mode) { | ||
| 3943 | irte->hi.fields.vector = vector; | 3942 | irte->hi.fields.vector = vector; |
| 3944 | irte->lo.fields_remap.destination = dest_apicid; | 3943 | irte->lo.fields_remap.destination = dest_apicid; |
| 3945 | irte->lo.fields_remap.guest_mode = 0; | ||
| 3946 | modify_irte_ga(devid, index, irte, NULL); | 3944 | modify_irte_ga(devid, index, irte, NULL); |
| 3947 | } | 3945 | } |
| 3948 | } | 3946 | } |
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c index 9f44ee8ea1bc..19779b88a479 100644 --- a/drivers/iommu/of_iommu.c +++ b/drivers/iommu/of_iommu.c | |||
| @@ -118,6 +118,7 @@ static const struct iommu_ops | |||
| 118 | 118 | ||
| 119 | ops = iommu_ops_from_fwnode(fwnode); | 119 | ops = iommu_ops_from_fwnode(fwnode); |
| 120 | if ((ops && !ops->of_xlate) || | 120 | if ((ops && !ops->of_xlate) || |
| 121 | !of_device_is_available(iommu_spec->np) || | ||
| 121 | (!ops && !of_iommu_driver_present(iommu_spec->np))) | 122 | (!ops && !of_iommu_driver_present(iommu_spec->np))) |
| 122 | return NULL; | 123 | return NULL; |
| 123 | 124 | ||
| @@ -236,6 +237,12 @@ const struct iommu_ops *of_iommu_configure(struct device *dev, | |||
| 236 | ops = ERR_PTR(err); | 237 | ops = ERR_PTR(err); |
| 237 | } | 238 | } |
| 238 | 239 | ||
| 240 | /* Ignore all other errors apart from EPROBE_DEFER */ | ||
| 241 | if (IS_ERR(ops) && (PTR_ERR(ops) != -EPROBE_DEFER)) { | ||
| 242 | dev_dbg(dev, "Adding to IOMMU failed: %ld\n", PTR_ERR(ops)); | ||
| 243 | ops = NULL; | ||
| 244 | } | ||
| 245 | |||
| 239 | return ops; | 246 | return ops; |
| 240 | } | 247 | } |
| 241 | 248 | ||
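The of_iommu.c change keeps -EPROBE_DEFER as the only error worth propagating: if the IOMMU driver simply has not probed yet, the master device should be retried later, while any other configuration failure is downgraded to "no IOMMU ops" so the device still gets a non-translated DMA setup. A rough sketch of how a caller is expected to treat the result (hypothetical consumer code, not from this patch):

	#include <linux/err.h>
	#include <linux/iommu.h>
	#include <linux/of_iommu.h>

	static int example_dma_configure(struct device *dev)
	{
		const struct iommu_ops *ops;

		ops = of_iommu_configure(dev, dev->of_node);
		if (IS_ERR(ops))
			return PTR_ERR(ops);	/* only -EPROBE_DEFER survives the filtering above */

		/* ops == NULL means no usable IOMMU: fall back to direct DMA setup */
		return 0;
	}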
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index eb7fbe159963..929f8558bf1c 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c | |||
| @@ -140,7 +140,7 @@ static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe) | |||
| 140 | } | 140 | } |
| 141 | 141 | ||
| 142 | #ifdef CONFIG_CLKSRC_MIPS_GIC | 142 | #ifdef CONFIG_CLKSRC_MIPS_GIC |
| 143 | u64 gic_read_count(void) | 143 | u64 notrace gic_read_count(void) |
| 144 | { | 144 | { |
| 145 | unsigned int hi, hi2, lo; | 145 | unsigned int hi, hi2, lo; |
| 146 | 146 | ||
| @@ -167,7 +167,7 @@ unsigned int gic_get_count_width(void) | |||
| 167 | return bits; | 167 | return bits; |
| 168 | } | 168 | } |
| 169 | 169 | ||
| 170 | void gic_write_compare(u64 cnt) | 170 | void notrace gic_write_compare(u64 cnt) |
| 171 | { | 171 | { |
| 172 | if (mips_cm_is64) { | 172 | if (mips_cm_is64) { |
| 173 | gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt); | 173 | gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt); |
| @@ -179,7 +179,7 @@ void gic_write_compare(u64 cnt) | |||
| 179 | } | 179 | } |
| 180 | } | 180 | } |
| 181 | 181 | ||
| 182 | void gic_write_cpu_compare(u64 cnt, int cpu) | 182 | void notrace gic_write_cpu_compare(u64 cnt, int cpu) |
| 183 | { | 183 | { |
| 184 | unsigned long flags; | 184 | unsigned long flags; |
| 185 | 185 | ||
diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c index bb3ac5fe5846..72a391e01011 100644 --- a/drivers/irqchip/irq-xtensa-mx.c +++ b/drivers/irqchip/irq-xtensa-mx.c | |||
| @@ -142,7 +142,7 @@ static struct irq_chip xtensa_mx_irq_chip = { | |||
| 142 | int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent) | 142 | int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent) |
| 143 | { | 143 | { |
| 144 | struct irq_domain *root_domain = | 144 | struct irq_domain *root_domain = |
| 145 | irq_domain_add_legacy(NULL, NR_IRQS, 0, 0, | 145 | irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0, |
| 146 | &xtensa_mx_irq_domain_ops, | 146 | &xtensa_mx_irq_domain_ops, |
| 147 | &xtensa_mx_irq_chip); | 147 | &xtensa_mx_irq_chip); |
| 148 | irq_set_default_host(root_domain); | 148 | irq_set_default_host(root_domain); |
diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c index 472ae1770964..f728755fa292 100644 --- a/drivers/irqchip/irq-xtensa-pic.c +++ b/drivers/irqchip/irq-xtensa-pic.c | |||
| @@ -89,7 +89,7 @@ static struct irq_chip xtensa_irq_chip = { | |||
| 89 | int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent) | 89 | int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent) |
| 90 | { | 90 | { |
| 91 | struct irq_domain *root_domain = | 91 | struct irq_domain *root_domain = |
| 92 | irq_domain_add_legacy(NULL, NR_IRQS, 0, 0, | 92 | irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0, |
| 93 | &xtensa_irq_domain_ops, &xtensa_irq_chip); | 93 | &xtensa_irq_domain_ops, &xtensa_irq_chip); |
| 94 | irq_set_default_host(root_domain); | 94 | irq_set_default_host(root_domain); |
| 95 | return 0; | 95 | return 0; |
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c index d07dd5196ffc..8aa158a09180 100644 --- a/drivers/isdn/i4l/isdn_ppp.c +++ b/drivers/isdn/i4l/isdn_ppp.c | |||
| @@ -2364,7 +2364,7 @@ static struct ippp_ccp_reset_state *isdn_ppp_ccp_reset_alloc_state(struct ippp_s | |||
| 2364 | id); | 2364 | id); |
| 2365 | return NULL; | 2365 | return NULL; |
| 2366 | } else { | 2366 | } else { |
| 2367 | rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_KERNEL); | 2367 | rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_ATOMIC); |
| 2368 | if (!rs) | 2368 | if (!rs) |
| 2369 | return NULL; | 2369 | return NULL; |
| 2370 | rs->state = CCPResetIdle; | 2370 | rs->state = CCPResetIdle; |
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c index 8b7faea2ddf8..422dced7c90a 100644 --- a/drivers/isdn/mISDN/stack.c +++ b/drivers/isdn/mISDN/stack.c | |||
| @@ -75,7 +75,7 @@ send_socklist(struct mISDN_sock_list *sl, struct sk_buff *skb) | |||
| 75 | if (sk->sk_state != MISDN_BOUND) | 75 | if (sk->sk_state != MISDN_BOUND) |
| 76 | continue; | 76 | continue; |
| 77 | if (!cskb) | 77 | if (!cskb) |
| 78 | cskb = skb_copy(skb, GFP_KERNEL); | 78 | cskb = skb_copy(skb, GFP_ATOMIC); |
| 79 | if (!cskb) { | 79 | if (!cskb) { |
| 80 | printk(KERN_WARNING "%s no skb\n", __func__); | 80 | printk(KERN_WARNING "%s no skb\n", __func__); |
| 81 | break; | 81 | break; |
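Both ISDN hunks above switch GFP_KERNEL to GFP_ATOMIC, the usual fix for a sleep-in-atomic bug: the allocations happen while a lock is held (the mISDN socket list is walked under its lock), and GFP_KERNEL may sleep there. A generic sketch of the rule, not specific to these drivers:

	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);
	static LIST_HEAD(example_list);

	struct example_item {
		struct list_head node;
	};

	static int example_add_item(void)
	{
		struct example_item *item;
		unsigned long flags;

		spin_lock_irqsave(&example_lock, flags);
		/* GFP_KERNEL may sleep; only GFP_ATOMIC is safe under a spinlock */
		item = kzalloc(sizeof(*item), GFP_ATOMIC);
		if (!item) {
			spin_unlock_irqrestore(&example_lock, flags);
			return -ENOMEM;
		}
		list_add_tail(&item->node, &example_list);
		spin_unlock_irqrestore(&example_lock, flags);
		return 0;
	}

When an allocation can be hoisted out of the critical section, GFP_KERNEL remains preferable, but that would be a larger change than these minimal fixes.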
diff --git a/drivers/leds/leds-bcm6328.c b/drivers/leds/leds-bcm6328.c index 1548259297c1..2cfd9389ee96 100644 --- a/drivers/leds/leds-bcm6328.c +++ b/drivers/leds/leds-bcm6328.c | |||
| @@ -242,7 +242,7 @@ static int bcm6328_hwled(struct device *dev, struct device_node *nc, u32 reg, | |||
| 242 | 242 | ||
| 243 | spin_lock_irqsave(lock, flags); | 243 | spin_lock_irqsave(lock, flags); |
| 244 | val = bcm6328_led_read(addr); | 244 | val = bcm6328_led_read(addr); |
| 245 | val |= (BIT(reg) << (((sel % 4) * 4) + 16)); | 245 | val |= (BIT(reg % 4) << (((sel % 4) * 4) + 16)); |
| 246 | bcm6328_led_write(addr, val); | 246 | bcm6328_led_write(addr, val); |
| 247 | spin_unlock_irqrestore(lock, flags); | 247 | spin_unlock_irqrestore(lock, flags); |
| 248 | } | 248 | } |
| @@ -269,7 +269,7 @@ static int bcm6328_hwled(struct device *dev, struct device_node *nc, u32 reg, | |||
| 269 | 269 | ||
| 270 | spin_lock_irqsave(lock, flags); | 270 | spin_lock_irqsave(lock, flags); |
| 271 | val = bcm6328_led_read(addr); | 271 | val = bcm6328_led_read(addr); |
| 272 | val |= (BIT(reg) << ((sel % 4) * 4)); | 272 | val |= (BIT(reg % 4) << ((sel % 4) * 4)); |
| 273 | bcm6328_led_write(addr, val); | 273 | bcm6328_led_write(addr, val); |
| 274 | spin_unlock_irqrestore(lock, flags); | 274 | spin_unlock_irqrestore(lock, flags); |
| 275 | } | 275 | } |
diff --git a/drivers/leds/trigger/ledtrig-heartbeat.c b/drivers/leds/trigger/ledtrig-heartbeat.c index afa3b4099214..e95ea65380c8 100644 --- a/drivers/leds/trigger/ledtrig-heartbeat.c +++ b/drivers/leds/trigger/ledtrig-heartbeat.c | |||
| @@ -20,7 +20,6 @@ | |||
| 20 | #include <linux/sched/loadavg.h> | 20 | #include <linux/sched/loadavg.h> |
| 21 | #include <linux/leds.h> | 21 | #include <linux/leds.h> |
| 22 | #include <linux/reboot.h> | 22 | #include <linux/reboot.h> |
| 23 | #include <linux/suspend.h> | ||
| 24 | #include "../leds.h" | 23 | #include "../leds.h" |
| 25 | 24 | ||
| 26 | static int panic_heartbeats; | 25 | static int panic_heartbeats; |
| @@ -163,30 +162,6 @@ static struct led_trigger heartbeat_led_trigger = { | |||
| 163 | .deactivate = heartbeat_trig_deactivate, | 162 | .deactivate = heartbeat_trig_deactivate, |
| 164 | }; | 163 | }; |
| 165 | 164 | ||
| 166 | static int heartbeat_pm_notifier(struct notifier_block *nb, | ||
| 167 | unsigned long pm_event, void *unused) | ||
| 168 | { | ||
| 169 | int rc; | ||
| 170 | |||
| 171 | switch (pm_event) { | ||
| 172 | case PM_SUSPEND_PREPARE: | ||
| 173 | case PM_HIBERNATION_PREPARE: | ||
| 174 | case PM_RESTORE_PREPARE: | ||
| 175 | led_trigger_unregister(&heartbeat_led_trigger); | ||
| 176 | break; | ||
| 177 | case PM_POST_SUSPEND: | ||
| 178 | case PM_POST_HIBERNATION: | ||
| 179 | case PM_POST_RESTORE: | ||
| 180 | rc = led_trigger_register(&heartbeat_led_trigger); | ||
| 181 | if (rc) | ||
| 182 | pr_err("could not re-register heartbeat trigger\n"); | ||
| 183 | break; | ||
| 184 | default: | ||
| 185 | break; | ||
| 186 | } | ||
| 187 | return NOTIFY_DONE; | ||
| 188 | } | ||
| 189 | |||
| 190 | static int heartbeat_reboot_notifier(struct notifier_block *nb, | 165 | static int heartbeat_reboot_notifier(struct notifier_block *nb, |
| 191 | unsigned long code, void *unused) | 166 | unsigned long code, void *unused) |
| 192 | { | 167 | { |
| @@ -201,10 +176,6 @@ static int heartbeat_panic_notifier(struct notifier_block *nb, | |||
| 201 | return NOTIFY_DONE; | 176 | return NOTIFY_DONE; |
| 202 | } | 177 | } |
| 203 | 178 | ||
| 204 | static struct notifier_block heartbeat_pm_nb = { | ||
| 205 | .notifier_call = heartbeat_pm_notifier, | ||
| 206 | }; | ||
| 207 | |||
| 208 | static struct notifier_block heartbeat_reboot_nb = { | 179 | static struct notifier_block heartbeat_reboot_nb = { |
| 209 | .notifier_call = heartbeat_reboot_notifier, | 180 | .notifier_call = heartbeat_reboot_notifier, |
| 210 | }; | 181 | }; |
| @@ -221,14 +192,12 @@ static int __init heartbeat_trig_init(void) | |||
| 221 | atomic_notifier_chain_register(&panic_notifier_list, | 192 | atomic_notifier_chain_register(&panic_notifier_list, |
| 222 | &heartbeat_panic_nb); | 193 | &heartbeat_panic_nb); |
| 223 | register_reboot_notifier(&heartbeat_reboot_nb); | 194 | register_reboot_notifier(&heartbeat_reboot_nb); |
| 224 | register_pm_notifier(&heartbeat_pm_nb); | ||
| 225 | } | 195 | } |
| 226 | return rc; | 196 | return rc; |
| 227 | } | 197 | } |
| 228 | 198 | ||
| 229 | static void __exit heartbeat_trig_exit(void) | 199 | static void __exit heartbeat_trig_exit(void) |
| 230 | { | 200 | { |
| 231 | unregister_pm_notifier(&heartbeat_pm_nb); | ||
| 232 | unregister_reboot_notifier(&heartbeat_reboot_nb); | 201 | unregister_reboot_notifier(&heartbeat_reboot_nb); |
| 233 | atomic_notifier_chain_unregister(&panic_notifier_list, | 202 | atomic_notifier_chain_unregister(&panic_notifier_list, |
| 234 | &heartbeat_panic_nb); | 203 | &heartbeat_panic_nb); |
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index bf7419a56454..f4eace5ea184 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c | |||
| @@ -485,10 +485,10 @@ void bitmap_print_sb(struct bitmap *bitmap) | |||
| 485 | pr_debug(" magic: %08x\n", le32_to_cpu(sb->magic)); | 485 | pr_debug(" magic: %08x\n", le32_to_cpu(sb->magic)); |
| 486 | pr_debug(" version: %d\n", le32_to_cpu(sb->version)); | 486 | pr_debug(" version: %d\n", le32_to_cpu(sb->version)); |
| 487 | pr_debug(" uuid: %08x.%08x.%08x.%08x\n", | 487 | pr_debug(" uuid: %08x.%08x.%08x.%08x\n", |
| 488 | *(__u32 *)(sb->uuid+0), | 488 | le32_to_cpu(*(__u32 *)(sb->uuid+0)), |
| 489 | *(__u32 *)(sb->uuid+4), | 489 | le32_to_cpu(*(__u32 *)(sb->uuid+4)), |
| 490 | *(__u32 *)(sb->uuid+8), | 490 | le32_to_cpu(*(__u32 *)(sb->uuid+8)), |
| 491 | *(__u32 *)(sb->uuid+12)); | 491 | le32_to_cpu(*(__u32 *)(sb->uuid+12))); |
| 492 | pr_debug(" events: %llu\n", | 492 | pr_debug(" events: %llu\n", |
| 493 | (unsigned long long) le64_to_cpu(sb->events)); | 493 | (unsigned long long) le64_to_cpu(sb->events)); |
| 494 | pr_debug("events cleared: %llu\n", | 494 | pr_debug("events cleared: %llu\n", |
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index cd8139593ccd..840c1496b2b1 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c | |||
| @@ -1334,7 +1334,7 @@ int dm_bufio_issue_flush(struct dm_bufio_client *c) | |||
| 1334 | { | 1334 | { |
| 1335 | struct dm_io_request io_req = { | 1335 | struct dm_io_request io_req = { |
| 1336 | .bi_op = REQ_OP_WRITE, | 1336 | .bi_op = REQ_OP_WRITE, |
| 1337 | .bi_op_flags = REQ_PREFLUSH, | 1337 | .bi_op_flags = REQ_PREFLUSH | REQ_SYNC, |
| 1338 | .mem.type = DM_IO_KMEM, | 1338 | .mem.type = DM_IO_KMEM, |
| 1339 | .mem.ptr.addr = NULL, | 1339 | .mem.ptr.addr = NULL, |
| 1340 | .client = c->dm_io, | 1340 | .client = c->dm_io, |
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index c7f7c8d76576..93b181088168 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c | |||
| @@ -783,7 +783,8 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi | |||
| 783 | for (i = 0; i < commit_sections; i++) | 783 | for (i = 0; i < commit_sections; i++) |
| 784 | rw_section_mac(ic, commit_start + i, true); | 784 | rw_section_mac(ic, commit_start + i, true); |
| 785 | } | 785 | } |
| 786 | rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, commit_sections, &io_comp); | 786 | rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start, |
| 787 | commit_sections, &io_comp); | ||
| 787 | } else { | 788 | } else { |
| 788 | unsigned to_end; | 789 | unsigned to_end; |
| 789 | io_comp.in_flight = (atomic_t)ATOMIC_INIT(2); | 790 | io_comp.in_flight = (atomic_t)ATOMIC_INIT(2); |
| @@ -1104,10 +1105,13 @@ static void schedule_autocommit(struct dm_integrity_c *ic) | |||
| 1104 | static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio) | 1105 | static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio) |
| 1105 | { | 1106 | { |
| 1106 | struct bio *bio; | 1107 | struct bio *bio; |
| 1107 | spin_lock_irq(&ic->endio_wait.lock); | 1108 | unsigned long flags; |
| 1109 | |||
| 1110 | spin_lock_irqsave(&ic->endio_wait.lock, flags); | ||
| 1108 | bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); | 1111 | bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); |
| 1109 | bio_list_add(&ic->flush_bio_list, bio); | 1112 | bio_list_add(&ic->flush_bio_list, bio); |
| 1110 | spin_unlock_irq(&ic->endio_wait.lock); | 1113 | spin_unlock_irqrestore(&ic->endio_wait.lock, flags); |
| 1114 | |||
| 1111 | queue_work(ic->commit_wq, &ic->commit_work); | 1115 | queue_work(ic->commit_wq, &ic->commit_work); |
| 1112 | } | 1116 | } |
| 1113 | 1117 | ||
| @@ -2374,21 +2378,6 @@ static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic) | |||
| 2374 | blk_queue_max_integrity_segments(disk->queue, UINT_MAX); | 2378 | blk_queue_max_integrity_segments(disk->queue, UINT_MAX); |
| 2375 | } | 2379 | } |
| 2376 | 2380 | ||
| 2377 | /* FIXME: use new kvmalloc */ | ||
| 2378 | static void *dm_integrity_kvmalloc(size_t size, gfp_t gfp) | ||
| 2379 | { | ||
| 2380 | void *ptr = NULL; | ||
| 2381 | |||
| 2382 | if (size <= PAGE_SIZE) | ||
| 2383 | ptr = kmalloc(size, GFP_KERNEL | gfp); | ||
| 2384 | if (!ptr && size <= KMALLOC_MAX_SIZE) | ||
| 2385 | ptr = kmalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | gfp); | ||
| 2386 | if (!ptr) | ||
| 2387 | ptr = __vmalloc(size, GFP_KERNEL | gfp, PAGE_KERNEL); | ||
| 2388 | |||
| 2389 | return ptr; | ||
| 2390 | } | ||
| 2391 | |||
| 2392 | static void dm_integrity_free_page_list(struct dm_integrity_c *ic, struct page_list *pl) | 2381 | static void dm_integrity_free_page_list(struct dm_integrity_c *ic, struct page_list *pl) |
| 2393 | { | 2382 | { |
| 2394 | unsigned i; | 2383 | unsigned i; |
| @@ -2407,7 +2396,7 @@ static struct page_list *dm_integrity_alloc_page_list(struct dm_integrity_c *ic) | |||
| 2407 | struct page_list *pl; | 2396 | struct page_list *pl; |
| 2408 | unsigned i; | 2397 | unsigned i; |
| 2409 | 2398 | ||
| 2410 | pl = dm_integrity_kvmalloc(page_list_desc_size, __GFP_ZERO); | 2399 | pl = kvmalloc(page_list_desc_size, GFP_KERNEL | __GFP_ZERO); |
| 2411 | if (!pl) | 2400 | if (!pl) |
| 2412 | return NULL; | 2401 | return NULL; |
| 2413 | 2402 | ||
| @@ -2437,7 +2426,7 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int | |||
| 2437 | struct scatterlist **sl; | 2426 | struct scatterlist **sl; |
| 2438 | unsigned i; | 2427 | unsigned i; |
| 2439 | 2428 | ||
| 2440 | sl = dm_integrity_kvmalloc(ic->journal_sections * sizeof(struct scatterlist *), __GFP_ZERO); | 2429 | sl = kvmalloc(ic->journal_sections * sizeof(struct scatterlist *), GFP_KERNEL | __GFP_ZERO); |
| 2441 | if (!sl) | 2430 | if (!sl) |
| 2442 | return NULL; | 2431 | return NULL; |
| 2443 | 2432 | ||
| @@ -2453,7 +2442,7 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int | |||
| 2453 | 2442 | ||
| 2454 | n_pages = (end_index - start_index + 1); | 2443 | n_pages = (end_index - start_index + 1); |
| 2455 | 2444 | ||
| 2456 | s = dm_integrity_kvmalloc(n_pages * sizeof(struct scatterlist), 0); | 2445 | s = kvmalloc(n_pages * sizeof(struct scatterlist), GFP_KERNEL); |
| 2457 | if (!s) { | 2446 | if (!s) { |
| 2458 | dm_integrity_free_journal_scatterlist(ic, sl); | 2447 | dm_integrity_free_journal_scatterlist(ic, sl); |
| 2459 | return NULL; | 2448 | return NULL; |
| @@ -2617,7 +2606,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error) | |||
| 2617 | goto bad; | 2606 | goto bad; |
| 2618 | } | 2607 | } |
| 2619 | 2608 | ||
| 2620 | sg = dm_integrity_kvmalloc((ic->journal_pages + 1) * sizeof(struct scatterlist), 0); | 2609 | sg = kvmalloc((ic->journal_pages + 1) * sizeof(struct scatterlist), GFP_KERNEL); |
| 2621 | if (!sg) { | 2610 | if (!sg) { |
| 2622 | *error = "Unable to allocate sg list"; | 2611 | *error = "Unable to allocate sg list"; |
| 2623 | r = -ENOMEM; | 2612 | r = -ENOMEM; |
| @@ -2673,7 +2662,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error) | |||
| 2673 | r = -ENOMEM; | 2662 | r = -ENOMEM; |
| 2674 | goto bad; | 2663 | goto bad; |
| 2675 | } | 2664 | } |
| 2676 | ic->sk_requests = dm_integrity_kvmalloc(ic->journal_sections * sizeof(struct skcipher_request *), __GFP_ZERO); | 2665 | ic->sk_requests = kvmalloc(ic->journal_sections * sizeof(struct skcipher_request *), GFP_KERNEL | __GFP_ZERO); |
| 2677 | if (!ic->sk_requests) { | 2666 | if (!ic->sk_requests) { |
| 2678 | *error = "Unable to allocate sk requests"; | 2667 | *error = "Unable to allocate sk requests"; |
| 2679 | r = -ENOMEM; | 2668 | r = -ENOMEM; |
| @@ -2740,7 +2729,7 @@ retest_commit_id: | |||
| 2740 | r = -ENOMEM; | 2729 | r = -ENOMEM; |
| 2741 | goto bad; | 2730 | goto bad; |
| 2742 | } | 2731 | } |
| 2743 | ic->journal_tree = dm_integrity_kvmalloc(journal_tree_size, 0); | 2732 | ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL); |
| 2744 | if (!ic->journal_tree) { | 2733 | if (!ic->journal_tree) { |
| 2745 | *error = "Could not allocate memory for journal tree"; | 2734 | *error = "Could not allocate memory for journal tree"; |
| 2746 | r = -ENOMEM; | 2735 | r = -ENOMEM; |
| @@ -3054,6 +3043,11 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) | |||
| 3054 | ti->error = "The device is too small"; | 3043 | ti->error = "The device is too small"; |
| 3055 | goto bad; | 3044 | goto bad; |
| 3056 | } | 3045 | } |
| 3046 | if (ti->len > ic->provided_data_sectors) { | ||
| 3047 | r = -EINVAL; | ||
| 3048 | ti->error = "Not enough provided sectors for requested mapping size"; | ||
| 3049 | goto bad; | ||
| 3050 | } | ||
| 3057 | 3051 | ||
| 3058 | if (!buffer_sectors) | 3052 | if (!buffer_sectors) |
| 3059 | buffer_sectors = 1; | 3053 | buffer_sectors = 1; |
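The dm-integrity hunks above drop the driver-private dm_integrity_kvmalloc() helper (flagged with a FIXME) in favour of the generic kvmalloc(): it tries kmalloc() first and transparently falls back to vmalloc() for large or fragmented requests, and kvfree() releases either backing store. A minimal usage sketch with illustrative names:

	#include <linux/mm.h>
	#include <linux/slab.h>
	#include <linux/vmalloc.h>

	static int example_alloc_table(size_t nr_entries, u64 **out)
	{
		/* kmalloc-backed when small, vmalloc-backed when large */
		u64 *table = kvmalloc(nr_entries * sizeof(*table),
				      GFP_KERNEL | __GFP_ZERO);

		if (!table)
			return -ENOMEM;

		*out = table;
		return 0;
	}

	static void example_free_table(u64 *table)
	{
		kvfree(table);	/* correct for both kmalloc and vmalloc backing */
	}

For multiplications like the ones above, kvmalloc_array() would also guard against overflow, but the patch keeps the existing size computations.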
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 3702e502466d..8d5ca30f6551 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c | |||
| @@ -317,8 +317,8 @@ static void do_region(int op, int op_flags, unsigned region, | |||
| 317 | else if (op == REQ_OP_WRITE_SAME) | 317 | else if (op == REQ_OP_WRITE_SAME) |
| 318 | special_cmd_max_sectors = q->limits.max_write_same_sectors; | 318 | special_cmd_max_sectors = q->limits.max_write_same_sectors; |
| 319 | if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES || | 319 | if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES || |
| 320 | op == REQ_OP_WRITE_SAME) && | 320 | op == REQ_OP_WRITE_SAME) && special_cmd_max_sectors == 0) { |
| 321 | special_cmd_max_sectors == 0) { | 321 | atomic_inc(&io->count); |
| 322 | dec_count(io, region, -EOPNOTSUPP); | 322 | dec_count(io, region, -EOPNOTSUPP); |
| 323 | return; | 323 | return; |
| 324 | } | 324 | } |
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 0555b4410e05..41852ae287a5 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c | |||
| @@ -1710,12 +1710,13 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern | |||
| 1710 | } | 1710 | } |
| 1711 | 1711 | ||
| 1712 | /* | 1712 | /* |
| 1713 | * Try to avoid low memory issues when a device is suspended. | 1713 | * Use __GFP_HIGH to avoid low memory issues when a device is |
| 1714 | * suspended and the ioctl is needed to resume it. | ||
| 1714 | * Use kmalloc() rather than vmalloc() when we can. | 1715 | * Use kmalloc() rather than vmalloc() when we can. |
| 1715 | */ | 1716 | */ |
| 1716 | dmi = NULL; | 1717 | dmi = NULL; |
| 1717 | noio_flag = memalloc_noio_save(); | 1718 | noio_flag = memalloc_noio_save(); |
| 1718 | dmi = kvmalloc(param_kernel->data_size, GFP_KERNEL); | 1719 | dmi = kvmalloc(param_kernel->data_size, GFP_KERNEL | __GFP_HIGH); |
| 1719 | memalloc_noio_restore(noio_flag); | 1720 | memalloc_noio_restore(noio_flag); |
| 1720 | 1721 | ||
| 1721 | if (!dmi) { | 1722 | if (!dmi) { |
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 7d893228c40f..b4b75dad816a 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c | |||
| @@ -1927,7 +1927,7 @@ struct dm_raid_superblock { | |||
| 1927 | /******************************************************************** | 1927 | /******************************************************************** |
| 1928 | * BELOW FOLLOW V1.9.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!! | 1928 | * BELOW FOLLOW V1.9.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!! |
| 1929 | * | 1929 | * |
| 1930 | * FEATURE_FLAG_SUPPORTS_V190 in the features member indicates that those exist | 1930 | * FEATURE_FLAG_SUPPORTS_V190 in the compat_features member indicates that those exist |
| 1931 | */ | 1931 | */ |
| 1932 | 1932 | ||
| 1933 | __le32 flags; /* Flags defining array states for reshaping */ | 1933 | __le32 flags; /* Flags defining array states for reshaping */ |
| @@ -2092,6 +2092,11 @@ static void super_sync(struct mddev *mddev, struct md_rdev *rdev) | |||
| 2092 | sb->layout = cpu_to_le32(mddev->layout); | 2092 | sb->layout = cpu_to_le32(mddev->layout); |
| 2093 | sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors); | 2093 | sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors); |
| 2094 | 2094 | ||
| 2095 | /******************************************************************** | ||
| 2096 | * BELOW FOLLOW V1.9.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!! | ||
| 2097 | * | ||
| 2098 | * FEATURE_FLAG_SUPPORTS_V190 in the compat_features member indicates that those exist | ||
| 2099 | */ | ||
| 2095 | sb->new_level = cpu_to_le32(mddev->new_level); | 2100 | sb->new_level = cpu_to_le32(mddev->new_level); |
| 2096 | sb->new_layout = cpu_to_le32(mddev->new_layout); | 2101 | sb->new_layout = cpu_to_le32(mddev->new_layout); |
| 2097 | sb->new_stripe_sectors = cpu_to_le32(mddev->new_chunk_sectors); | 2102 | sb->new_stripe_sectors = cpu_to_le32(mddev->new_chunk_sectors); |
| @@ -2438,8 +2443,14 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev) | |||
| 2438 | mddev->bitmap_info.default_offset = mddev->bitmap_info.offset; | 2443 | mddev->bitmap_info.default_offset = mddev->bitmap_info.offset; |
| 2439 | 2444 | ||
| 2440 | if (!test_and_clear_bit(FirstUse, &rdev->flags)) { | 2445 | if (!test_and_clear_bit(FirstUse, &rdev->flags)) { |
| 2441 | /* Retrieve device size stored in superblock to be prepared for shrink */ | 2446 | /* |
| 2442 | rdev->sectors = le64_to_cpu(sb->sectors); | 2447 | * Retrieve rdev size stored in superblock to be prepared for shrink. |
| 2448 | * Check extended superblock members are present otherwise the size | ||
| 2449 | * will not be set! | ||
| 2450 | */ | ||
| 2451 | if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) | ||
| 2452 | rdev->sectors = le64_to_cpu(sb->sectors); | ||
| 2453 | |||
| 2443 | rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset); | 2454 | rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset); |
| 2444 | if (rdev->recovery_offset == MaxSector) | 2455 | if (rdev->recovery_offset == MaxSector) |
| 2445 | set_bit(In_sync, &rdev->flags); | 2456 | set_bit(In_sync, &rdev->flags); |
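The super_validate() hunk only reads sb->sectors when FEATURE_FLAG_SUPPORTS_V190 is set in compat_features, since pre-1.9.0 superblocks never wrote the extended members. A compact sketch of that feature-gated read; the flag and field names below are made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define FEAT_V190 (1u << 0)    /* hypothetical "extended fields present" bit */

struct raid_sb {
    uint32_t compat_features;
    /* ... base fields ... */
    uint64_t sectors;          /* only written by v1.9.0+ formats */
};

struct rdev {
    uint64_t sectors;
};

static void load_rdev_size(struct rdev *rdev, const struct raid_sb *sb)
{
    /* Only trust the extended field when the feature bit says it was
     * written; otherwise keep whatever size was already probed. */
    if (sb->compat_features & FEAT_V190)
        rdev->sectors = sb->sectors;
}

int main(void)
{
    struct raid_sb old_sb = { .compat_features = 0, .sectors = 0 };
    struct raid_sb new_sb = { .compat_features = FEAT_V190, .sectors = 2048 };
    struct rdev r = { .sectors = 4096 };   /* size probed from the device */

    load_rdev_size(&r, &old_sb);
    printf("pre-1.9.0 sb: %llu sectors\n", (unsigned long long)r.sectors);
    load_rdev_size(&r, &new_sb);
    printf("v1.9.0 sb:    %llu sectors\n", (unsigned long long)r.sectors);
    return 0;
}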
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index a95cbb80fb34..4da8858856fb 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c | |||
| @@ -145,6 +145,7 @@ static void dispatch_bios(void *context, struct bio_list *bio_list) | |||
| 145 | 145 | ||
| 146 | struct dm_raid1_bio_record { | 146 | struct dm_raid1_bio_record { |
| 147 | struct mirror *m; | 147 | struct mirror *m; |
| 148 | /* if details->bi_bdev == NULL, details were not saved */ | ||
| 148 | struct dm_bio_details details; | 149 | struct dm_bio_details details; |
| 149 | region_t write_region; | 150 | region_t write_region; |
| 150 | }; | 151 | }; |
| @@ -260,7 +261,7 @@ static int mirror_flush(struct dm_target *ti) | |||
| 260 | struct mirror *m; | 261 | struct mirror *m; |
| 261 | struct dm_io_request io_req = { | 262 | struct dm_io_request io_req = { |
| 262 | .bi_op = REQ_OP_WRITE, | 263 | .bi_op = REQ_OP_WRITE, |
| 263 | .bi_op_flags = REQ_PREFLUSH, | 264 | .bi_op_flags = REQ_PREFLUSH | REQ_SYNC, |
| 264 | .mem.type = DM_IO_KMEM, | 265 | .mem.type = DM_IO_KMEM, |
| 265 | .mem.ptr.addr = NULL, | 266 | .mem.ptr.addr = NULL, |
| 266 | .client = ms->io_client, | 267 | .client = ms->io_client, |
| @@ -1198,6 +1199,8 @@ static int mirror_map(struct dm_target *ti, struct bio *bio) | |||
| 1198 | struct dm_raid1_bio_record *bio_record = | 1199 | struct dm_raid1_bio_record *bio_record = |
| 1199 | dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record)); | 1200 | dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record)); |
| 1200 | 1201 | ||
| 1202 | bio_record->details.bi_bdev = NULL; | ||
| 1203 | |||
| 1201 | if (rw == WRITE) { | 1204 | if (rw == WRITE) { |
| 1202 | /* Save region for mirror_end_io() handler */ | 1205 | /* Save region for mirror_end_io() handler */ |
| 1203 | bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio); | 1206 | bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio); |
| @@ -1256,12 +1259,22 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error) | |||
| 1256 | } | 1259 | } |
| 1257 | 1260 | ||
| 1258 | if (error == -EOPNOTSUPP) | 1261 | if (error == -EOPNOTSUPP) |
| 1259 | return error; | 1262 | goto out; |
| 1260 | 1263 | ||
| 1261 | if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD)) | 1264 | if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD)) |
| 1262 | return error; | 1265 | goto out; |
| 1263 | 1266 | ||
| 1264 | if (unlikely(error)) { | 1267 | if (unlikely(error)) { |
| 1268 | if (!bio_record->details.bi_bdev) { | ||
| 1269 | /* | ||
| 1270 | * There wasn't enough memory to record necessary | ||
| 1271 | * information for a retry or there was no other | ||
| 1272 | * mirror in-sync. | ||
| 1273 | */ | ||
| 1274 | DMERR_LIMIT("Mirror read failed."); | ||
| 1275 | return -EIO; | ||
| 1276 | } | ||
| 1277 | |||
| 1265 | m = bio_record->m; | 1278 | m = bio_record->m; |
| 1266 | 1279 | ||
| 1267 | DMERR("Mirror read failed from %s. Trying alternative device.", | 1280 | DMERR("Mirror read failed from %s. Trying alternative device.", |
| @@ -1277,6 +1290,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error) | |||
| 1277 | bd = &bio_record->details; | 1290 | bd = &bio_record->details; |
| 1278 | 1291 | ||
| 1279 | dm_bio_restore(bd, bio); | 1292 | dm_bio_restore(bd, bio); |
| 1293 | bio_record->details.bi_bdev = NULL; | ||
| 1280 | bio->bi_error = 0; | 1294 | bio->bi_error = 0; |
| 1281 | 1295 | ||
| 1282 | queue_bio(ms, bio, rw); | 1296 | queue_bio(ms, bio, rw); |
| @@ -1285,6 +1299,9 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error) | |||
| 1285 | DMERR("All replicated volumes dead, failing I/O"); | 1299 | DMERR("All replicated volumes dead, failing I/O"); |
| 1286 | } | 1300 | } |
| 1287 | 1301 | ||
| 1302 | out: | ||
| 1303 | bio_record->details.bi_bdev = NULL; | ||
| 1304 | |||
| 1288 | return error; | 1305 | return error; |
| 1289 | } | 1306 | } |
| 1290 | 1307 | ||
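The dm-raid1 changes use details.bi_bdev == NULL as a sentinel for "no retry details were saved": mirror_map() clears it up front, mirror_end_io() refuses to retry when it is still NULL, and every exit path clears it again. A small standalone sketch of that sentinel-and-clear pattern; bio_record, map_bio() and end_io() here are illustrative names only:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct bio_record {
    void *saved_dev;    /* NULL => details were not saved */
    int   saved_sector;
};

static void map_bio(struct bio_record *rec, void *dev, int sector, bool can_retry)
{
    rec->saved_dev = NULL;          /* sentinel: nothing recorded yet */
    if (can_retry) {
        rec->saved_dev = dev;       /* record enough state for a retry */
        rec->saved_sector = sector;
    }
}

static int end_io(struct bio_record *rec, int error)
{
    if (!error)
        goto out;

    if (!rec->saved_dev) {
        /* No details were saved: fail instead of retrying blindly. */
        printf("read failed, no retry possible\n");
        return -5 /* EIO */;
    }

    printf("retrying sector %d on an alternative device\n", rec->saved_sector);
    rec->saved_dev = NULL;          /* details are consumed by the retry */
    return 0;
out:
    rec->saved_dev = NULL;          /* always clear on the way out */
    return error;
}

int main(void)
{
    struct bio_record rec;
    int dummy_dev;

    map_bio(&rec, &dummy_dev, 42, true);
    end_io(&rec, -1);
    map_bio(&rec, &dummy_dev, 42, false);
    end_io(&rec, -1);
    return 0;
}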
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index b93476c3ba3f..c5534d294773 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c | |||
| @@ -741,7 +741,8 @@ static void persistent_commit_exception(struct dm_exception_store *store, | |||
| 741 | /* | 741 | /* |
| 742 | * Commit exceptions to disk. | 742 | * Commit exceptions to disk. |
| 743 | */ | 743 | */ |
| 744 | if (ps->valid && area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA)) | 744 | if (ps->valid && area_io(ps, REQ_OP_WRITE, |
| 745 | REQ_PREFLUSH | REQ_FUA | REQ_SYNC)) | ||
| 745 | ps->valid = 0; | 746 | ps->valid = 0; |
| 746 | 747 | ||
| 747 | /* | 748 | /* |
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 17ad50daed08..28808e5ec0fd 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c | |||
| @@ -1094,6 +1094,19 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m) | |||
| 1094 | return; | 1094 | return; |
| 1095 | } | 1095 | } |
| 1096 | 1096 | ||
| 1097 | /* | ||
| 1098 | * Increment the unmapped blocks. This prevents a race between the | ||
| 1099 | * passdown io and reallocation of freed blocks. | ||
| 1100 | */ | ||
| 1101 | r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end); | ||
| 1102 | if (r) { | ||
| 1103 | metadata_operation_failed(pool, "dm_pool_inc_data_range", r); | ||
| 1104 | bio_io_error(m->bio); | ||
| 1105 | cell_defer_no_holder(tc, m->cell); | ||
| 1106 | mempool_free(m, pool->mapping_pool); | ||
| 1107 | return; | ||
| 1108 | } | ||
| 1109 | |||
| 1097 | discard_parent = bio_alloc(GFP_NOIO, 1); | 1110 | discard_parent = bio_alloc(GFP_NOIO, 1); |
| 1098 | if (!discard_parent) { | 1111 | if (!discard_parent) { |
| 1099 | DMWARN("%s: unable to allocate top level discard bio for passdown. Skipping passdown.", | 1112 | DMWARN("%s: unable to allocate top level discard bio for passdown. Skipping passdown.", |
| @@ -1114,19 +1127,6 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m) | |||
| 1114 | end_discard(&op, r); | 1127 | end_discard(&op, r); |
| 1115 | } | 1128 | } |
| 1116 | } | 1129 | } |
| 1117 | |||
| 1118 | /* | ||
| 1119 | * Increment the unmapped blocks. This prevents a race between the | ||
| 1120 | * passdown io and reallocation of freed blocks. | ||
| 1121 | */ | ||
| 1122 | r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end); | ||
| 1123 | if (r) { | ||
| 1124 | metadata_operation_failed(pool, "dm_pool_inc_data_range", r); | ||
| 1125 | bio_io_error(m->bio); | ||
| 1126 | cell_defer_no_holder(tc, m->cell); | ||
| 1127 | mempool_free(m, pool->mapping_pool); | ||
| 1128 | return; | ||
| 1129 | } | ||
| 1130 | } | 1130 | } |
| 1131 | 1131 | ||
| 1132 | static void process_prepared_discard_passdown_pt2(struct dm_thin_new_mapping *m) | 1132 | static void process_prepared_discard_passdown_pt2(struct dm_thin_new_mapping *m) |
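The dm-thin hunk moves the dm_pool_inc_data_range() call ahead of allocating and issuing the passdown discard, so the affected blocks are pinned before the discard can race with reallocation of the freed range, and the function bails out if the metadata update fails. A sketch of that "reserve before issue" ordering with obviously invented helpers (reserve_range(), issue_discard()):

#include <stdio.h>

/* Pretend metadata op: pin [begin, end) so it cannot be reallocated. */
static int reserve_range(unsigned long begin, unsigned long end)
{
    printf("reserved blocks %lu-%lu\n", begin, end);
    return 0;   /* 0 on success, negative errno on failure */
}

static void issue_discard(unsigned long begin, unsigned long end)
{
    printf("discard issued for blocks %lu-%lu\n", begin, end);
}

static int passdown_discard(unsigned long begin, unsigned long end)
{
    int r;

    /*
     * Reserve first: once the discard is in flight we must be sure the
     * range cannot be handed out to new writes and then discarded.
     */
    r = reserve_range(begin, end);
    if (r) {
        fprintf(stderr, "metadata operation failed: %d\n", r);
        return r;   /* abort before any I/O is issued */
    }

    issue_discard(begin, end);
    return 0;
}

int main(void)
{
    return passdown_discard(100, 164);
}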
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index 97de961a3bfc..1ec9b2c51c07 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c | |||
| @@ -166,7 +166,7 @@ static int verity_hash_init(struct dm_verity *v, struct ahash_request *req, | |||
| 166 | return r; | 166 | return r; |
| 167 | } | 167 | } |
| 168 | 168 | ||
| 169 | if (likely(v->version >= 1)) | 169 | if (likely(v->salt_size && (v->version >= 1))) |
| 170 | r = verity_hash_update(v, req, v->salt, v->salt_size, res); | 170 | r = verity_hash_update(v, req, v->salt, v->salt_size, res); |
| 171 | 171 | ||
| 172 | return r; | 172 | return r; |
| @@ -177,7 +177,7 @@ static int verity_hash_final(struct dm_verity *v, struct ahash_request *req, | |||
| 177 | { | 177 | { |
| 178 | int r; | 178 | int r; |
| 179 | 179 | ||
| 180 | if (unlikely(!v->version)) { | 180 | if (unlikely(v->salt_size && (!v->version))) { |
| 181 | r = verity_hash_update(v, req, v->salt, v->salt_size, res); | 181 | r = verity_hash_update(v, req, v->salt, v->salt_size, res); |
| 182 | 182 | ||
| 183 | if (r < 0) { | 183 | if (r < 0) { |
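The dm-verity change makes salting conditional on salt_size as well as the format version: format >= 1 mixes the salt in before the data, format 0 after it, and a zero-length salt is now skipped entirely. A minimal sketch of that layout using a toy FNV-1a hash; the verity structures and helpers below are placeholders, not the real crypto API:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct toy_hash { uint32_t state; };

static void hash_init(struct toy_hash *h) { h->state = 2166136261u; }
static void hash_update(struct toy_hash *h, const uint8_t *d, size_t n)
{
    while (n--) { h->state ^= *d++; h->state *= 16777619u; }   /* FNV-1a */
}
static uint32_t hash_final(struct toy_hash *h) { return h->state; }

struct verity_cfg {
    int version;
    const uint8_t *salt;
    size_t salt_size;   /* may be zero: "no salt" */
};

static uint32_t hash_block(const struct verity_cfg *v,
                           const uint8_t *data, size_t len)
{
    struct toy_hash h;

    hash_init(&h);
    /* format >= 1: salt goes in front of the data, if there is one */
    if (v->salt_size && v->version >= 1)
        hash_update(&h, v->salt, v->salt_size);

    hash_update(&h, data, len);

    /* format 0: salt goes after the data, if there is one */
    if (v->salt_size && v->version == 0)
        hash_update(&h, v->salt, v->salt_size);

    return hash_final(&h);
}

int main(void)
{
    const uint8_t salt[] = { 0xde, 0xad };
    const uint8_t blk[]  = "block data";
    struct verity_cfg salted   = { 1, salt, sizeof(salt) };
    struct verity_cfg unsalted = { 1, NULL, 0 };

    printf("salted:   %08x\n", hash_block(&salted, blk, sizeof(blk)));
    printf("unsalted: %08x\n", hash_block(&unsalted, blk, sizeof(blk)));
    return 0;
}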
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 6ef9500226c0..37ccd73c79ec 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
| @@ -1657,7 +1657,7 @@ static struct mapped_device *alloc_dev(int minor) | |||
| 1657 | 1657 | ||
| 1658 | bio_init(&md->flush_bio, NULL, 0); | 1658 | bio_init(&md->flush_bio, NULL, 0); |
| 1659 | md->flush_bio.bi_bdev = md->bdev; | 1659 | md->flush_bio.bi_bdev = md->bdev; |
| 1660 | md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; | 1660 | md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; |
| 1661 | 1661 | ||
| 1662 | dm_stats_init(&md->stats); | 1662 | dm_stats_init(&md->stats); |
| 1663 | 1663 | ||
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 7299ce2f08a8..03082e17c65c 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c | |||
| @@ -1311,8 +1311,10 @@ static int add_new_disk(struct mddev *mddev, struct md_rdev *rdev) | |||
| 1311 | cmsg.raid_slot = cpu_to_le32(rdev->desc_nr); | 1311 | cmsg.raid_slot = cpu_to_le32(rdev->desc_nr); |
| 1312 | lock_comm(cinfo, 1); | 1312 | lock_comm(cinfo, 1); |
| 1313 | ret = __sendmsg(cinfo, &cmsg); | 1313 | ret = __sendmsg(cinfo, &cmsg); |
| 1314 | if (ret) | 1314 | if (ret) { |
| 1315 | unlock_comm(cinfo); | ||
| 1315 | return ret; | 1316 | return ret; |
| 1317 | } | ||
| 1316 | cinfo->no_new_dev_lockres->flags |= DLM_LKF_NOQUEUE; | 1318 | cinfo->no_new_dev_lockres->flags |= DLM_LKF_NOQUEUE; |
| 1317 | ret = dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_EX); | 1319 | ret = dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_EX); |
| 1318 | cinfo->no_new_dev_lockres->flags &= ~DLM_LKF_NOQUEUE; | 1320 | cinfo->no_new_dev_lockres->flags &= ~DLM_LKF_NOQUEUE; |
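The md-cluster fix adds unlock_comm() to the __sendmsg() error path so a failed send no longer returns with the communication lock still held. The same lock-balance rule, sketched with a plain pthread mutex standing in for lock_comm()/unlock_comm():

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t comm_lock = PTHREAD_MUTEX_INITIALIZER;

static int send_msg(int fail)
{
    return fail ? -5 : 0;   /* pretend -EIO on failure */
}

/* Every return after pthread_mutex_lock() must drop the lock. */
static int add_new_disk(int fail)
{
    int ret;

    pthread_mutex_lock(&comm_lock);
    ret = send_msg(fail);
    if (ret) {
        pthread_mutex_unlock(&comm_lock);   /* the fix: unlock on error too */
        return ret;
    }
    /* ... more work under the lock ... */
    pthread_mutex_unlock(&comm_lock);
    return 0;
}

int main(void)
{
    printf("ok path:   %d\n", add_new_disk(0));
    printf("fail path: %d\n", add_new_disk(1));
    return 0;
}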
diff --git a/drivers/md/md.c b/drivers/md/md.c index 10367ffe92e3..87edc342ccb3 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
| @@ -765,7 +765,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev, | |||
| 765 | test_bit(FailFast, &rdev->flags) && | 765 | test_bit(FailFast, &rdev->flags) && |
| 766 | !test_bit(LastDev, &rdev->flags)) | 766 | !test_bit(LastDev, &rdev->flags)) |
| 767 | ff = MD_FAILFAST; | 767 | ff = MD_FAILFAST; |
| 768 | bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA | ff; | 768 | bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA | ff; |
| 769 | 769 | ||
| 770 | atomic_inc(&mddev->pending_writes); | 770 | atomic_inc(&mddev->pending_writes); |
| 771 | submit_bio(bio); | 771 | submit_bio(bio); |
| @@ -5174,6 +5174,18 @@ static void mddev_delayed_delete(struct work_struct *ws) | |||
| 5174 | 5174 | ||
| 5175 | static void no_op(struct percpu_ref *r) {} | 5175 | static void no_op(struct percpu_ref *r) {} |
| 5176 | 5176 | ||
| 5177 | int mddev_init_writes_pending(struct mddev *mddev) | ||
| 5178 | { | ||
| 5179 | if (mddev->writes_pending.percpu_count_ptr) | ||
| 5180 | return 0; | ||
| 5181 | if (percpu_ref_init(&mddev->writes_pending, no_op, 0, GFP_KERNEL) < 0) | ||
| 5182 | return -ENOMEM; | ||
| 5183 | /* We want to start with the refcount at zero */ | ||
| 5184 | percpu_ref_put(&mddev->writes_pending); | ||
| 5185 | return 0; | ||
| 5186 | } | ||
| 5187 | EXPORT_SYMBOL_GPL(mddev_init_writes_pending); | ||
| 5188 | |||
| 5177 | static int md_alloc(dev_t dev, char *name) | 5189 | static int md_alloc(dev_t dev, char *name) |
| 5178 | { | 5190 | { |
| 5179 | /* | 5191 | /* |
| @@ -5239,10 +5251,6 @@ static int md_alloc(dev_t dev, char *name) | |||
| 5239 | blk_queue_make_request(mddev->queue, md_make_request); | 5251 | blk_queue_make_request(mddev->queue, md_make_request); |
| 5240 | blk_set_stacking_limits(&mddev->queue->limits); | 5252 | blk_set_stacking_limits(&mddev->queue->limits); |
| 5241 | 5253 | ||
| 5242 | if (percpu_ref_init(&mddev->writes_pending, no_op, 0, GFP_KERNEL) < 0) | ||
| 5243 | goto abort; | ||
| 5244 | /* We want to start with the refcount at zero */ | ||
| 5245 | percpu_ref_put(&mddev->writes_pending); | ||
| 5246 | disk = alloc_disk(1 << shift); | 5254 | disk = alloc_disk(1 << shift); |
| 5247 | if (!disk) { | 5255 | if (!disk) { |
| 5248 | blk_cleanup_queue(mddev->queue); | 5256 | blk_cleanup_queue(mddev->queue); |
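The md.c change moves the writes_pending percpu_ref setup out of md_alloc() into a new mddev_init_writes_pending() helper that raid1, raid10 and raid5 call from their run() methods; the helper returns early if the ref was already initialised and drops the initial reference so the count starts at zero. A standalone sketch of that lazy, init-once pattern, with a plain counter standing in for the percpu_ref:

#include <stdbool.h>
#include <stdio.h>

struct mddev {
    bool writes_pending_ready;  /* stands in for percpu_count_ptr != NULL */
    long writes_pending;        /* stands in for the percpu refcount */
};

/* Safe to call from every personality's run(); only the first call does work. */
static int mddev_init_writes_pending_sketch(struct mddev *mddev)
{
    if (mddev->writes_pending_ready)
        return 0;                   /* already set up */

    mddev->writes_pending_ready = true;
    mddev->writes_pending = 1;      /* refcount starts with one reference... */
    mddev->writes_pending--;        /* ...which we drop to start at zero */
    return 0;
}

static int raid_run(struct mddev *mddev)
{
    if (mddev_init_writes_pending_sketch(mddev) < 0)
        return -12;                 /* -ENOMEM in the real code */
    printf("writes_pending starts at %ld\n", mddev->writes_pending);
    return 0;
}

int main(void)
{
    struct mddev m = { 0 };

    raid_run(&m);   /* first run initialises */
    raid_run(&m);   /* later runs are no-ops */
    return 0;
}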
diff --git a/drivers/md/md.h b/drivers/md/md.h index 11f15146ce51..0fa1de42c42b 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h | |||
| @@ -648,6 +648,7 @@ extern void md_unregister_thread(struct md_thread **threadp); | |||
| 648 | extern void md_wakeup_thread(struct md_thread *thread); | 648 | extern void md_wakeup_thread(struct md_thread *thread); |
| 649 | extern void md_check_recovery(struct mddev *mddev); | 649 | extern void md_check_recovery(struct mddev *mddev); |
| 650 | extern void md_reap_sync_thread(struct mddev *mddev); | 650 | extern void md_reap_sync_thread(struct mddev *mddev); |
| 651 | extern int mddev_init_writes_pending(struct mddev *mddev); | ||
| 651 | extern void md_write_start(struct mddev *mddev, struct bio *bi); | 652 | extern void md_write_start(struct mddev *mddev, struct bio *bi); |
| 652 | extern void md_write_inc(struct mddev *mddev, struct bio *bi); | 653 | extern void md_write_inc(struct mddev *mddev, struct bio *bi); |
| 653 | extern void md_write_end(struct mddev *mddev); | 654 | extern void md_write_end(struct mddev *mddev); |
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index af5056d56878..e1a7e3d4c5e4 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
| @@ -3063,6 +3063,8 @@ static int raid1_run(struct mddev *mddev) | |||
| 3063 | mdname(mddev)); | 3063 | mdname(mddev)); |
| 3064 | return -EIO; | 3064 | return -EIO; |
| 3065 | } | 3065 | } |
| 3066 | if (mddev_init_writes_pending(mddev) < 0) | ||
| 3067 | return -ENOMEM; | ||
| 3066 | /* | 3068 | /* |
| 3067 | * copy the already verified devices into our private RAID1 | 3069 | * copy the already verified devices into our private RAID1 |
| 3068 | * bookkeeping area. [whatever we allocate in run(), | 3070 | * bookkeeping area. [whatever we allocate in run(), |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 4343d7ff9916..797ed60abd5e 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
| @@ -3611,6 +3611,9 @@ static int raid10_run(struct mddev *mddev) | |||
| 3611 | int first = 1; | 3611 | int first = 1; |
| 3612 | bool discard_supported = false; | 3612 | bool discard_supported = false; |
| 3613 | 3613 | ||
| 3614 | if (mddev_init_writes_pending(mddev) < 0) | ||
| 3615 | return -ENOMEM; | ||
| 3616 | |||
| 3614 | if (mddev->private == NULL) { | 3617 | if (mddev->private == NULL) { |
| 3615 | conf = setup_conf(mddev); | 3618 | conf = setup_conf(mddev); |
| 3616 | if (IS_ERR(conf)) | 3619 | if (IS_ERR(conf)) |
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 4c00bc248287..0a7af8b0a80a 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c | |||
| @@ -1782,7 +1782,7 @@ static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos, | |||
| 1782 | mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum, | 1782 | mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum, |
| 1783 | mb, PAGE_SIZE)); | 1783 | mb, PAGE_SIZE)); |
| 1784 | if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE, | 1784 | if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE, |
| 1785 | REQ_FUA, false)) { | 1785 | REQ_SYNC | REQ_FUA, false)) { |
| 1786 | __free_page(page); | 1786 | __free_page(page); |
| 1787 | return -EIO; | 1787 | return -EIO; |
| 1788 | } | 1788 | } |
| @@ -2388,7 +2388,7 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log, | |||
| 2388 | mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum, | 2388 | mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum, |
| 2389 | mb, PAGE_SIZE)); | 2389 | mb, PAGE_SIZE)); |
| 2390 | sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, | 2390 | sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, |
| 2391 | REQ_OP_WRITE, REQ_FUA, false); | 2391 | REQ_OP_WRITE, REQ_SYNC | REQ_FUA, false); |
| 2392 | sh->log_start = ctx->pos; | 2392 | sh->log_start = ctx->pos; |
| 2393 | list_add_tail(&sh->r5c, &log->stripe_in_journal_list); | 2393 | list_add_tail(&sh->r5c, &log->stripe_in_journal_list); |
| 2394 | atomic_inc(&log->stripe_in_journal_count); | 2394 | atomic_inc(&log->stripe_in_journal_count); |
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c index 5d25bebf3328..ccce92e68d7f 100644 --- a/drivers/md/raid5-ppl.c +++ b/drivers/md/raid5-ppl.c | |||
| @@ -907,8 +907,8 @@ static int ppl_write_empty_header(struct ppl_log *log) | |||
| 907 | pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PAGE_SIZE)); | 907 | pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PAGE_SIZE)); |
| 908 | 908 | ||
| 909 | if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset, | 909 | if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset, |
| 910 | PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_FUA, 0, | 910 | PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_SYNC | |
| 911 | false)) { | 911 | REQ_FUA, 0, false)) { |
| 912 | md_error(rdev->mddev, rdev); | 912 | md_error(rdev->mddev, rdev); |
| 913 | ret = -EIO; | 913 | ret = -EIO; |
| 914 | } | 914 | } |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 9c4f7659f8b1..ec0f951ae19f 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
| @@ -4085,10 +4085,15 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, | |||
| 4085 | set_bit(STRIPE_INSYNC, &sh->state); | 4085 | set_bit(STRIPE_INSYNC, &sh->state); |
| 4086 | else { | 4086 | else { |
| 4087 | atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); | 4087 | atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); |
| 4088 | if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) | 4088 | if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) { |
| 4089 | /* don't try to repair!! */ | 4089 | /* don't try to repair!! */ |
| 4090 | set_bit(STRIPE_INSYNC, &sh->state); | 4090 | set_bit(STRIPE_INSYNC, &sh->state); |
| 4091 | else { | 4091 | pr_warn_ratelimited("%s: mismatch sector in range " |
| 4092 | "%llu-%llu\n", mdname(conf->mddev), | ||
| 4093 | (unsigned long long) sh->sector, | ||
| 4094 | (unsigned long long) sh->sector + | ||
| 4095 | STRIPE_SECTORS); | ||
| 4096 | } else { | ||
| 4092 | sh->check_state = check_state_compute_run; | 4097 | sh->check_state = check_state_compute_run; |
| 4093 | set_bit(STRIPE_COMPUTE_RUN, &sh->state); | 4098 | set_bit(STRIPE_COMPUTE_RUN, &sh->state); |
| 4094 | set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); | 4099 | set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); |
| @@ -4237,10 +4242,15 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, | |||
| 4237 | } | 4242 | } |
| 4238 | } else { | 4243 | } else { |
| 4239 | atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); | 4244 | atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); |
| 4240 | if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) | 4245 | if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) { |
| 4241 | /* don't try to repair!! */ | 4246 | /* don't try to repair!! */ |
| 4242 | set_bit(STRIPE_INSYNC, &sh->state); | 4247 | set_bit(STRIPE_INSYNC, &sh->state); |
| 4243 | else { | 4248 | pr_warn_ratelimited("%s: mismatch sector in range " |
| 4249 | "%llu-%llu\n", mdname(conf->mddev), | ||
| 4250 | (unsigned long long) sh->sector, | ||
| 4251 | (unsigned long long) sh->sector + | ||
| 4252 | STRIPE_SECTORS); | ||
| 4253 | } else { | ||
| 4244 | int *target = &sh->ops.target; | 4254 | int *target = &sh->ops.target; |
| 4245 | 4255 | ||
| 4246 | sh->ops.target = -1; | 4256 | sh->ops.target = -1; |
| @@ -7108,6 +7118,9 @@ static int raid5_run(struct mddev *mddev) | |||
| 7108 | long long min_offset_diff = 0; | 7118 | long long min_offset_diff = 0; |
| 7109 | int first = 1; | 7119 | int first = 1; |
| 7110 | 7120 | ||
| 7121 | if (mddev_init_writes_pending(mddev) < 0) | ||
| 7122 | return -ENOMEM; | ||
| 7123 | |||
| 7111 | if (mddev->recovery_cp != MaxSector) | 7124 | if (mddev->recovery_cp != MaxSector) |
| 7112 | pr_notice("md/raid:%s: not clean -- starting background reconstruction\n", | 7125 | pr_notice("md/raid:%s: not clean -- starting background reconstruction\n", |
| 7113 | mdname(mddev)); | 7126 | mdname(mddev)); |
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig index b72edd27f880..55d9c2b82b7e 100644 --- a/drivers/media/Kconfig +++ b/drivers/media/Kconfig | |||
| @@ -2,6 +2,12 @@ | |||
| 2 | # Multimedia device configuration | 2 | # Multimedia device configuration |
| 3 | # | 3 | # |
| 4 | 4 | ||
| 5 | config CEC_CORE | ||
| 6 | tristate | ||
| 7 | |||
| 8 | config CEC_NOTIFIER | ||
| 9 | bool | ||
| 10 | |||
| 5 | menuconfig MEDIA_SUPPORT | 11 | menuconfig MEDIA_SUPPORT |
| 6 | tristate "Multimedia support" | 12 | tristate "Multimedia support" |
| 7 | depends on HAS_IOMEM | 13 | depends on HAS_IOMEM |
diff --git a/drivers/media/Makefile b/drivers/media/Makefile index 523fea3648ad..044503aa8801 100644 --- a/drivers/media/Makefile +++ b/drivers/media/Makefile | |||
| @@ -4,8 +4,6 @@ | |||
| 4 | 4 | ||
| 5 | media-objs := media-device.o media-devnode.o media-entity.o | 5 | media-objs := media-device.o media-devnode.o media-entity.o |
| 6 | 6 | ||
| 7 | obj-$(CONFIG_CEC_CORE) += cec/ | ||
| 8 | |||
| 9 | # | 7 | # |
| 10 | # I2C drivers should come before other drivers, otherwise they'll fail | 8 | # I2C drivers should come before other drivers, otherwise they'll fail |
| 11 | # when compiled as builtin drivers | 9 | # when compiled as builtin drivers |
| @@ -26,6 +24,8 @@ obj-$(CONFIG_DVB_CORE) += dvb-core/ | |||
| 26 | # There are both core and drivers at RC subtree - merge before drivers | 24 | # There are both core and drivers at RC subtree - merge before drivers |
| 27 | obj-y += rc/ | 25 | obj-y += rc/ |
| 28 | 26 | ||
| 27 | obj-$(CONFIG_CEC_CORE) += cec/ | ||
| 28 | |||
| 29 | # | 29 | # |
| 30 | # Finally, merge the drivers that require the core | 30 | # Finally, merge the drivers that require the core |
| 31 | # | 31 | # |
diff --git a/drivers/media/cec/Kconfig b/drivers/media/cec/Kconfig index f944d93e3167..43428cec3a01 100644 --- a/drivers/media/cec/Kconfig +++ b/drivers/media/cec/Kconfig | |||
| @@ -1,19 +1,6 @@ | |||
| 1 | config CEC_CORE | ||
| 2 | tristate | ||
| 3 | depends on MEDIA_CEC_SUPPORT | ||
| 4 | default y | ||
| 5 | |||
| 6 | config MEDIA_CEC_NOTIFIER | ||
| 7 | bool | ||
| 8 | |||
| 9 | config MEDIA_CEC_RC | 1 | config MEDIA_CEC_RC |
| 10 | bool "HDMI CEC RC integration" | 2 | bool "HDMI CEC RC integration" |
| 11 | depends on CEC_CORE && RC_CORE | 3 | depends on CEC_CORE && RC_CORE |
| 4 | depends on CEC_CORE=m || RC_CORE=y | ||
| 12 | ---help--- | 5 | ---help--- |
| 13 | Pass on CEC remote control messages to the RC framework. | 6 | Pass on CEC remote control messages to the RC framework. |
| 14 | |||
| 15 | config MEDIA_CEC_DEBUG | ||
| 16 | bool "HDMI CEC debugfs interface" | ||
| 17 | depends on CEC_CORE && DEBUG_FS | ||
| 18 | ---help--- | ||
| 19 | Turns on the DebugFS interface for CEC devices. | ||
diff --git a/drivers/media/cec/Makefile b/drivers/media/cec/Makefile index 402a6c62a3e8..eaf408e64669 100644 --- a/drivers/media/cec/Makefile +++ b/drivers/media/cec/Makefile | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | cec-objs := cec-core.o cec-adap.o cec-api.o cec-edid.o | 1 | cec-objs := cec-core.o cec-adap.o cec-api.o cec-edid.o |
| 2 | 2 | ||
| 3 | ifeq ($(CONFIG_MEDIA_CEC_NOTIFIER),y) | 3 | ifeq ($(CONFIG_CEC_NOTIFIER),y) |
| 4 | cec-objs += cec-notifier.o | 4 | cec-objs += cec-notifier.o |
| 5 | endif | 5 | endif |
| 6 | 6 | ||
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c index f5fe01c9da8a..9dfc79800c71 100644 --- a/drivers/media/cec/cec-adap.c +++ b/drivers/media/cec/cec-adap.c | |||
| @@ -1864,7 +1864,7 @@ void cec_monitor_all_cnt_dec(struct cec_adapter *adap) | |||
| 1864 | WARN_ON(call_op(adap, adap_monitor_all_enable, 0)); | 1864 | WARN_ON(call_op(adap, adap_monitor_all_enable, 0)); |
| 1865 | } | 1865 | } |
| 1866 | 1866 | ||
| 1867 | #ifdef CONFIG_MEDIA_CEC_DEBUG | 1867 | #ifdef CONFIG_DEBUG_FS |
| 1868 | /* | 1868 | /* |
| 1869 | * Log the current state of the CEC adapter. | 1869 | * Log the current state of the CEC adapter. |
| 1870 | * Very useful for debugging. | 1870 | * Very useful for debugging. |
diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c index 0860fb458757..999926f731c8 100644 --- a/drivers/media/cec/cec-api.c +++ b/drivers/media/cec/cec-api.c | |||
| @@ -271,16 +271,10 @@ static long cec_receive(struct cec_adapter *adap, struct cec_fh *fh, | |||
| 271 | bool block, struct cec_msg __user *parg) | 271 | bool block, struct cec_msg __user *parg) |
| 272 | { | 272 | { |
| 273 | struct cec_msg msg = {}; | 273 | struct cec_msg msg = {}; |
| 274 | long err = 0; | 274 | long err; |
| 275 | 275 | ||
| 276 | if (copy_from_user(&msg, parg, sizeof(msg))) | 276 | if (copy_from_user(&msg, parg, sizeof(msg))) |
| 277 | return -EFAULT; | 277 | return -EFAULT; |
| 278 | mutex_lock(&adap->lock); | ||
| 279 | if (!adap->is_configured && fh->mode_follower < CEC_MODE_MONITOR) | ||
| 280 | err = -ENONET; | ||
| 281 | mutex_unlock(&adap->lock); | ||
| 282 | if (err) | ||
| 283 | return err; | ||
| 284 | 278 | ||
| 285 | err = cec_receive_msg(fh, &msg, block); | 279 | err = cec_receive_msg(fh, &msg, block); |
| 286 | if (err) | 280 | if (err) |
diff --git a/drivers/media/cec/cec-core.c b/drivers/media/cec/cec-core.c index f9ebff90f8eb..2f87748ba4fc 100644 --- a/drivers/media/cec/cec-core.c +++ b/drivers/media/cec/cec-core.c | |||
| @@ -187,7 +187,7 @@ static void cec_devnode_unregister(struct cec_devnode *devnode) | |||
| 187 | put_device(&devnode->dev); | 187 | put_device(&devnode->dev); |
| 188 | } | 188 | } |
| 189 | 189 | ||
| 190 | #ifdef CONFIG_MEDIA_CEC_NOTIFIER | 190 | #ifdef CONFIG_CEC_NOTIFIER |
| 191 | static void cec_cec_notify(struct cec_adapter *adap, u16 pa) | 191 | static void cec_cec_notify(struct cec_adapter *adap, u16 pa) |
| 192 | { | 192 | { |
| 193 | cec_s_phys_addr(adap, pa, false); | 193 | cec_s_phys_addr(adap, pa, false); |
| @@ -323,7 +323,7 @@ int cec_register_adapter(struct cec_adapter *adap, | |||
| 323 | } | 323 | } |
| 324 | 324 | ||
| 325 | dev_set_drvdata(&adap->devnode.dev, adap); | 325 | dev_set_drvdata(&adap->devnode.dev, adap); |
| 326 | #ifdef CONFIG_MEDIA_CEC_DEBUG | 326 | #ifdef CONFIG_DEBUG_FS |
| 327 | if (!top_cec_dir) | 327 | if (!top_cec_dir) |
| 328 | return 0; | 328 | return 0; |
| 329 | 329 | ||
| @@ -355,7 +355,7 @@ void cec_unregister_adapter(struct cec_adapter *adap) | |||
| 355 | adap->rc = NULL; | 355 | adap->rc = NULL; |
| 356 | #endif | 356 | #endif |
| 357 | debugfs_remove_recursive(adap->cec_dir); | 357 | debugfs_remove_recursive(adap->cec_dir); |
| 358 | #ifdef CONFIG_MEDIA_CEC_NOTIFIER | 358 | #ifdef CONFIG_CEC_NOTIFIER |
| 359 | if (adap->notifier) | 359 | if (adap->notifier) |
| 360 | cec_notifier_unregister(adap->notifier); | 360 | cec_notifier_unregister(adap->notifier); |
| 361 | #endif | 361 | #endif |
| @@ -395,7 +395,7 @@ static int __init cec_devnode_init(void) | |||
| 395 | return ret; | 395 | return ret; |
| 396 | } | 396 | } |
| 397 | 397 | ||
| 398 | #ifdef CONFIG_MEDIA_CEC_DEBUG | 398 | #ifdef CONFIG_DEBUG_FS |
| 399 | top_cec_dir = debugfs_create_dir("cec", NULL); | 399 | top_cec_dir = debugfs_create_dir("cec", NULL); |
| 400 | if (IS_ERR_OR_NULL(top_cec_dir)) { | 400 | if (IS_ERR_OR_NULL(top_cec_dir)) { |
| 401 | pr_warn("cec: Failed to create debugfs cec dir\n"); | 401 | pr_warn("cec: Failed to create debugfs cec dir\n"); |
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig index fd181c99ce11..aaa9471c7d11 100644 --- a/drivers/media/i2c/Kconfig +++ b/drivers/media/i2c/Kconfig | |||
| @@ -220,7 +220,8 @@ config VIDEO_ADV7604 | |||
| 220 | 220 | ||
| 221 | config VIDEO_ADV7604_CEC | 221 | config VIDEO_ADV7604_CEC |
| 222 | bool "Enable Analog Devices ADV7604 CEC support" | 222 | bool "Enable Analog Devices ADV7604 CEC support" |
| 223 | depends on VIDEO_ADV7604 && CEC_CORE | 223 | depends on VIDEO_ADV7604 |
| 224 | select CEC_CORE | ||
| 224 | ---help--- | 225 | ---help--- |
| 225 | When selected the adv7604 will support the optional | 226 | When selected the adv7604 will support the optional |
| 226 | HDMI CEC feature. | 227 | HDMI CEC feature. |
| @@ -240,7 +241,8 @@ config VIDEO_ADV7842 | |||
| 240 | 241 | ||
| 241 | config VIDEO_ADV7842_CEC | 242 | config VIDEO_ADV7842_CEC |
| 242 | bool "Enable Analog Devices ADV7842 CEC support" | 243 | bool "Enable Analog Devices ADV7842 CEC support" |
| 243 | depends on VIDEO_ADV7842 && CEC_CORE | 244 | depends on VIDEO_ADV7842 |
| 245 | select CEC_CORE | ||
| 244 | ---help--- | 246 | ---help--- |
| 245 | When selected the adv7842 will support the optional | 247 | When selected the adv7842 will support the optional |
| 246 | HDMI CEC feature. | 248 | HDMI CEC feature. |
| @@ -478,7 +480,8 @@ config VIDEO_ADV7511 | |||
| 478 | 480 | ||
| 479 | config VIDEO_ADV7511_CEC | 481 | config VIDEO_ADV7511_CEC |
| 480 | bool "Enable Analog Devices ADV7511 CEC support" | 482 | bool "Enable Analog Devices ADV7511 CEC support" |
| 481 | depends on VIDEO_ADV7511 && CEC_CORE | 483 | depends on VIDEO_ADV7511 |
| 484 | select CEC_CORE | ||
| 482 | ---help--- | 485 | ---help--- |
| 483 | When selected the adv7511 will support the optional | 486 | When selected the adv7511 will support the optional |
| 484 | HDMI CEC feature. | 487 | HDMI CEC feature. |
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c index acef4eca269f..3251cba89e8f 100644 --- a/drivers/media/i2c/tc358743.c +++ b/drivers/media/i2c/tc358743.c | |||
| @@ -223,7 +223,7 @@ static void i2c_wr8(struct v4l2_subdev *sd, u16 reg, u8 val) | |||
| 223 | static void i2c_wr8_and_or(struct v4l2_subdev *sd, u16 reg, | 223 | static void i2c_wr8_and_or(struct v4l2_subdev *sd, u16 reg, |
| 224 | u8 mask, u8 val) | 224 | u8 mask, u8 val) |
| 225 | { | 225 | { |
| 226 | i2c_wrreg(sd, reg, (i2c_rdreg(sd, reg, 2) & mask) | val, 2); | 226 | i2c_wrreg(sd, reg, (i2c_rdreg(sd, reg, 1) & mask) | val, 1); |
| 227 | } | 227 | } |
| 228 | 228 | ||
| 229 | static u16 i2c_rd16(struct v4l2_subdev *sd, u16 reg) | 229 | static u16 i2c_rd16(struct v4l2_subdev *sd, u16 reg) |
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig index ac026ee1ca07..041cb80a26b1 100644 --- a/drivers/media/platform/Kconfig +++ b/drivers/media/platform/Kconfig | |||
| @@ -501,8 +501,9 @@ if CEC_PLATFORM_DRIVERS | |||
| 501 | 501 | ||
| 502 | config VIDEO_SAMSUNG_S5P_CEC | 502 | config VIDEO_SAMSUNG_S5P_CEC |
| 503 | tristate "Samsung S5P CEC driver" | 503 | tristate "Samsung S5P CEC driver" |
| 504 | depends on CEC_CORE && (PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST) | 504 | depends on PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST |
| 505 | select MEDIA_CEC_NOTIFIER | 505 | select CEC_CORE |
| 506 | select CEC_NOTIFIER | ||
| 506 | ---help--- | 507 | ---help--- |
| 507 | This is a driver for Samsung S5P HDMI CEC interface. It uses the | 508 | This is a driver for Samsung S5P HDMI CEC interface. It uses the |
| 508 | generic CEC framework interface. | 509 | generic CEC framework interface. |
| @@ -511,8 +512,9 @@ config VIDEO_SAMSUNG_S5P_CEC | |||
| 511 | 512 | ||
| 512 | config VIDEO_STI_HDMI_CEC | 513 | config VIDEO_STI_HDMI_CEC |
| 513 | tristate "STMicroelectronics STiH4xx HDMI CEC driver" | 514 | tristate "STMicroelectronics STiH4xx HDMI CEC driver" |
| 514 | depends on CEC_CORE && (ARCH_STI || COMPILE_TEST) | 515 | depends on ARCH_STI || COMPILE_TEST |
| 515 | select MEDIA_CEC_NOTIFIER | 516 | select CEC_CORE |
| 517 | select CEC_NOTIFIER | ||
| 516 | ---help--- | 518 | ---help--- |
| 517 | This is a driver for STIH4xx HDMI CEC interface. It uses the | 519 | This is a driver for STIH4xx HDMI CEC interface. It uses the |
| 518 | generic CEC framework interface. | 520 | generic CEC framework interface. |
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c index 57a842ff3097..b7731b18ecae 100644 --- a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c +++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c | |||
| @@ -493,10 +493,10 @@ static int vdec_h264_get_param(unsigned long h_vdec, | |||
| 493 | } | 493 | } |
| 494 | 494 | ||
| 495 | static struct vdec_common_if vdec_h264_if = { | 495 | static struct vdec_common_if vdec_h264_if = { |
| 496 | vdec_h264_init, | 496 | .init = vdec_h264_init, |
| 497 | vdec_h264_decode, | 497 | .decode = vdec_h264_decode, |
| 498 | vdec_h264_get_param, | 498 | .get_param = vdec_h264_get_param, |
| 499 | vdec_h264_deinit, | 499 | .deinit = vdec_h264_deinit, |
| 500 | }; | 500 | }; |
| 501 | 501 | ||
| 502 | struct vdec_common_if *get_h264_dec_comm_if(void); | 502 | struct vdec_common_if *get_h264_dec_comm_if(void); |
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c index 6e7a62ae0842..b9fad6a48879 100644 --- a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c +++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c | |||
| @@ -620,10 +620,10 @@ static void vdec_vp8_deinit(unsigned long h_vdec) | |||
| 620 | } | 620 | } |
| 621 | 621 | ||
| 622 | static struct vdec_common_if vdec_vp8_if = { | 622 | static struct vdec_common_if vdec_vp8_if = { |
| 623 | vdec_vp8_init, | 623 | .init = vdec_vp8_init, |
| 624 | vdec_vp8_decode, | 624 | .decode = vdec_vp8_decode, |
| 625 | vdec_vp8_get_param, | 625 | .get_param = vdec_vp8_get_param, |
| 626 | vdec_vp8_deinit, | 626 | .deinit = vdec_vp8_deinit, |
| 627 | }; | 627 | }; |
| 628 | 628 | ||
| 629 | struct vdec_common_if *get_vp8_dec_comm_if(void); | 629 | struct vdec_common_if *get_vp8_dec_comm_if(void); |
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c index 5539b1853f16..1daee1207469 100644 --- a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c +++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c | |||
| @@ -979,10 +979,10 @@ static int vdec_vp9_get_param(unsigned long h_vdec, | |||
| 979 | } | 979 | } |
| 980 | 980 | ||
| 981 | static struct vdec_common_if vdec_vp9_if = { | 981 | static struct vdec_common_if vdec_vp9_if = { |
| 982 | vdec_vp9_init, | 982 | .init = vdec_vp9_init, |
| 983 | vdec_vp9_decode, | 983 | .decode = vdec_vp9_decode, |
| 984 | vdec_vp9_get_param, | 984 | .get_param = vdec_vp9_get_param, |
| 985 | vdec_vp9_deinit, | 985 | .deinit = vdec_vp9_deinit, |
| 986 | }; | 986 | }; |
| 987 | 987 | ||
| 988 | struct vdec_common_if *get_vp9_dec_comm_if(void); | 988 | struct vdec_common_if *get_vp9_dec_comm_if(void); |
diff --git a/drivers/media/platform/vivid/Kconfig b/drivers/media/platform/vivid/Kconfig index b36ac19dc6e4..154de92dd809 100644 --- a/drivers/media/platform/vivid/Kconfig +++ b/drivers/media/platform/vivid/Kconfig | |||
| @@ -26,7 +26,8 @@ config VIDEO_VIVID | |||
| 26 | 26 | ||
| 27 | config VIDEO_VIVID_CEC | 27 | config VIDEO_VIVID_CEC |
| 28 | bool "Enable CEC emulation support" | 28 | bool "Enable CEC emulation support" |
| 29 | depends on VIDEO_VIVID && CEC_CORE | 29 | depends on VIDEO_VIVID |
| 30 | select CEC_CORE | ||
| 30 | ---help--- | 31 | ---help--- |
| 31 | When selected the vivid module will emulate the optional | 32 | When selected the vivid module will emulate the optional |
| 32 | HDMI CEC feature. | 33 | HDMI CEC feature. |
diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c index 90f66dc7c0d7..a2fc1a1d58b0 100644 --- a/drivers/media/rc/rc-ir-raw.c +++ b/drivers/media/rc/rc-ir-raw.c | |||
| @@ -211,7 +211,7 @@ EXPORT_SYMBOL_GPL(ir_raw_event_set_idle); | |||
| 211 | */ | 211 | */ |
| 212 | void ir_raw_event_handle(struct rc_dev *dev) | 212 | void ir_raw_event_handle(struct rc_dev *dev) |
| 213 | { | 213 | { |
| 214 | if (!dev->raw) | 214 | if (!dev->raw || !dev->raw->thread) |
| 215 | return; | 215 | return; |
| 216 | 216 | ||
| 217 | wake_up_process(dev->raw->thread); | 217 | wake_up_process(dev->raw->thread); |
| @@ -490,6 +490,7 @@ int ir_raw_event_register(struct rc_dev *dev) | |||
| 490 | { | 490 | { |
| 491 | int rc; | 491 | int rc; |
| 492 | struct ir_raw_handler *handler; | 492 | struct ir_raw_handler *handler; |
| 493 | struct task_struct *thread; | ||
| 493 | 494 | ||
| 494 | if (!dev) | 495 | if (!dev) |
| 495 | return -EINVAL; | 496 | return -EINVAL; |
| @@ -507,13 +508,15 @@ int ir_raw_event_register(struct rc_dev *dev) | |||
| 507 | * because the event is coming from userspace | 508 | * because the event is coming from userspace |
| 508 | */ | 509 | */ |
| 509 | if (dev->driver_type != RC_DRIVER_IR_RAW_TX) { | 510 | if (dev->driver_type != RC_DRIVER_IR_RAW_TX) { |
| 510 | dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw, | 511 | thread = kthread_run(ir_raw_event_thread, dev->raw, "rc%u", |
| 511 | "rc%u", dev->minor); | 512 | dev->minor); |
| 512 | 513 | ||
| 513 | if (IS_ERR(dev->raw->thread)) { | 514 | if (IS_ERR(thread)) { |
| 514 | rc = PTR_ERR(dev->raw->thread); | 515 | rc = PTR_ERR(thread); |
| 515 | goto out; | 516 | goto out; |
| 516 | } | 517 | } |
| 518 | |||
| 519 | dev->raw->thread = thread; | ||
| 517 | } | 520 | } |
| 518 | 521 | ||
| 519 | mutex_lock(&ir_raw_handler_lock); | 522 | mutex_lock(&ir_raw_handler_lock); |
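The rc-ir-raw change keeps the kthread_run() result in a local and publishes it to dev->raw->thread only after the IS_ERR() check, while ir_raw_event_handle() now also bails out when the thread pointer is not set. A sketch of that "publish only a valid pointer" idea with generic worker plumbing; start_worker() is a stand-in and returns NULL on failure rather than an ERR_PTR:

#include <stddef.h>
#include <stdio.h>

struct worker { int id; };

struct raw_dev {
    struct worker *thread;  /* NULL until a worker is successfully started */
};

/* May fail; NULL means "could not start" in this sketch. */
static struct worker *start_worker(int fail)
{
    static struct worker w = { .id = 1 };

    return fail ? NULL : &w;
}

static int register_raw(struct raw_dev *dev, int fail)
{
    struct worker *thread = start_worker(fail);

    if (!thread)
        return -1;          /* dev->thread never saw the bad value */

    dev->thread = thread;   /* publish only after success */
    return 0;
}

/* Event path: tolerate a device whose worker isn't (yet) running. */
static void handle_event(struct raw_dev *dev)
{
    if (!dev || !dev->thread) {
        printf("no worker yet, dropping event\n");
        return;
    }
    printf("waking worker %d\n", dev->thread->id);
}

int main(void)
{
    struct raw_dev dev = { .thread = NULL };

    handle_event(&dev);     /* before registration: safely ignored */
    register_raw(&dev, 0);
    handle_event(&dev);     /* after registration: worker is woken */
    return 0;
}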
diff --git a/drivers/media/rc/sir_ir.c b/drivers/media/rc/sir_ir.c index e12ec50bf0bf..90a5f8fd5eea 100644 --- a/drivers/media/rc/sir_ir.c +++ b/drivers/media/rc/sir_ir.c | |||
| @@ -183,9 +183,15 @@ static irqreturn_t sir_interrupt(int irq, void *dev_id) | |||
| 183 | static unsigned long delt; | 183 | static unsigned long delt; |
| 184 | unsigned long deltintr; | 184 | unsigned long deltintr; |
| 185 | unsigned long flags; | 185 | unsigned long flags; |
| 186 | int counter = 0; | ||
| 186 | int iir, lsr; | 187 | int iir, lsr; |
| 187 | 188 | ||
| 188 | while ((iir = inb(io + UART_IIR) & UART_IIR_ID)) { | 189 | while ((iir = inb(io + UART_IIR) & UART_IIR_ID)) { |
| 190 | if (++counter > 256) { | ||
| 191 | dev_err(&sir_ir_dev->dev, "Trapped in interrupt"); | ||
| 192 | break; | ||
| 193 | } | ||
| 194 | |||
| 189 | switch (iir & UART_IIR_ID) { /* FIXME: this needs to be thinned out */ | 195 | switch (iir & UART_IIR_ID) { /* FIXME: this needs to be thinned out */ |
| 190 | case UART_IIR_MSI: | 196 | case UART_IIR_MSI: |
| 191 | (void)inb(io + UART_MSR); | 197 | (void)inb(io + UART_MSR); |
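The sir_ir fix bounds the interrupt-status loop with a counter: after 256 passes it logs "Trapped in interrupt" and breaks out instead of spinning forever on a stuck UART. A tiny sketch of a bounded drain loop, with read_status() standing in for the inb() accesses:

#include <stdio.h>

#define MAX_PASSES 256

/* Pretend status register that (buggy hardware!) never deasserts. */
static unsigned int read_status(void)
{
    return 0x1;   /* interrupt pending forever */
}

static void service_interrupts(void)
{
    int passes = 0;
    unsigned int status;

    while ((status = read_status()) != 0) {
        if (++passes > MAX_PASSES) {
            fprintf(stderr, "trapped in interrupt, giving up\n");
            break;      /* bound the work done in one invocation */
        }
        /* ... handle one interrupt source based on status ... */
        (void)status;
    }
    printf("made %d passes\n", passes);
}

int main(void)
{
    service_interrupts();
    return 0;
}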
diff --git a/drivers/media/usb/pulse8-cec/Kconfig b/drivers/media/usb/pulse8-cec/Kconfig index 8937f3986a01..18ead44824ba 100644 --- a/drivers/media/usb/pulse8-cec/Kconfig +++ b/drivers/media/usb/pulse8-cec/Kconfig | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | config USB_PULSE8_CEC | 1 | config USB_PULSE8_CEC |
| 2 | tristate "Pulse Eight HDMI CEC" | 2 | tristate "Pulse Eight HDMI CEC" |
| 3 | depends on USB_ACM && CEC_CORE | 3 | depends on USB_ACM |
| 4 | select CEC_CORE | ||
| 4 | select SERIO | 5 | select SERIO |
| 5 | select SERIO_SERPORT | 6 | select SERIO_SERPORT |
| 6 | ---help--- | 7 | ---help--- |
diff --git a/drivers/media/usb/rainshadow-cec/Kconfig b/drivers/media/usb/rainshadow-cec/Kconfig index 3eb86607efb8..030ef01b1ff0 100644 --- a/drivers/media/usb/rainshadow-cec/Kconfig +++ b/drivers/media/usb/rainshadow-cec/Kconfig | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | config USB_RAINSHADOW_CEC | 1 | config USB_RAINSHADOW_CEC |
| 2 | tristate "RainShadow Tech HDMI CEC" | 2 | tristate "RainShadow Tech HDMI CEC" |
| 3 | depends on USB_ACM && CEC_CORE | 3 | depends on USB_ACM |
| 4 | select CEC_CORE | ||
| 4 | select SERIO | 5 | select SERIO |
| 5 | select SERIO_SERPORT | 6 | select SERIO_SERPORT |
| 6 | ---help--- | 7 | ---help--- |
diff --git a/drivers/media/usb/rainshadow-cec/rainshadow-cec.c b/drivers/media/usb/rainshadow-cec/rainshadow-cec.c index 541ca543f71f..4126552c9055 100644 --- a/drivers/media/usb/rainshadow-cec/rainshadow-cec.c +++ b/drivers/media/usb/rainshadow-cec/rainshadow-cec.c | |||
| @@ -119,7 +119,7 @@ static void rain_irq_work_handler(struct work_struct *work) | |||
| 119 | 119 | ||
| 120 | while (true) { | 120 | while (true) { |
| 121 | unsigned long flags; | 121 | unsigned long flags; |
| 122 | bool exit_loop; | 122 | bool exit_loop = false; |
| 123 | char data; | 123 | char data; |
| 124 | 124 | ||
| 125 | spin_lock_irqsave(&rain->buf_lock, flags); | 125 | spin_lock_irqsave(&rain->buf_lock, flags); |
| @@ -336,6 +336,7 @@ static int rain_connect(struct serio *serio, struct serio_driver *drv) | |||
| 336 | serio_set_drvdata(serio, rain); | 336 | serio_set_drvdata(serio, rain); |
| 337 | INIT_WORK(&rain->work, rain_irq_work_handler); | 337 | INIT_WORK(&rain->work, rain_irq_work_handler); |
| 338 | mutex_init(&rain->write_lock); | 338 | mutex_init(&rain->write_lock); |
| 339 | spin_lock_init(&rain->buf_lock); | ||
| 339 | 340 | ||
| 340 | err = serio_open(serio, drv); | 341 | err = serio_open(serio, drv); |
| 341 | if (err) | 342 | if (err) |
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c index 94afbbf92807..c0175ea7e7ad 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c | |||
| @@ -868,7 +868,7 @@ EXPORT_SYMBOL_GPL(vb2_core_create_bufs); | |||
| 868 | 868 | ||
| 869 | void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no) | 869 | void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no) |
| 870 | { | 870 | { |
| 871 | if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv) | 871 | if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv) |
| 872 | return NULL; | 872 | return NULL; |
| 873 | 873 | ||
| 874 | return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv); | 874 | return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv); |
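The videobuf2 fix tightens the plane bounds check from ">" to ">=": with num_planes planes the valid indices are 0..num_planes-1, so plane_no == num_planes has to be rejected before it indexes the planes array. A minimal illustration of the off-by-one:

#include <stddef.h>
#include <stdio.h>

#define NUM_PLANES 2

static const char *plane_vaddr(const char *const planes[], size_t num,
                               size_t plane_no)
{
    /* ">= num" is the correct test; "> num" lets plane_no == num through
     * and reads one element past the end of the array. */
    if (plane_no >= num || !planes[plane_no])
        return NULL;
    return planes[plane_no];
}

int main(void)
{
    const char *planes[NUM_PLANES] = { "plane0", "plane1" };
    const char *p;

    p = plane_vaddr(planes, NUM_PLANES, 1);
    printf("plane 1: %s\n", p ? p : "NULL");
    p = plane_vaddr(planes, NUM_PLANES, 2);   /* == NUM_PLANES: rejected */
    printf("plane 2: %s\n", p ? p : "NULL");
    return 0;
}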
diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c index 35910f945bfa..99e644cda4d1 100644 --- a/drivers/memory/atmel-ebi.c +++ b/drivers/memory/atmel-ebi.c | |||
| @@ -581,7 +581,7 @@ static int atmel_ebi_probe(struct platform_device *pdev) | |||
| 581 | return of_platform_populate(np, NULL, NULL, dev); | 581 | return of_platform_populate(np, NULL, NULL, dev); |
| 582 | } | 582 | } |
| 583 | 583 | ||
| 584 | static int atmel_ebi_resume(struct device *dev) | 584 | static __maybe_unused int atmel_ebi_resume(struct device *dev) |
| 585 | { | 585 | { |
| 586 | struct atmel_ebi *ebi = dev_get_drvdata(dev); | 586 | struct atmel_ebi *ebi = dev_get_drvdata(dev); |
| 587 | struct atmel_ebi_dev *ebid; | 587 | struct atmel_ebi_dev *ebid; |
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c index 75488e65cd96..8d46e3ad9529 100644 --- a/drivers/mfd/arizona-core.c +++ b/drivers/mfd/arizona-core.c | |||
| @@ -245,8 +245,7 @@ static int arizona_poll_reg(struct arizona *arizona, | |||
| 245 | int ret; | 245 | int ret; |
| 246 | 246 | ||
| 247 | ret = regmap_read_poll_timeout(arizona->regmap, | 247 | ret = regmap_read_poll_timeout(arizona->regmap, |
| 248 | ARIZONA_INTERRUPT_RAW_STATUS_5, val, | 248 | reg, val, ((val & mask) == target), |
| 249 | ((val & mask) == target), | ||
| 250 | ARIZONA_REG_POLL_DELAY_US, | 249 | ARIZONA_REG_POLL_DELAY_US, |
| 251 | timeout_ms * 1000); | 250 | timeout_ms * 1000); |
| 252 | if (ret) | 251 | if (ret) |
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c index 4472ce11f98d..8c32040b9c09 100644 --- a/drivers/misc/cxl/context.c +++ b/drivers/misc/cxl/context.c | |||
| @@ -45,7 +45,7 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master) | |||
| 45 | mutex_init(&ctx->mapping_lock); | 45 | mutex_init(&ctx->mapping_lock); |
| 46 | ctx->mapping = NULL; | 46 | ctx->mapping = NULL; |
| 47 | 47 | ||
| 48 | if (cxl_is_psl8(afu)) { | 48 | if (cxl_is_power8()) { |
| 49 | spin_lock_init(&ctx->sste_lock); | 49 | spin_lock_init(&ctx->sste_lock); |
| 50 | 50 | ||
| 51 | /* | 51 | /* |
| @@ -189,7 +189,7 @@ int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma) | |||
| 189 | if (start + len > ctx->afu->adapter->ps_size) | 189 | if (start + len > ctx->afu->adapter->ps_size) |
| 190 | return -EINVAL; | 190 | return -EINVAL; |
| 191 | 191 | ||
| 192 | if (cxl_is_psl9(ctx->afu)) { | 192 | if (cxl_is_power9()) { |
| 193 | /* | 193 | /* |
| 194 | * Make sure there is a valid problem state | 194 | * Make sure there is a valid problem state |
| 195 | * area space for this AFU. | 195 | * area space for this AFU. |
| @@ -324,7 +324,7 @@ static void reclaim_ctx(struct rcu_head *rcu) | |||
| 324 | { | 324 | { |
| 325 | struct cxl_context *ctx = container_of(rcu, struct cxl_context, rcu); | 325 | struct cxl_context *ctx = container_of(rcu, struct cxl_context, rcu); |
| 326 | 326 | ||
| 327 | if (cxl_is_psl8(ctx->afu)) | 327 | if (cxl_is_power8()) |
| 328 | free_page((u64)ctx->sstp); | 328 | free_page((u64)ctx->sstp); |
| 329 | if (ctx->ff_page) | 329 | if (ctx->ff_page) |
| 330 | __free_page(ctx->ff_page); | 330 | __free_page(ctx->ff_page); |
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h index c8568ea7c518..a03f8e7535e5 100644 --- a/drivers/misc/cxl/cxl.h +++ b/drivers/misc/cxl/cxl.h | |||
| @@ -357,6 +357,7 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0}; | |||
| 357 | #define CXL_PSL9_DSISR_An_PF_RGP 0x0000000000000090ULL /* PTE not found (Radix Guest (parent)) 0b10010000 */ | 357 | #define CXL_PSL9_DSISR_An_PF_RGP 0x0000000000000090ULL /* PTE not found (Radix Guest (parent)) 0b10010000 */ |
| 358 | #define CXL_PSL9_DSISR_An_PF_HRH 0x0000000000000094ULL /* PTE not found (HPT/Radix Host) 0b10010100 */ | 358 | #define CXL_PSL9_DSISR_An_PF_HRH 0x0000000000000094ULL /* PTE not found (HPT/Radix Host) 0b10010100 */ |
| 359 | #define CXL_PSL9_DSISR_An_PF_STEG 0x000000000000009CULL /* PTE not found (STEG VA) 0b10011100 */ | 359 | #define CXL_PSL9_DSISR_An_PF_STEG 0x000000000000009CULL /* PTE not found (STEG VA) 0b10011100 */ |
| 360 | #define CXL_PSL9_DSISR_An_URTCH 0x00000000000000B4ULL /* Unsupported Radix Tree Configuration 0b10110100 */ | ||
| 360 | 361 | ||
| 361 | /****** CXL_PSL_TFC_An ******************************************************/ | 362 | /****** CXL_PSL_TFC_An ******************************************************/ |
| 362 | #define CXL_PSL_TFC_An_A (1ull << (63-28)) /* Acknowledge non-translation fault */ | 363 | #define CXL_PSL_TFC_An_A (1ull << (63-28)) /* Acknowledge non-translation fault */ |
| @@ -844,24 +845,15 @@ static inline bool cxl_is_power8(void) | |||
| 844 | 845 | ||
| 845 | static inline bool cxl_is_power9(void) | 846 | static inline bool cxl_is_power9(void) |
| 846 | { | 847 | { |
| 847 | /* intermediate solution */ | 848 | if (pvr_version_is(PVR_POWER9)) |
| 848 | if (!cxl_is_power8() && | ||
| 849 | (cpu_has_feature(CPU_FTRS_POWER9) || | ||
| 850 | cpu_has_feature(CPU_FTR_POWER9_DD1))) | ||
| 851 | return true; | 849 | return true; |
| 852 | return false; | 850 | return false; |
| 853 | } | 851 | } |
| 854 | 852 | ||
| 855 | static inline bool cxl_is_psl8(struct cxl_afu *afu) | 853 | static inline bool cxl_is_power9_dd1(void) |
| 856 | { | 854 | { |
| 857 | if (afu->adapter->caia_major == 1) | 855 | if ((pvr_version_is(PVR_POWER9)) && |
| 858 | return true; | 856 | cpu_has_feature(CPU_FTR_POWER9_DD1)) |
| 859 | return false; | ||
| 860 | } | ||
| 861 | |||
| 862 | static inline bool cxl_is_psl9(struct cxl_afu *afu) | ||
| 863 | { | ||
| 864 | if (afu->adapter->caia_major == 2) | ||
| 865 | return true; | 857 | return true; |
| 866 | return false; | 858 | return false; |
| 867 | } | 859 | } |
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c index 5344448f514e..c79e39bad7a4 100644 --- a/drivers/misc/cxl/fault.c +++ b/drivers/misc/cxl/fault.c | |||
| @@ -187,7 +187,7 @@ static struct mm_struct *get_mem_context(struct cxl_context *ctx) | |||
| 187 | 187 | ||
| 188 | static bool cxl_is_segment_miss(struct cxl_context *ctx, u64 dsisr) | 188 | static bool cxl_is_segment_miss(struct cxl_context *ctx, u64 dsisr) |
| 189 | { | 189 | { |
| 190 | if ((cxl_is_psl8(ctx->afu)) && (dsisr & CXL_PSL_DSISR_An_DS)) | 190 | if ((cxl_is_power8() && (dsisr & CXL_PSL_DSISR_An_DS))) |
| 191 | return true; | 191 | return true; |
| 192 | 192 | ||
| 193 | return false; | 193 | return false; |
| @@ -195,16 +195,23 @@ static bool cxl_is_segment_miss(struct cxl_context *ctx, u64 dsisr) | |||
| 195 | 195 | ||
| 196 | static bool cxl_is_page_fault(struct cxl_context *ctx, u64 dsisr) | 196 | static bool cxl_is_page_fault(struct cxl_context *ctx, u64 dsisr) |
| 197 | { | 197 | { |
| 198 | if ((cxl_is_psl8(ctx->afu)) && (dsisr & CXL_PSL_DSISR_An_DM)) | 198 | u64 crs; /* Translation Checkout Response Status */ |
| 199 | return true; | ||
| 200 | 199 | ||
| 201 | if ((cxl_is_psl9(ctx->afu)) && | 200 | if ((cxl_is_power8()) && (dsisr & CXL_PSL_DSISR_An_DM)) |
| 202 | ((dsisr & CXL_PSL9_DSISR_An_CO_MASK) & | ||
| 203 | (CXL_PSL9_DSISR_An_PF_SLR | CXL_PSL9_DSISR_An_PF_RGC | | ||
| 204 | CXL_PSL9_DSISR_An_PF_RGP | CXL_PSL9_DSISR_An_PF_HRH | | ||
| 205 | CXL_PSL9_DSISR_An_PF_STEG))) | ||
| 206 | return true; | 201 | return true; |
| 207 | 202 | ||
| 203 | if (cxl_is_power9()) { | ||
| 204 | crs = (dsisr & CXL_PSL9_DSISR_An_CO_MASK); | ||
| 205 | if ((crs == CXL_PSL9_DSISR_An_PF_SLR) || | ||
| 206 | (crs == CXL_PSL9_DSISR_An_PF_RGC) || | ||
| 207 | (crs == CXL_PSL9_DSISR_An_PF_RGP) || | ||
| 208 | (crs == CXL_PSL9_DSISR_An_PF_HRH) || | ||
| 209 | (crs == CXL_PSL9_DSISR_An_PF_STEG) || | ||
| 210 | (crs == CXL_PSL9_DSISR_An_URTCH)) { | ||
| 211 | return true; | ||
| 212 | } | ||
| 213 | } | ||
| 214 | |||
| 208 | return false; | 215 | return false; |
| 209 | } | 216 | } |
| 210 | 217 | ||
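The cxl_is_page_fault() rewrite stops AND-ing the DSISR against a mask built by OR-ing the PF_* codes together; because those codes share bits, the combined mask can match checkout-response values that are not page faults. The new code extracts the response field once and compares it for equality against each known code (now including URTCH). A small sketch of the difference between the two tests, using invented code values that overlap the same way:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CRS_MASK 0xffull
/* Hypothetical response codes that share bits, like the PSL9 PF_* values. */
#define CRS_PF_A 0x8cull
#define CRS_PF_B 0x90ull
#define CRS_PF_C 0x94ull

static bool is_page_fault_buggy(uint64_t dsisr)
{
    /* OR-ing codes into one mask matches any value sharing a bit with them. */
    return (dsisr & CRS_MASK) & (CRS_PF_A | CRS_PF_B | CRS_PF_C);
}

static bool is_page_fault_fixed(uint64_t dsisr)
{
    uint64_t crs = dsisr & CRS_MASK;   /* extract the response field once */

    return crs == CRS_PF_A || crs == CRS_PF_B || crs == CRS_PF_C;
}

int main(void)
{
    uint64_t not_a_fault = 0x10;   /* shares a bit with CRS_PF_B (0x90) */

    printf("buggy test: %d\n", is_page_fault_buggy(not_a_fault));   /* 1: false positive */
    printf("fixed test: %d\n", is_page_fault_fixed(not_a_fault));   /* 0 */
    return 0;
}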
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c index 17b433f1ce23..0761271d68c5 100644 --- a/drivers/misc/cxl/file.c +++ b/drivers/misc/cxl/file.c | |||
| @@ -159,11 +159,8 @@ static long afu_ioctl_start_work(struct cxl_context *ctx, | |||
| 159 | 159 | ||
| 160 | /* Do this outside the status_mutex to avoid a circular dependency with | 160 | /* Do this outside the status_mutex to avoid a circular dependency with |
| 161 | * the locking in cxl_mmap_fault() */ | 161 | * the locking in cxl_mmap_fault() */ |
| 162 | if (copy_from_user(&work, uwork, | 162 | if (copy_from_user(&work, uwork, sizeof(work))) |
| 163 | sizeof(struct cxl_ioctl_start_work))) { | 163 | return -EFAULT; |
| 164 | rc = -EFAULT; | ||
| 165 | goto out; | ||
| 166 | } | ||
| 167 | 164 | ||
| 168 | mutex_lock(&ctx->status_mutex); | 165 | mutex_lock(&ctx->status_mutex); |
| 169 | if (ctx->status != OPENED) { | 166 | if (ctx->status != OPENED) { |
diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c index 1703655072b1..c1ba0d42cbc8 100644 --- a/drivers/misc/cxl/main.c +++ b/drivers/misc/cxl/main.c | |||
| @@ -329,8 +329,15 @@ static int __init init_cxl(void) | |||
| 329 | 329 | ||
| 330 | cxl_debugfs_init(); | 330 | cxl_debugfs_init(); |
| 331 | 331 | ||
| 332 | if ((rc = register_cxl_calls(&cxl_calls))) | 332 | /* |
| 333 | goto err; | 333 | * we don't register the callback on P9. slb callback is only |
| 334 | * used for the PSL8 MMU and CX4. | ||
| 335 | */ | ||
| 336 | if (cxl_is_power8()) { | ||
| 337 | rc = register_cxl_calls(&cxl_calls); | ||
| 338 | if (rc) | ||
| 339 | goto err; | ||
| 340 | } | ||
| 334 | 341 | ||
| 335 | if (cpu_has_feature(CPU_FTR_HVMODE)) { | 342 | if (cpu_has_feature(CPU_FTR_HVMODE)) { |
| 336 | cxl_ops = &cxl_native_ops; | 343 | cxl_ops = &cxl_native_ops; |
| @@ -347,7 +354,8 @@ static int __init init_cxl(void) | |||
| 347 | 354 | ||
| 348 | return 0; | 355 | return 0; |
| 349 | err1: | 356 | err1: |
| 350 | unregister_cxl_calls(&cxl_calls); | 357 | if (cxl_is_power8()) |
| 358 | unregister_cxl_calls(&cxl_calls); | ||
| 351 | err: | 359 | err: |
| 352 | cxl_debugfs_exit(); | 360 | cxl_debugfs_exit(); |
| 353 | cxl_file_exit(); | 361 | cxl_file_exit(); |
| @@ -366,7 +374,8 @@ static void exit_cxl(void) | |||
| 366 | 374 | ||
| 367 | cxl_debugfs_exit(); | 375 | cxl_debugfs_exit(); |
| 368 | cxl_file_exit(); | 376 | cxl_file_exit(); |
| 369 | unregister_cxl_calls(&cxl_calls); | 377 | if (cxl_is_power8()) |
| 378 | unregister_cxl_calls(&cxl_calls); | ||
| 370 | idr_destroy(&cxl_adapter_idr); | 379 | idr_destroy(&cxl_adapter_idr); |
| 371 | } | 380 | } |
| 372 | 381 | ||
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c index 871a2f09c718..2b2f8894149d 100644 --- a/drivers/misc/cxl/native.c +++ b/drivers/misc/cxl/native.c | |||
| @@ -105,11 +105,16 @@ static int native_afu_reset(struct cxl_afu *afu) | |||
| 105 | CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK, | 105 | CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK, |
| 106 | false); | 106 | false); |
| 107 | 107 | ||
| 108 | /* Re-enable any masked interrupts */ | 108 | /* |
| 109 | serr = cxl_p1n_read(afu, CXL_PSL_SERR_An); | 109 | * Re-enable any masked interrupts when the AFU is not |
| 110 | serr &= ~CXL_PSL_SERR_An_IRQ_MASKS; | 110 | * activated to avoid side effects after attaching a process |
| 111 | cxl_p1n_write(afu, CXL_PSL_SERR_An, serr); | 111 | * in dedicated mode. |
| 112 | 112 | */ | |
| 113 | if (afu->current_mode == 0) { | ||
| 114 | serr = cxl_p1n_read(afu, CXL_PSL_SERR_An); | ||
| 115 | serr &= ~CXL_PSL_SERR_An_IRQ_MASKS; | ||
| 116 | cxl_p1n_write(afu, CXL_PSL_SERR_An, serr); | ||
| 117 | } | ||
| 113 | 118 | ||
| 114 | return rc; | 119 | return rc; |
| 115 | } | 120 | } |
| @@ -139,9 +144,9 @@ int cxl_psl_purge(struct cxl_afu *afu) | |||
| 139 | 144 | ||
| 140 | pr_devel("PSL purge request\n"); | 145 | pr_devel("PSL purge request\n"); |
| 141 | 146 | ||
| 142 | if (cxl_is_psl8(afu)) | 147 | if (cxl_is_power8()) |
| 143 | trans_fault = CXL_PSL_DSISR_TRANS; | 148 | trans_fault = CXL_PSL_DSISR_TRANS; |
| 144 | if (cxl_is_psl9(afu)) | 149 | if (cxl_is_power9()) |
| 145 | trans_fault = CXL_PSL9_DSISR_An_TF; | 150 | trans_fault = CXL_PSL9_DSISR_An_TF; |
| 146 | 151 | ||
| 147 | if (!cxl_ops->link_ok(afu->adapter, afu)) { | 152 | if (!cxl_ops->link_ok(afu->adapter, afu)) { |
| @@ -603,7 +608,7 @@ static u64 calculate_sr(struct cxl_context *ctx) | |||
| 603 | if (!test_tsk_thread_flag(current, TIF_32BIT)) | 608 | if (!test_tsk_thread_flag(current, TIF_32BIT)) |
| 604 | sr |= CXL_PSL_SR_An_SF; | 609 | sr |= CXL_PSL_SR_An_SF; |
| 605 | } | 610 | } |
| 606 | if (cxl_is_psl9(ctx->afu)) { | 611 | if (cxl_is_power9()) { |
| 607 | if (radix_enabled()) | 612 | if (radix_enabled()) |
| 608 | sr |= CXL_PSL_SR_An_XLAT_ror; | 613 | sr |= CXL_PSL_SR_An_XLAT_ror; |
| 609 | else | 614 | else |
| @@ -1117,10 +1122,10 @@ static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx, | |||
| 1117 | 1122 | ||
| 1118 | static bool cxl_is_translation_fault(struct cxl_afu *afu, u64 dsisr) | 1123 | static bool cxl_is_translation_fault(struct cxl_afu *afu, u64 dsisr) |
| 1119 | { | 1124 | { |
| 1120 | if ((cxl_is_psl8(afu)) && (dsisr & CXL_PSL_DSISR_TRANS)) | 1125 | if ((cxl_is_power8()) && (dsisr & CXL_PSL_DSISR_TRANS)) |
| 1121 | return true; | 1126 | return true; |
| 1122 | 1127 | ||
| 1123 | if ((cxl_is_psl9(afu)) && (dsisr & CXL_PSL9_DSISR_An_TF)) | 1128 | if ((cxl_is_power9()) && (dsisr & CXL_PSL9_DSISR_An_TF)) |
| 1124 | return true; | 1129 | return true; |
| 1125 | 1130 | ||
| 1126 | return false; | 1131 | return false; |
| @@ -1194,10 +1199,10 @@ static void native_irq_wait(struct cxl_context *ctx) | |||
| 1194 | if (ph != ctx->pe) | 1199 | if (ph != ctx->pe) |
| 1195 | return; | 1200 | return; |
| 1196 | dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An); | 1201 | dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An); |
| 1197 | if (cxl_is_psl8(ctx->afu) && | 1202 | if (cxl_is_power8() && |
| 1198 | ((dsisr & CXL_PSL_DSISR_PENDING) == 0)) | 1203 | ((dsisr & CXL_PSL_DSISR_PENDING) == 0)) |
| 1199 | return; | 1204 | return; |
| 1200 | if (cxl_is_psl9(ctx->afu) && | 1205 | if (cxl_is_power9() && |
| 1201 | ((dsisr & CXL_PSL9_DSISR_PENDING) == 0)) | 1206 | ((dsisr & CXL_PSL9_DSISR_PENDING) == 0)) |
| 1202 | return; | 1207 | return; |
| 1203 | /* | 1208 | /* |
| @@ -1302,13 +1307,16 @@ int cxl_native_register_psl_err_irq(struct cxl *adapter) | |||
| 1302 | 1307 | ||
| 1303 | void cxl_native_release_psl_err_irq(struct cxl *adapter) | 1308 | void cxl_native_release_psl_err_irq(struct cxl *adapter) |
| 1304 | { | 1309 | { |
| 1305 | if (adapter->native->err_virq != irq_find_mapping(NULL, adapter->native->err_hwirq)) | 1310 | if (adapter->native->err_virq == 0 || |
| 1311 | adapter->native->err_virq != | ||
| 1312 | irq_find_mapping(NULL, adapter->native->err_hwirq)) | ||
| 1306 | return; | 1313 | return; |
| 1307 | 1314 | ||
| 1308 | cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000); | 1315 | cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000); |
| 1309 | cxl_unmap_irq(adapter->native->err_virq, adapter); | 1316 | cxl_unmap_irq(adapter->native->err_virq, adapter); |
| 1310 | cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq); | 1317 | cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq); |
| 1311 | kfree(adapter->irq_name); | 1318 | kfree(adapter->irq_name); |
| 1319 | adapter->native->err_virq = 0; | ||
| 1312 | } | 1320 | } |
| 1313 | 1321 | ||
| 1314 | int cxl_native_register_serr_irq(struct cxl_afu *afu) | 1322 | int cxl_native_register_serr_irq(struct cxl_afu *afu) |
| @@ -1346,13 +1354,15 @@ int cxl_native_register_serr_irq(struct cxl_afu *afu) | |||
| 1346 | 1354 | ||
| 1347 | void cxl_native_release_serr_irq(struct cxl_afu *afu) | 1355 | void cxl_native_release_serr_irq(struct cxl_afu *afu) |
| 1348 | { | 1356 | { |
| 1349 | if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq)) | 1357 | if (afu->serr_virq == 0 || |
| 1358 | afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq)) | ||
| 1350 | return; | 1359 | return; |
| 1351 | 1360 | ||
| 1352 | cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000); | 1361 | cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000); |
| 1353 | cxl_unmap_irq(afu->serr_virq, afu); | 1362 | cxl_unmap_irq(afu->serr_virq, afu); |
| 1354 | cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq); | 1363 | cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq); |
| 1355 | kfree(afu->err_irq_name); | 1364 | kfree(afu->err_irq_name); |
| 1365 | afu->serr_virq = 0; | ||
| 1356 | } | 1366 | } |
| 1357 | 1367 | ||
| 1358 | int cxl_native_register_psl_irq(struct cxl_afu *afu) | 1368 | int cxl_native_register_psl_irq(struct cxl_afu *afu) |
| @@ -1375,12 +1385,15 @@ int cxl_native_register_psl_irq(struct cxl_afu *afu) | |||
| 1375 | 1385 | ||
| 1376 | void cxl_native_release_psl_irq(struct cxl_afu *afu) | 1386 | void cxl_native_release_psl_irq(struct cxl_afu *afu) |
| 1377 | { | 1387 | { |
| 1378 | if (afu->native->psl_virq != irq_find_mapping(NULL, afu->native->psl_hwirq)) | 1388 | if (afu->native->psl_virq == 0 || |
| 1389 | afu->native->psl_virq != | ||
| 1390 | irq_find_mapping(NULL, afu->native->psl_hwirq)) | ||
| 1379 | return; | 1391 | return; |
| 1380 | 1392 | ||
| 1381 | cxl_unmap_irq(afu->native->psl_virq, afu); | 1393 | cxl_unmap_irq(afu->native->psl_virq, afu); |
| 1382 | cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq); | 1394 | cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq); |
| 1383 | kfree(afu->psl_irq_name); | 1395 | kfree(afu->psl_irq_name); |
| 1396 | afu->native->psl_virq = 0; | ||
| 1384 | } | 1397 | } |
| 1385 | 1398 | ||
| 1386 | static void recover_psl_err(struct cxl_afu *afu, u64 errstat) | 1399 | static void recover_psl_err(struct cxl_afu *afu, u64 errstat) |
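All three release paths above now record virq = 0 after freeing and bail out when it is already 0, so a repeated release (for example from an error path that unwinds past a release that already ran) becomes a no-op instead of a double free. A standalone C sketch of the idempotent-release idea, with a hypothetical resource handle in place of the kernel IRQ APIs:

#include <stdio.h>
#include <stdlib.h>

struct irq_slot {
    unsigned int virq;   /* 0 means "not mapped / already released" */
    void        *cookie; /* whatever the mapping allocated */
};

static void release_irq(struct irq_slot *s)
{
    if (s->virq == 0)          /* already released: make the call idempotent */
        return;

    free(s->cookie);           /* stands in for cxl_unmap_irq()/release_one_irq() */
    s->cookie = NULL;
    s->virq = 0;               /* record the release so a repeat call is harmless */
}

int main(void)
{
    struct irq_slot s = { .virq = 42, .cookie = malloc(16) };

    release_irq(&s);   /* frees the mapping */
    release_irq(&s);   /* no-op instead of a double free */
    printf("virq after release: %u\n", s.virq);
    return 0;
}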
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index 6dc1ee5b92c9..1eb9859809bf 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c | |||
| @@ -436,7 +436,7 @@ static int init_implementation_adapter_regs_psl9(struct cxl *adapter, struct pci | |||
| 436 | /* nMMU_ID Defaults to: b’000001001’*/ | 436 | /* nMMU_ID Defaults to: b’000001001’*/ |
| 437 | xsl_dsnctl |= ((u64)0x09 << (63-28)); | 437 | xsl_dsnctl |= ((u64)0x09 << (63-28)); |
| 438 | 438 | ||
| 439 | if (cxl_is_power9() && !cpu_has_feature(CPU_FTR_POWER9_DD1)) { | 439 | if (!(cxl_is_power9_dd1())) { |
| 440 | /* | 440 | /* |
| 441 | * Used to identify CAPI packets which should be sorted into | 441 | * Used to identify CAPI packets which should be sorted into |
| 442 | * the Non-Blocking queues by the PHB. This field should match | 442 | * the Non-Blocking queues by the PHB. This field should match |
| @@ -491,7 +491,7 @@ static int init_implementation_adapter_regs_psl9(struct cxl *adapter, struct pci | |||
| 491 | cxl_p1_write(adapter, CXL_PSL9_APCDEDTYPE, 0x40000003FFFF0000ULL); | 491 | cxl_p1_write(adapter, CXL_PSL9_APCDEDTYPE, 0x40000003FFFF0000ULL); |
| 492 | 492 | ||
| 493 | /* Disable vc dd1 fix */ | 493 | /* Disable vc dd1 fix */ |
| 494 | if ((cxl_is_power9() && cpu_has_feature(CPU_FTR_POWER9_DD1))) | 494 | if (cxl_is_power9_dd1()) |
| 495 | cxl_p1_write(adapter, CXL_PSL9_GP_CT, 0x0400000000000001ULL); | 495 | cxl_p1_write(adapter, CXL_PSL9_GP_CT, 0x0400000000000001ULL); |
| 496 | 496 | ||
| 497 | return 0; | 497 | return 0; |
| @@ -1439,8 +1439,7 @@ int cxl_pci_reset(struct cxl *adapter) | |||
| 1439 | * The adapter is about to be reset, so ignore errors. | 1439 | * The adapter is about to be reset, so ignore errors. |
| 1440 | * Not supported on P9 DD1 | 1440 | * Not supported on P9 DD1 |
| 1441 | */ | 1441 | */ |
| 1442 | if ((cxl_is_power8()) || | 1442 | if ((cxl_is_power8()) || (!(cxl_is_power9_dd1()))) |
| 1443 | ((cxl_is_power9() && !cpu_has_feature(CPU_FTR_POWER9_DD1)))) | ||
| 1444 | cxl_data_cache_flush(adapter); | 1443 | cxl_data_cache_flush(adapter); |
| 1445 | 1444 | ||
| 1446 | /* pcie_warm_reset requests a fundamental pci reset which includes a | 1445 | /* pcie_warm_reset requests a fundamental pci reset which includes a |
| @@ -1750,7 +1749,6 @@ static const struct cxl_service_layer_ops psl9_ops = { | |||
| 1750 | .debugfs_add_adapter_regs = cxl_debugfs_add_adapter_regs_psl9, | 1749 | .debugfs_add_adapter_regs = cxl_debugfs_add_adapter_regs_psl9, |
| 1751 | .debugfs_add_afu_regs = cxl_debugfs_add_afu_regs_psl9, | 1750 | .debugfs_add_afu_regs = cxl_debugfs_add_afu_regs_psl9, |
| 1752 | .psl_irq_dump_registers = cxl_native_irq_dump_regs_psl9, | 1751 | .psl_irq_dump_registers = cxl_native_irq_dump_regs_psl9, |
| 1753 | .err_irq_dump_registers = cxl_native_err_irq_dump_regs, | ||
| 1754 | .debugfs_stop_trace = cxl_stop_trace_psl9, | 1752 | .debugfs_stop_trace = cxl_stop_trace_psl9, |
| 1755 | .write_timebase_ctrl = write_timebase_ctrl_psl9, | 1753 | .write_timebase_ctrl = write_timebase_ctrl_psl9, |
| 1756 | .timebase_read = timebase_read_psl9, | 1754 | .timebase_read = timebase_read_psl9, |
| @@ -1889,8 +1887,7 @@ static void cxl_pci_remove_adapter(struct cxl *adapter) | |||
| 1889 | * Flush adapter datacache as its about to be removed. | 1887 | * Flush adapter datacache as its about to be removed. |
| 1890 | * Not supported on P9 DD1. | 1888 | * Not supported on P9 DD1. |
| 1891 | */ | 1889 | */ |
| 1892 | if ((cxl_is_power8()) || | 1890 | if ((cxl_is_power8()) || (!(cxl_is_power9_dd1()))) |
| 1893 | ((cxl_is_power9() && !cpu_has_feature(CPU_FTR_POWER9_DD1)))) | ||
| 1894 | cxl_data_cache_flush(adapter); | 1891 | cxl_data_cache_flush(adapter); |
| 1895 | 1892 | ||
| 1896 | cxl_deconfigure_adapter(adapter); | 1893 | cxl_deconfigure_adapter(adapter); |
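The repeated cxl_is_power9() && cpu_has_feature(CPU_FTR_POWER9_DD1) test collapses into a single cxl_is_power9_dd1() predicate, presumably introduced elsewhere in this series (its definition is not shown here). A hedged sketch of what such a helper boils down to, with stubbed feature checks standing in for the real cxl/powerpc ones:

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for the real cxl/powerpc feature tests. */
static bool cxl_is_power9(void) { return true; }
static bool cpu_has_power9_dd1_feature(void) { return false; }

/* One predicate instead of repeating the compound test at every call site. */
static bool cxl_is_power9_dd1(void)
{
    return cxl_is_power9() && cpu_has_power9_dd1_feature();
}

int main(void)
{
    /* Cache flush is supported on POWER8 and on POWER9 other than DD1. */
    bool power8 = false;        /* pretend we run on a POWER9 DD2 machine */

    if (power8 || !cxl_is_power9_dd1())
        printf("flushing adapter data cache\n");
    return 0;
}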
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index d1928fdd0f43..07aad8576334 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c | |||
| @@ -763,8 +763,10 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a, | |||
| 763 | { | 763 | { |
| 764 | struct mei_cl_device *cldev = to_mei_cl_device(dev); | 764 | struct mei_cl_device *cldev = to_mei_cl_device(dev); |
| 765 | const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl); | 765 | const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl); |
| 766 | u8 version = mei_me_cl_ver(cldev->me_cl); | ||
| 766 | 767 | ||
| 767 | return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:", cldev->name, uuid); | 768 | return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:%02X:", |
| 769 | cldev->name, uuid, version); | ||
| 768 | } | 770 | } |
| 769 | static DEVICE_ATTR_RO(modalias); | 771 | static DEVICE_ATTR_RO(modalias); |
| 770 | 772 | ||
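The modalias string now carries the ME client protocol version so module aliases can match on it. A standalone sketch of building such a string; the kernel uses scnprintf() with the %pUl UUID extension, which plain snprintf lacks, so the UUID is formatted by hand here and the field layout is only an approximation of the real format:

#include <stdint.h>
#include <stdio.h>

/* Build something shaped like "mei:<name>:<uuid>:<version>:". */
static int build_modalias(char *buf, size_t len, const char *name,
                          const uint8_t uuid[16], uint8_t version)
{
    return snprintf(buf, len,
                    "mei:%s:%02x%02x%02x%02x-%02x%02x-%02x%02x-"
                    "%02x%02x-%02x%02x%02x%02x%02x%02x:%02X:",
                    name,
                    uuid[0], uuid[1], uuid[2], uuid[3],
                    uuid[4], uuid[5], uuid[6], uuid[7],
                    uuid[8], uuid[9], uuid[10], uuid[11],
                    uuid[12], uuid[13], uuid[14], uuid[15],
                    version);
}

int main(void)
{
    uint8_t uuid[16] = { 0 };
    char buf[128];

    build_modalias(buf, sizeof(buf), "mei_client", uuid, 3);
    printf("%s\n", buf);
    return 0;
}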
diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h index c862cd4583cc..b8069eec18cb 100644 --- a/drivers/misc/sgi-xp/xp.h +++ b/drivers/misc/sgi-xp/xp.h | |||
| @@ -309,6 +309,9 @@ static inline enum xp_retval | |||
| 309 | xpc_send(short partid, int ch_number, u32 flags, void *payload, | 309 | xpc_send(short partid, int ch_number, u32 flags, void *payload, |
| 310 | u16 payload_size) | 310 | u16 payload_size) |
| 311 | { | 311 | { |
| 312 | if (!xpc_interface.send) | ||
| 313 | return xpNotLoaded; | ||
| 314 | |||
| 312 | return xpc_interface.send(partid, ch_number, flags, payload, | 315 | return xpc_interface.send(partid, ch_number, flags, payload, |
| 313 | payload_size); | 316 | payload_size); |
| 314 | } | 317 | } |
| @@ -317,6 +320,9 @@ static inline enum xp_retval | |||
| 317 | xpc_send_notify(short partid, int ch_number, u32 flags, void *payload, | 320 | xpc_send_notify(short partid, int ch_number, u32 flags, void *payload, |
| 318 | u16 payload_size, xpc_notify_func func, void *key) | 321 | u16 payload_size, xpc_notify_func func, void *key) |
| 319 | { | 322 | { |
| 323 | if (!xpc_interface.send_notify) | ||
| 324 | return xpNotLoaded; | ||
| 325 | |||
| 320 | return xpc_interface.send_notify(partid, ch_number, flags, payload, | 326 | return xpc_interface.send_notify(partid, ch_number, flags, payload, |
| 321 | payload_size, func, key); | 327 | payload_size, func, key); |
| 322 | } | 328 | } |
| @@ -324,12 +330,16 @@ xpc_send_notify(short partid, int ch_number, u32 flags, void *payload, | |||
| 324 | static inline void | 330 | static inline void |
| 325 | xpc_received(short partid, int ch_number, void *payload) | 331 | xpc_received(short partid, int ch_number, void *payload) |
| 326 | { | 332 | { |
| 327 | return xpc_interface.received(partid, ch_number, payload); | 333 | if (xpc_interface.received) |
| 334 | xpc_interface.received(partid, ch_number, payload); | ||
| 328 | } | 335 | } |
| 329 | 336 | ||
| 330 | static inline enum xp_retval | 337 | static inline enum xp_retval |
| 331 | xpc_partid_to_nasids(short partid, void *nasids) | 338 | xpc_partid_to_nasids(short partid, void *nasids) |
| 332 | { | 339 | { |
| 340 | if (!xpc_interface.partid_to_nasids) | ||
| 341 | return xpNotLoaded; | ||
| 342 | |||
| 333 | return xpc_interface.partid_to_nasids(partid, nasids); | 343 | return xpc_interface.partid_to_nasids(partid, nasids); |
| 334 | } | 344 | } |
| 335 | 345 | ||
diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c index 01be66d02ca8..6d7f557fd1c1 100644 --- a/drivers/misc/sgi-xp/xp_main.c +++ b/drivers/misc/sgi-xp/xp_main.c | |||
| @@ -69,23 +69,9 @@ struct xpc_registration xpc_registrations[XPC_MAX_NCHANNELS]; | |||
| 69 | EXPORT_SYMBOL_GPL(xpc_registrations); | 69 | EXPORT_SYMBOL_GPL(xpc_registrations); |
| 70 | 70 | ||
| 71 | /* | 71 | /* |
| 72 | * Initialize the XPC interface to indicate that XPC isn't loaded. | 72 | * Initialize the XPC interface to NULL to indicate that XPC isn't loaded. |
| 73 | */ | 73 | */ |
| 74 | static enum xp_retval | 74 | struct xpc_interface xpc_interface = { }; |
| 75 | xpc_notloaded(void) | ||
| 76 | { | ||
| 77 | return xpNotLoaded; | ||
| 78 | } | ||
| 79 | |||
| 80 | struct xpc_interface xpc_interface = { | ||
| 81 | (void (*)(int))xpc_notloaded, | ||
| 82 | (void (*)(int))xpc_notloaded, | ||
| 83 | (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded, | ||
| 84 | (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func, | ||
| 85 | void *))xpc_notloaded, | ||
| 86 | (void (*)(short, int, void *))xpc_notloaded, | ||
| 87 | (enum xp_retval(*)(short, void *))xpc_notloaded | ||
| 88 | }; | ||
| 89 | EXPORT_SYMBOL_GPL(xpc_interface); | 75 | EXPORT_SYMBOL_GPL(xpc_interface); |
| 90 | 76 | ||
| 91 | /* | 77 | /* |
| @@ -115,17 +101,7 @@ EXPORT_SYMBOL_GPL(xpc_set_interface); | |||
| 115 | void | 101 | void |
| 116 | xpc_clear_interface(void) | 102 | xpc_clear_interface(void) |
| 117 | { | 103 | { |
| 118 | xpc_interface.connect = (void (*)(int))xpc_notloaded; | 104 | memset(&xpc_interface, 0, sizeof(xpc_interface)); |
| 119 | xpc_interface.disconnect = (void (*)(int))xpc_notloaded; | ||
| 120 | xpc_interface.send = (enum xp_retval(*)(short, int, u32, void *, u16)) | ||
| 121 | xpc_notloaded; | ||
| 122 | xpc_interface.send_notify = (enum xp_retval(*)(short, int, u32, void *, | ||
| 123 | u16, xpc_notify_func, | ||
| 124 | void *))xpc_notloaded; | ||
| 125 | xpc_interface.received = (void (*)(short, int, void *)) | ||
| 126 | xpc_notloaded; | ||
| 127 | xpc_interface.partid_to_nasids = (enum xp_retval(*)(short, void *)) | ||
| 128 | xpc_notloaded; | ||
| 129 | } | 105 | } |
| 130 | EXPORT_SYMBOL_GPL(xpc_clear_interface); | 106 | EXPORT_SYMBOL_GPL(xpc_clear_interface); |
| 131 | 107 | ||
| @@ -188,7 +164,8 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size, | |||
| 188 | 164 | ||
| 189 | mutex_unlock(®istration->mutex); | 165 | mutex_unlock(®istration->mutex); |
| 190 | 166 | ||
| 191 | xpc_interface.connect(ch_number); | 167 | if (xpc_interface.connect) |
| 168 | xpc_interface.connect(ch_number); | ||
| 192 | 169 | ||
| 193 | return xpSuccess; | 170 | return xpSuccess; |
| 194 | } | 171 | } |
| @@ -237,7 +214,8 @@ xpc_disconnect(int ch_number) | |||
| 237 | registration->assigned_limit = 0; | 214 | registration->assigned_limit = 0; |
| 238 | registration->idle_limit = 0; | 215 | registration->idle_limit = 0; |
| 239 | 216 | ||
| 240 | xpc_interface.disconnect(ch_number); | 217 | if (xpc_interface.disconnect) |
| 218 | xpc_interface.disconnect(ch_number); | ||
| 241 | 219 | ||
| 242 | mutex_unlock(®istration->mutex); | 220 | mutex_unlock(®istration->mutex); |
| 243 | 221 | ||
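Instead of pre-filling xpc_interface with casted "not loaded" stubs (casts whose signatures no longer match the callbacks), the ops table is now left zeroed and every inline wrapper checks the function pointer before calling through it, returning xpNotLoaded when XPC is absent. A standalone C sketch of that NULL-checked dispatch pattern:

#include <stddef.h>
#include <stdio.h>

enum xp_retval { xpSuccess, xpNotLoaded };

struct xpc_ops {
    enum xp_retval (*send)(short partid, int ch, const void *payload);
    void           (*received)(short partid, int ch, void *payload);
};

/* Zero-initialized by default: "XPC not loaded". */
static struct xpc_ops xpc_interface;

static enum xp_retval xpc_send(short partid, int ch, const void *payload)
{
    if (!xpc_interface.send)       /* no provider registered yet */
        return xpNotLoaded;
    return xpc_interface.send(partid, ch, payload);
}

static enum xp_retval real_send(short partid, int ch, const void *payload)
{
    (void)partid; (void)ch; (void)payload;
    return xpSuccess;
}

int main(void)
{
    printf("before register: %d\n", xpc_send(0, 0, NULL)); /* xpNotLoaded */
    xpc_interface.send = real_send;                        /* xpc_set_interface() */
    printf("after register:  %d\n", xpc_send(0, 0, NULL)); /* xpSuccess */
    return 0;
}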
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index 1842ed341af1..de962c2d5e00 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c | |||
| @@ -210,6 +210,15 @@ static void meson_mmc_get_transfer_mode(struct mmc_host *mmc, | |||
| 210 | int i; | 210 | int i; |
| 211 | bool use_desc_chain_mode = true; | 211 | bool use_desc_chain_mode = true; |
| 212 | 212 | ||
| 213 | /* | ||
| 214 | * Broken SDIO with AP6255-based WiFi on Khadas VIM Pro has been | ||
| 215 | * reported. For some strange reason this occurs in descriptor | ||
| 216 | * chain mode only. So let's fall back to bounce buffer mode | ||
| 217 | * for command SD_IO_RW_EXTENDED. | ||
| 218 | */ | ||
| 219 | if (mrq->cmd->opcode == SD_IO_RW_EXTENDED) | ||
| 220 | return; | ||
| 221 | |||
| 213 | for_each_sg(data->sg, sg, data->sg_len, i) | 222 | for_each_sg(data->sg, sg, data->sg_len, i) |
| 214 | /* check for 8 byte alignment */ | 223 | /* check for 8 byte alignment */ |
| 215 | if (sg->offset & 7) { | 224 | if (sg->offset & 7) { |
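The early return above forces CMD53 (SD_IO_RW_EXTENDED) transfers onto the bounce-buffer path and leaves descriptor-chain mode for everything else. A small standalone sketch of that opcode-based selection; the helper name and the alignment argument are illustrative, only the CMD53 opcode value comes from the SDIO spec:

#include <stdbool.h>
#include <stdio.h>

#define SD_IO_RW_EXTENDED 53   /* CMD53 */

enum xfer_mode { XFER_BOUNCE_BUFFER, XFER_DESC_CHAIN };

static enum xfer_mode pick_transfer_mode(unsigned int opcode, bool sg_aligned)
{
    /* Reported AP6255 SDIO corruption only shows up in chain mode,
     * so CMD53 always takes the bounce buffer. */
    if (opcode == SD_IO_RW_EXTENDED)
        return XFER_BOUNCE_BUFFER;

    return sg_aligned ? XFER_DESC_CHAIN : XFER_BOUNCE_BUFFER;
}

int main(void)
{
    printf("CMD53: %d, CMD18: %d\n",
           pick_transfer_mode(SD_IO_RW_EXTENDED, true),
           pick_transfer_mode(18, true));
    return 0;
}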
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index d474378ed810..b1dd12729f19 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c | |||
| @@ -202,7 +202,7 @@ static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section, | |||
| 202 | return 0; | 202 | return 0; |
| 203 | } | 203 | } |
| 204 | 204 | ||
| 205 | const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = { | 205 | static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = { |
| 206 | .ecc = nand_ooblayout_ecc_lp_hamming, | 206 | .ecc = nand_ooblayout_ecc_lp_hamming, |
| 207 | .free = nand_ooblayout_free_lp_hamming, | 207 | .free = nand_ooblayout_free_lp_hamming, |
| 208 | }; | 208 | }; |
| @@ -4361,7 +4361,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips, | |||
| 4361 | /* Initialize the ->data_interface field. */ | 4361 | /* Initialize the ->data_interface field. */ |
| 4362 | ret = nand_init_data_interface(chip); | 4362 | ret = nand_init_data_interface(chip); |
| 4363 | if (ret) | 4363 | if (ret) |
| 4364 | return ret; | 4364 | goto err_nand_init; |
| 4365 | 4365 | ||
| 4366 | /* | 4366 | /* |
| 4367 | * Setup the data interface correctly on the chip and controller side. | 4367 | * Setup the data interface correctly on the chip and controller side. |
| @@ -4373,7 +4373,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips, | |||
| 4373 | */ | 4373 | */ |
| 4374 | ret = nand_setup_data_interface(chip); | 4374 | ret = nand_setup_data_interface(chip); |
| 4375 | if (ret) | 4375 | if (ret) |
| 4376 | return ret; | 4376 | goto err_nand_init; |
| 4377 | 4377 | ||
| 4378 | nand_maf_id = chip->id.data[0]; | 4378 | nand_maf_id = chip->id.data[0]; |
| 4379 | nand_dev_id = chip->id.data[1]; | 4379 | nand_dev_id = chip->id.data[1]; |
| @@ -4404,6 +4404,12 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips, | |||
| 4404 | mtd->size = i * chip->chipsize; | 4404 | mtd->size = i * chip->chipsize; |
| 4405 | 4405 | ||
| 4406 | return 0; | 4406 | return 0; |
| 4407 | |||
| 4408 | err_nand_init: | ||
| 4409 | /* Free manufacturer priv data. */ | ||
| 4410 | nand_manufacturer_cleanup(chip); | ||
| 4411 | |||
| 4412 | return ret; | ||
| 4407 | } | 4413 | } |
| 4408 | EXPORT_SYMBOL(nand_scan_ident); | 4414 | EXPORT_SYMBOL(nand_scan_ident); |
| 4409 | 4415 | ||
| @@ -4574,18 +4580,23 @@ int nand_scan_tail(struct mtd_info *mtd) | |||
| 4574 | 4580 | ||
| 4575 | /* New bad blocks should be marked in OOB, flash-based BBT, or both */ | 4581 | /* New bad blocks should be marked in OOB, flash-based BBT, or both */ |
| 4576 | if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) && | 4582 | if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) && |
| 4577 | !(chip->bbt_options & NAND_BBT_USE_FLASH))) | 4583 | !(chip->bbt_options & NAND_BBT_USE_FLASH))) { |
| 4578 | return -EINVAL; | 4584 | ret = -EINVAL; |
| 4585 | goto err_ident; | ||
| 4586 | } | ||
| 4579 | 4587 | ||
| 4580 | if (invalid_ecc_page_accessors(chip)) { | 4588 | if (invalid_ecc_page_accessors(chip)) { |
| 4581 | pr_err("Invalid ECC page accessors setup\n"); | 4589 | pr_err("Invalid ECC page accessors setup\n"); |
| 4582 | return -EINVAL; | 4590 | ret = -EINVAL; |
| 4591 | goto err_ident; | ||
| 4583 | } | 4592 | } |
| 4584 | 4593 | ||
| 4585 | if (!(chip->options & NAND_OWN_BUFFERS)) { | 4594 | if (!(chip->options & NAND_OWN_BUFFERS)) { |
| 4586 | nbuf = kzalloc(sizeof(*nbuf), GFP_KERNEL); | 4595 | nbuf = kzalloc(sizeof(*nbuf), GFP_KERNEL); |
| 4587 | if (!nbuf) | 4596 | if (!nbuf) { |
| 4588 | return -ENOMEM; | 4597 | ret = -ENOMEM; |
| 4598 | goto err_ident; | ||
| 4599 | } | ||
| 4589 | 4600 | ||
| 4590 | nbuf->ecccalc = kmalloc(mtd->oobsize, GFP_KERNEL); | 4601 | nbuf->ecccalc = kmalloc(mtd->oobsize, GFP_KERNEL); |
| 4591 | if (!nbuf->ecccalc) { | 4602 | if (!nbuf->ecccalc) { |
| @@ -4608,8 +4619,10 @@ int nand_scan_tail(struct mtd_info *mtd) | |||
| 4608 | 4619 | ||
| 4609 | chip->buffers = nbuf; | 4620 | chip->buffers = nbuf; |
| 4610 | } else { | 4621 | } else { |
| 4611 | if (!chip->buffers) | 4622 | if (!chip->buffers) { |
| 4612 | return -ENOMEM; | 4623 | ret = -ENOMEM; |
| 4624 | goto err_ident; | ||
| 4625 | } | ||
| 4613 | } | 4626 | } |
| 4614 | 4627 | ||
| 4615 | /* Set the internal oob buffer location, just after the page data */ | 4628 | /* Set the internal oob buffer location, just after the page data */ |
| @@ -4842,7 +4855,11 @@ int nand_scan_tail(struct mtd_info *mtd) | |||
| 4842 | return 0; | 4855 | return 0; |
| 4843 | 4856 | ||
| 4844 | /* Build bad block table */ | 4857 | /* Build bad block table */ |
| 4845 | return chip->scan_bbt(mtd); | 4858 | ret = chip->scan_bbt(mtd); |
| 4859 | if (ret) | ||
| 4860 | goto err_free; | ||
| 4861 | return 0; | ||
| 4862 | |||
| 4846 | err_free: | 4863 | err_free: |
| 4847 | if (nbuf) { | 4864 | if (nbuf) { |
| 4848 | kfree(nbuf->databuf); | 4865 | kfree(nbuf->databuf); |
| @@ -4850,6 +4867,13 @@ err_free: | |||
| 4850 | kfree(nbuf->ecccalc); | 4867 | kfree(nbuf->ecccalc); |
| 4851 | kfree(nbuf); | 4868 | kfree(nbuf); |
| 4852 | } | 4869 | } |
| 4870 | |||
| 4871 | err_ident: | ||
| 4872 | /* Clean up nand_scan_ident(). */ | ||
| 4873 | |||
| 4874 | /* Free manufacturer priv data. */ | ||
| 4875 | nand_manufacturer_cleanup(chip); | ||
| 4876 | |||
| 4853 | return ret; | 4877 | return ret; |
| 4854 | } | 4878 | } |
| 4855 | EXPORT_SYMBOL(nand_scan_tail); | 4879 | EXPORT_SYMBOL(nand_scan_tail); |
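Every early failure in nand_scan_ident() and nand_scan_tail() now funnels through labels that release what earlier steps allocated (manufacturer private data, buffers) instead of returning directly and leaking it. A standalone C sketch of the goto-unwind convention, with hypothetical allocations standing in for the NAND structures:

#include <stdio.h>
#include <stdlib.h>

static int do_step(int fail) { return fail ? -1 : 0; }

static int scan(int fail_at)
{
    char *ident_priv = NULL, *buffers = NULL;
    int ret;

    ident_priv = malloc(32);            /* what nand_scan_ident() sets up */
    if (!ident_priv)
        return -1;

    ret = do_step(fail_at == 1);
    if (ret)
        goto err_ident;                 /* only ident state exists so far */

    buffers = malloc(64);               /* what nand_scan_tail() adds */
    if (!buffers) {
        ret = -1;
        goto err_ident;
    }

    ret = do_step(fail_at == 2);
    if (ret)
        goto err_free;                  /* unwind in reverse order */

    /* Success: the real driver keeps these allocated; freed here only
     * to keep the sketch leak-free. */
    free(buffers);
    free(ident_priv);
    return 0;

err_free:
    free(buffers);
err_ident:
    free(ident_priv);                   /* mirrors nand_manufacturer_cleanup() */
    return ret;
}

int main(void)
{
    printf("%d %d %d\n", scan(0), scan(1), scan(2));
    return 0;
}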
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c index 9d5ca0e540b5..92e2cf8e9ff9 100644 --- a/drivers/mtd/nand/nand_ids.c +++ b/drivers/mtd/nand/nand_ids.c | |||
| @@ -6,7 +6,6 @@ | |||
| 6 | * published by the Free Software Foundation. | 6 | * published by the Free Software Foundation. |
| 7 | * | 7 | * |
| 8 | */ | 8 | */ |
| 9 | #include <linux/module.h> | ||
| 10 | #include <linux/mtd/nand.h> | 9 | #include <linux/mtd/nand.h> |
| 11 | #include <linux/sizes.h> | 10 | #include <linux/sizes.h> |
| 12 | 11 | ||
diff --git a/drivers/mtd/nand/nand_samsung.c b/drivers/mtd/nand/nand_samsung.c index 9cfc4035a420..1e0755997762 100644 --- a/drivers/mtd/nand/nand_samsung.c +++ b/drivers/mtd/nand/nand_samsung.c | |||
| @@ -84,6 +84,9 @@ static void samsung_nand_decode_id(struct nand_chip *chip) | |||
| 84 | case 7: | 84 | case 7: |
| 85 | chip->ecc_strength_ds = 60; | 85 | chip->ecc_strength_ds = 60; |
| 86 | break; | 86 | break; |
| 87 | default: | ||
| 88 | WARN(1, "Could not decode ECC info"); | ||
| 89 | chip->ecc_step_ds = 0; | ||
| 87 | } | 90 | } |
| 88 | } | 91 | } |
| 89 | } else { | 92 | } else { |
diff --git a/drivers/mtd/nand/tango_nand.c b/drivers/mtd/nand/tango_nand.c index 05b6e1065203..49b286c6c10f 100644 --- a/drivers/mtd/nand/tango_nand.c +++ b/drivers/mtd/nand/tango_nand.c | |||
| @@ -55,10 +55,10 @@ | |||
| 55 | * byte 1 for other packets in the page (PKT_N, for N > 0) | 55 | * byte 1 for other packets in the page (PKT_N, for N > 0) |
| 56 | * ERR_COUNT_PKT_N is the max error count over all but the first packet. | 56 | * ERR_COUNT_PKT_N is the max error count over all but the first packet. |
| 57 | */ | 57 | */ |
| 58 | #define DECODE_OK_PKT_0(v) ((v) & BIT(7)) | ||
| 59 | #define DECODE_OK_PKT_N(v) ((v) & BIT(15)) | ||
| 60 | #define ERR_COUNT_PKT_0(v) (((v) >> 0) & 0x3f) | 58 | #define ERR_COUNT_PKT_0(v) (((v) >> 0) & 0x3f) |
| 61 | #define ERR_COUNT_PKT_N(v) (((v) >> 8) & 0x3f) | 59 | #define ERR_COUNT_PKT_N(v) (((v) >> 8) & 0x3f) |
| 60 | #define DECODE_FAIL_PKT_0(v) (((v) & BIT(7)) == 0) | ||
| 61 | #define DECODE_FAIL_PKT_N(v) (((v) & BIT(15)) == 0) | ||
| 62 | 62 | ||
| 63 | /* Offsets relative to pbus_base */ | 63 | /* Offsets relative to pbus_base */ |
| 64 | #define PBUS_CS_CTRL 0x83c | 64 | #define PBUS_CS_CTRL 0x83c |
| @@ -193,6 +193,8 @@ static int check_erased_page(struct nand_chip *chip, u8 *buf) | |||
| 193 | chip->ecc.strength); | 193 | chip->ecc.strength); |
| 194 | if (res < 0) | 194 | if (res < 0) |
| 195 | mtd->ecc_stats.failed++; | 195 | mtd->ecc_stats.failed++; |
| 196 | else | ||
| 197 | mtd->ecc_stats.corrected += res; | ||
| 196 | 198 | ||
| 197 | bitflips = max(res, bitflips); | 199 | bitflips = max(res, bitflips); |
| 198 | buf += pkt_size; | 200 | buf += pkt_size; |
| @@ -202,9 +204,11 @@ static int check_erased_page(struct nand_chip *chip, u8 *buf) | |||
| 202 | return bitflips; | 204 | return bitflips; |
| 203 | } | 205 | } |
| 204 | 206 | ||
| 205 | static int decode_error_report(struct tango_nfc *nfc) | 207 | static int decode_error_report(struct nand_chip *chip) |
| 206 | { | 208 | { |
| 207 | u32 status, res; | 209 | u32 status, res; |
| 210 | struct mtd_info *mtd = nand_to_mtd(chip); | ||
| 211 | struct tango_nfc *nfc = to_tango_nfc(chip->controller); | ||
| 208 | 212 | ||
| 209 | status = readl_relaxed(nfc->reg_base + NFC_XFER_STATUS); | 213 | status = readl_relaxed(nfc->reg_base + NFC_XFER_STATUS); |
| 210 | if (status & PAGE_IS_EMPTY) | 214 | if (status & PAGE_IS_EMPTY) |
| @@ -212,10 +216,14 @@ static int decode_error_report(struct tango_nfc *nfc) | |||
| 212 | 216 | ||
| 213 | res = readl_relaxed(nfc->mem_base + ERROR_REPORT); | 217 | res = readl_relaxed(nfc->mem_base + ERROR_REPORT); |
| 214 | 218 | ||
| 215 | if (DECODE_OK_PKT_0(res) && DECODE_OK_PKT_N(res)) | 219 | if (DECODE_FAIL_PKT_0(res) || DECODE_FAIL_PKT_N(res)) |
| 216 | return max(ERR_COUNT_PKT_0(res), ERR_COUNT_PKT_N(res)); | 220 | return -EBADMSG; |
| 221 | |||
| 222 | /* ERR_COUNT_PKT_N is max, not sum, but that's all we have */ | ||
| 223 | mtd->ecc_stats.corrected += | ||
| 224 | ERR_COUNT_PKT_0(res) + ERR_COUNT_PKT_N(res); | ||
| 217 | 225 | ||
| 218 | return -EBADMSG; | 226 | return max(ERR_COUNT_PKT_0(res), ERR_COUNT_PKT_N(res)); |
| 219 | } | 227 | } |
| 220 | 228 | ||
| 221 | static void tango_dma_callback(void *arg) | 229 | static void tango_dma_callback(void *arg) |
| @@ -282,7 +290,7 @@ static int tango_read_page(struct mtd_info *mtd, struct nand_chip *chip, | |||
| 282 | if (err) | 290 | if (err) |
| 283 | return err; | 291 | return err; |
| 284 | 292 | ||
| 285 | res = decode_error_report(nfc); | 293 | res = decode_error_report(chip); |
| 286 | if (res < 0) { | 294 | if (res < 0) { |
| 287 | chip->ecc.read_oob_raw(mtd, chip, page); | 295 | chip->ecc.read_oob_raw(mtd, chip, page); |
| 288 | res = check_erased_page(chip, buf); | 296 | res = check_erased_page(chip, buf); |
| @@ -663,6 +671,7 @@ static const struct of_device_id tango_nand_ids[] = { | |||
| 663 | { .compatible = "sigma,smp8758-nand" }, | 671 | { .compatible = "sigma,smp8758-nand" }, |
| 664 | { /* sentinel */ } | 672 | { /* sentinel */ } |
| 665 | }; | 673 | }; |
| 674 | MODULE_DEVICE_TABLE(of, tango_nand_ids); | ||
| 666 | 675 | ||
| 667 | static struct platform_driver tango_nand_driver = { | 676 | static struct platform_driver tango_nand_driver = { |
| 668 | .probe = tango_nand_probe, | 677 | .probe = tango_nand_probe, |
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c index 62ee439d5882..53a1cb551def 100644 --- a/drivers/net/arcnet/arcnet.c +++ b/drivers/net/arcnet/arcnet.c | |||
| @@ -756,6 +756,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id) | |||
| 756 | struct net_device *dev = dev_id; | 756 | struct net_device *dev = dev_id; |
| 757 | struct arcnet_local *lp; | 757 | struct arcnet_local *lp; |
| 758 | int recbuf, status, diagstatus, didsomething, boguscount; | 758 | int recbuf, status, diagstatus, didsomething, boguscount; |
| 759 | unsigned long flags; | ||
| 759 | int retval = IRQ_NONE; | 760 | int retval = IRQ_NONE; |
| 760 | 761 | ||
| 761 | arc_printk(D_DURING, dev, "\n"); | 762 | arc_printk(D_DURING, dev, "\n"); |
| @@ -765,7 +766,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id) | |||
| 765 | lp = netdev_priv(dev); | 766 | lp = netdev_priv(dev); |
| 766 | BUG_ON(!lp); | 767 | BUG_ON(!lp); |
| 767 | 768 | ||
| 768 | spin_lock(&lp->lock); | 769 | spin_lock_irqsave(&lp->lock, flags); |
| 769 | 770 | ||
| 770 | /* RESET flag was enabled - if device is not running, we must | 771 | /* RESET flag was enabled - if device is not running, we must |
| 771 | * clear it right away (but nothing else). | 772 | * clear it right away (but nothing else). |
| @@ -774,7 +775,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id) | |||
| 774 | if (lp->hw.status(dev) & RESETflag) | 775 | if (lp->hw.status(dev) & RESETflag) |
| 775 | lp->hw.command(dev, CFLAGScmd | RESETclear); | 776 | lp->hw.command(dev, CFLAGScmd | RESETclear); |
| 776 | lp->hw.intmask(dev, 0); | 777 | lp->hw.intmask(dev, 0); |
| 777 | spin_unlock(&lp->lock); | 778 | spin_unlock_irqrestore(&lp->lock, flags); |
| 778 | return retval; | 779 | return retval; |
| 779 | } | 780 | } |
| 780 | 781 | ||
| @@ -998,7 +999,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id) | |||
| 998 | udelay(1); | 999 | udelay(1); |
| 999 | lp->hw.intmask(dev, lp->intmask); | 1000 | lp->hw.intmask(dev, lp->intmask); |
| 1000 | 1001 | ||
| 1001 | spin_unlock(&lp->lock); | 1002 | spin_unlock_irqrestore(&lp->lock, flags); |
| 1002 | return retval; | 1003 | return retval; |
| 1003 | } | 1004 | } |
| 1004 | EXPORT_SYMBOL(arcnet_interrupt); | 1005 | EXPORT_SYMBOL(arcnet_interrupt); |
diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c index 2056878fb087..4fa2e46b48d3 100644 --- a/drivers/net/arcnet/capmode.c +++ b/drivers/net/arcnet/capmode.c | |||
| @@ -212,7 +212,7 @@ static int ack_tx(struct net_device *dev, int acked) | |||
| 212 | ackpkt->soft.cap.proto = 0; /* using protocol 0 for acknowledge */ | 212 | ackpkt->soft.cap.proto = 0; /* using protocol 0 for acknowledge */ |
| 213 | ackpkt->soft.cap.mes.ack = acked; | 213 | ackpkt->soft.cap.mes.ack = acked; |
| 214 | 214 | ||
| 215 | arc_printk(D_PROTO, dev, "Ackknowledge for cap packet %x.\n", | 215 | arc_printk(D_PROTO, dev, "Acknowledge for cap packet %x.\n", |
| 216 | *((int *)&ackpkt->soft.cap.cookie[0])); | 216 | *((int *)&ackpkt->soft.cap.cookie[0])); |
| 217 | 217 | ||
| 218 | ackskb->protocol = cpu_to_be16(ETH_P_ARCNET); | 218 | ackskb->protocol = cpu_to_be16(ETH_P_ARCNET); |
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c index 239de38fbd6a..47f80b83dcf4 100644 --- a/drivers/net/arcnet/com20020-pci.c +++ b/drivers/net/arcnet/com20020-pci.c | |||
| @@ -135,6 +135,7 @@ static int com20020pci_probe(struct pci_dev *pdev, | |||
| 135 | for (i = 0; i < ci->devcount; i++) { | 135 | for (i = 0; i < ci->devcount; i++) { |
| 136 | struct com20020_pci_channel_map *cm = &ci->chan_map_tbl[i]; | 136 | struct com20020_pci_channel_map *cm = &ci->chan_map_tbl[i]; |
| 137 | struct com20020_dev *card; | 137 | struct com20020_dev *card; |
| 138 | int dev_id_mask = 0xf; | ||
| 138 | 139 | ||
| 139 | dev = alloc_arcdev(device); | 140 | dev = alloc_arcdev(device); |
| 140 | if (!dev) { | 141 | if (!dev) { |
| @@ -166,6 +167,7 @@ static int com20020pci_probe(struct pci_dev *pdev, | |||
| 166 | arcnet_outb(0x00, ioaddr, COM20020_REG_W_COMMAND); | 167 | arcnet_outb(0x00, ioaddr, COM20020_REG_W_COMMAND); |
| 167 | arcnet_inb(ioaddr, COM20020_REG_R_DIAGSTAT); | 168 | arcnet_inb(ioaddr, COM20020_REG_R_DIAGSTAT); |
| 168 | 169 | ||
| 170 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
| 169 | dev->base_addr = ioaddr; | 171 | dev->base_addr = ioaddr; |
| 170 | dev->dev_addr[0] = node; | 172 | dev->dev_addr[0] = node; |
| 171 | dev->irq = pdev->irq; | 173 | dev->irq = pdev->irq; |
| @@ -179,8 +181,8 @@ static int com20020pci_probe(struct pci_dev *pdev, | |||
| 179 | 181 | ||
| 180 | /* Get the dev_id from the PLX rotary coder */ | 182 | /* Get the dev_id from the PLX rotary coder */ |
| 181 | if (!strncmp(ci->name, "EAE PLX-PCI MA1", 15)) | 183 | if (!strncmp(ci->name, "EAE PLX-PCI MA1", 15)) |
| 182 | dev->dev_id = 0xc; | 184 | dev_id_mask = 0x3; |
| 183 | dev->dev_id ^= inb(priv->misc + ci->rotary) >> 4; | 185 | dev->dev_id = (inb(priv->misc + ci->rotary) >> 4) & dev_id_mask; |
| 184 | 186 | ||
| 185 | snprintf(dev->name, sizeof(dev->name), "arc%d-%d", dev->dev_id, i); | 187 | snprintf(dev->name, sizeof(dev->name), "arc%d-%d", dev->dev_id, i); |
| 186 | 188 | ||
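The dev_id now comes from masking the rotary-coder nibble rather than XORing against a preset, with EAE PLX-PCI MA1 boards limited to a 2-bit id. A small standalone sketch of the extraction, using a sample register byte in place of the inb() read:

#include <stdio.h>

/* Extract the device id from the PLX rotary-coder byte: the coder sits
 * in the high nibble, and some boards only wire up two bits of it. */
static unsigned int rotary_dev_id(unsigned char misc_byte, int is_eae_ma1)
{
    unsigned int mask = is_eae_ma1 ? 0x3 : 0xf;

    return (misc_byte >> 4) & mask;
}

int main(void)
{
    printf("generic: %u, EAE MA1: %u\n",
           rotary_dev_id(0xd7, 0), rotary_dev_id(0xd7, 1));
    return 0;
}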
diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c index 13d9ad4b3f5c..78043a9c5981 100644 --- a/drivers/net/arcnet/com20020.c +++ b/drivers/net/arcnet/com20020.c | |||
| @@ -246,8 +246,6 @@ int com20020_found(struct net_device *dev, int shared) | |||
| 246 | return -ENODEV; | 246 | return -ENODEV; |
| 247 | } | 247 | } |
| 248 | 248 | ||
| 249 | dev->base_addr = ioaddr; | ||
| 250 | |||
| 251 | arc_printk(D_NORMAL, dev, "%s: station %02Xh found at %03lXh, IRQ %d.\n", | 249 | arc_printk(D_NORMAL, dev, "%s: station %02Xh found at %03lXh, IRQ %d.\n", |
| 252 | lp->card_name, dev->dev_addr[0], dev->base_addr, dev->irq); | 250 | lp->card_name, dev->dev_addr[0], dev->base_addr, dev->irq); |
| 253 | 251 | ||
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index b44a6aeb346d..e5386ab706ec 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c | |||
| @@ -90,10 +90,13 @@ enum ad_link_speed_type { | |||
| 90 | AD_LINK_SPEED_100MBPS, | 90 | AD_LINK_SPEED_100MBPS, |
| 91 | AD_LINK_SPEED_1000MBPS, | 91 | AD_LINK_SPEED_1000MBPS, |
| 92 | AD_LINK_SPEED_2500MBPS, | 92 | AD_LINK_SPEED_2500MBPS, |
| 93 | AD_LINK_SPEED_5000MBPS, | ||
| 93 | AD_LINK_SPEED_10000MBPS, | 94 | AD_LINK_SPEED_10000MBPS, |
| 95 | AD_LINK_SPEED_14000MBPS, | ||
| 94 | AD_LINK_SPEED_20000MBPS, | 96 | AD_LINK_SPEED_20000MBPS, |
| 95 | AD_LINK_SPEED_25000MBPS, | 97 | AD_LINK_SPEED_25000MBPS, |
| 96 | AD_LINK_SPEED_40000MBPS, | 98 | AD_LINK_SPEED_40000MBPS, |
| 99 | AD_LINK_SPEED_50000MBPS, | ||
| 97 | AD_LINK_SPEED_56000MBPS, | 100 | AD_LINK_SPEED_56000MBPS, |
| 98 | AD_LINK_SPEED_100000MBPS, | 101 | AD_LINK_SPEED_100000MBPS, |
| 99 | }; | 102 | }; |
| @@ -259,10 +262,13 @@ static inline int __check_agg_selection_timer(struct port *port) | |||
| 259 | * %AD_LINK_SPEED_100MBPS, | 262 | * %AD_LINK_SPEED_100MBPS, |
| 260 | * %AD_LINK_SPEED_1000MBPS, | 263 | * %AD_LINK_SPEED_1000MBPS, |
| 261 | * %AD_LINK_SPEED_2500MBPS, | 264 | * %AD_LINK_SPEED_2500MBPS, |
| 265 | * %AD_LINK_SPEED_5000MBPS, | ||
| 262 | * %AD_LINK_SPEED_10000MBPS | 266 | * %AD_LINK_SPEED_10000MBPS |
| 267 | * %AD_LINK_SPEED_14000MBPS, | ||
| 263 | * %AD_LINK_SPEED_20000MBPS | 268 | * %AD_LINK_SPEED_20000MBPS |
| 264 | * %AD_LINK_SPEED_25000MBPS | 269 | * %AD_LINK_SPEED_25000MBPS |
| 265 | * %AD_LINK_SPEED_40000MBPS | 270 | * %AD_LINK_SPEED_40000MBPS |
| 271 | * %AD_LINK_SPEED_50000MBPS | ||
| 266 | * %AD_LINK_SPEED_56000MBPS | 272 | * %AD_LINK_SPEED_56000MBPS |
| 267 | * %AD_LINK_SPEED_100000MBPS | 273 | * %AD_LINK_SPEED_100000MBPS |
| 268 | */ | 274 | */ |
| @@ -296,10 +302,18 @@ static u16 __get_link_speed(struct port *port) | |||
| 296 | speed = AD_LINK_SPEED_2500MBPS; | 302 | speed = AD_LINK_SPEED_2500MBPS; |
| 297 | break; | 303 | break; |
| 298 | 304 | ||
| 305 | case SPEED_5000: | ||
| 306 | speed = AD_LINK_SPEED_5000MBPS; | ||
| 307 | break; | ||
| 308 | |||
| 299 | case SPEED_10000: | 309 | case SPEED_10000: |
| 300 | speed = AD_LINK_SPEED_10000MBPS; | 310 | speed = AD_LINK_SPEED_10000MBPS; |
| 301 | break; | 311 | break; |
| 302 | 312 | ||
| 313 | case SPEED_14000: | ||
| 314 | speed = AD_LINK_SPEED_14000MBPS; | ||
| 315 | break; | ||
| 316 | |||
| 303 | case SPEED_20000: | 317 | case SPEED_20000: |
| 304 | speed = AD_LINK_SPEED_20000MBPS; | 318 | speed = AD_LINK_SPEED_20000MBPS; |
| 305 | break; | 319 | break; |
| @@ -312,6 +326,10 @@ static u16 __get_link_speed(struct port *port) | |||
| 312 | speed = AD_LINK_SPEED_40000MBPS; | 326 | speed = AD_LINK_SPEED_40000MBPS; |
| 313 | break; | 327 | break; |
| 314 | 328 | ||
| 329 | case SPEED_50000: | ||
| 330 | speed = AD_LINK_SPEED_50000MBPS; | ||
| 331 | break; | ||
| 332 | |||
| 315 | case SPEED_56000: | 333 | case SPEED_56000: |
| 316 | speed = AD_LINK_SPEED_56000MBPS; | 334 | speed = AD_LINK_SPEED_56000MBPS; |
| 317 | break; | 335 | break; |
| @@ -707,9 +725,15 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator) | |||
| 707 | case AD_LINK_SPEED_2500MBPS: | 725 | case AD_LINK_SPEED_2500MBPS: |
| 708 | bandwidth = nports * 2500; | 726 | bandwidth = nports * 2500; |
| 709 | break; | 727 | break; |
| 728 | case AD_LINK_SPEED_5000MBPS: | ||
| 729 | bandwidth = nports * 5000; | ||
| 730 | break; | ||
| 710 | case AD_LINK_SPEED_10000MBPS: | 731 | case AD_LINK_SPEED_10000MBPS: |
| 711 | bandwidth = nports * 10000; | 732 | bandwidth = nports * 10000; |
| 712 | break; | 733 | break; |
| 734 | case AD_LINK_SPEED_14000MBPS: | ||
| 735 | bandwidth = nports * 14000; | ||
| 736 | break; | ||
| 713 | case AD_LINK_SPEED_20000MBPS: | 737 | case AD_LINK_SPEED_20000MBPS: |
| 714 | bandwidth = nports * 20000; | 738 | bandwidth = nports * 20000; |
| 715 | break; | 739 | break; |
| @@ -719,6 +743,9 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator) | |||
| 719 | case AD_LINK_SPEED_40000MBPS: | 743 | case AD_LINK_SPEED_40000MBPS: |
| 720 | bandwidth = nports * 40000; | 744 | bandwidth = nports * 40000; |
| 721 | break; | 745 | break; |
| 746 | case AD_LINK_SPEED_50000MBPS: | ||
| 747 | bandwidth = nports * 50000; | ||
| 748 | break; | ||
| 722 | case AD_LINK_SPEED_56000MBPS: | 749 | case AD_LINK_SPEED_56000MBPS: |
| 723 | bandwidth = nports * 56000; | 750 | bandwidth = nports * 56000; |
| 724 | break; | 751 | break; |
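802.3ad aggregator bandwidth is the per-port speed times the number of ports, so each new enum value (5G, 14G, 50G here) needs a matching case in both __get_link_speed() and __get_agg_bandwidth(). A standalone C sketch of that mapping with the newly covered speeds included; it works on raw Mbps values rather than the driver's enum:

#include <stdio.h>

/* Per-port speed in Mbps -> aggregated bandwidth for nports links.
 * Returns 0 for speeds the table does not know about. */
static unsigned long agg_bandwidth(unsigned int speed_mbps, unsigned int nports)
{
    switch (speed_mbps) {
    case 10: case 100: case 1000: case 2500:
    case 5000: case 10000: case 14000: case 20000:
    case 25000: case 40000: case 50000: case 56000: case 100000:
        return (unsigned long)speed_mbps * nports;
    default:
        return 0;
    }
}

int main(void)
{
    printf("4 x 25G  = %lu Mbps\n", agg_bandwidth(25000, 4));
    printf("2 x 14G  = %lu Mbps\n", agg_bandwidth(14000, 2));
    printf("unknown  = %lu Mbps\n", agg_bandwidth(12345, 2));
    return 0;
}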
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 2359478b977f..8ab6bdbe1682 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
| @@ -4192,7 +4192,6 @@ static void bond_destructor(struct net_device *bond_dev) | |||
| 4192 | struct bonding *bond = netdev_priv(bond_dev); | 4192 | struct bonding *bond = netdev_priv(bond_dev); |
| 4193 | if (bond->wq) | 4193 | if (bond->wq) |
| 4194 | destroy_workqueue(bond->wq); | 4194 | destroy_workqueue(bond->wq); |
| 4195 | free_netdev(bond_dev); | ||
| 4196 | } | 4195 | } |
| 4197 | 4196 | ||
| 4198 | void bond_setup(struct net_device *bond_dev) | 4197 | void bond_setup(struct net_device *bond_dev) |
| @@ -4212,7 +4211,8 @@ void bond_setup(struct net_device *bond_dev) | |||
| 4212 | bond_dev->netdev_ops = &bond_netdev_ops; | 4211 | bond_dev->netdev_ops = &bond_netdev_ops; |
| 4213 | bond_dev->ethtool_ops = &bond_ethtool_ops; | 4212 | bond_dev->ethtool_ops = &bond_ethtool_ops; |
| 4214 | 4213 | ||
| 4215 | bond_dev->destructor = bond_destructor; | 4214 | bond_dev->needs_free_netdev = true; |
| 4215 | bond_dev->priv_destructor = bond_destructor; | ||
| 4216 | 4216 | ||
| 4217 | SET_NETDEV_DEVTYPE(bond_dev, &bond_type); | 4217 | SET_NETDEV_DEVTYPE(bond_dev, &bond_type); |
| 4218 | 4218 | ||
| @@ -4736,7 +4736,7 @@ int bond_create(struct net *net, const char *name) | |||
| 4736 | 4736 | ||
| 4737 | rtnl_unlock(); | 4737 | rtnl_unlock(); |
| 4738 | if (res < 0) | 4738 | if (res < 0) |
| 4739 | bond_destructor(bond_dev); | 4739 | free_netdev(bond_dev); |
| 4740 | return res; | 4740 | return res; |
| 4741 | } | 4741 | } |
| 4742 | 4742 | ||
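The old single destructor both freed driver-private state and called free_netdev(); the split introduced here lets the core free the netdev (needs_free_netdev) while priv_destructor only tears down what the driver allocated, which is why bond_create()'s error path now calls free_netdev() directly. A standalone C sketch of the split-ownership idea, with hypothetical structures in place of struct net_device:

#include <stdio.h>
#include <stdlib.h>

struct fake_netdev {
    void (*priv_destructor)(struct fake_netdev *);
    int   needs_free_netdev;
    void *priv;                 /* driver-private allocation */
};

/* Core-side teardown: run the driver hook, then free the device once. */
static void core_free_netdev(struct fake_netdev *dev)
{
    if (dev->priv_destructor)
        dev->priv_destructor(dev);   /* driver frees only its own state */
    if (dev->needs_free_netdev)
        free(dev);                   /* the core owns the netdev itself */
}

static void driver_destructor(struct fake_netdev *dev)
{
    free(dev->priv);                 /* e.g. kfree(priv->vfinfo) in dummy */
}

int main(void)
{
    struct fake_netdev *dev = calloc(1, sizeof(*dev));

    if (!dev)
        return 1;
    dev->priv = malloc(32);
    dev->priv_destructor = driver_destructor;
    dev->needs_free_netdev = 1;

    core_free_netdev(dev);
    printf("teardown done\n");
    return 0;
}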
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c index ddabce759456..71a7c3b44fdd 100644 --- a/drivers/net/caif/caif_hsi.c +++ b/drivers/net/caif/caif_hsi.c | |||
| @@ -1121,7 +1121,7 @@ static void cfhsi_setup(struct net_device *dev) | |||
| 1121 | dev->flags = IFF_POINTOPOINT | IFF_NOARP; | 1121 | dev->flags = IFF_POINTOPOINT | IFF_NOARP; |
| 1122 | dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ; | 1122 | dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ; |
| 1123 | dev->priv_flags |= IFF_NO_QUEUE; | 1123 | dev->priv_flags |= IFF_NO_QUEUE; |
| 1124 | dev->destructor = free_netdev; | 1124 | dev->needs_free_netdev = true; |
| 1125 | dev->netdev_ops = &cfhsi_netdevops; | 1125 | dev->netdev_ops = &cfhsi_netdevops; |
| 1126 | for (i = 0; i < CFHSI_PRIO_LAST; ++i) | 1126 | for (i = 0; i < CFHSI_PRIO_LAST; ++i) |
| 1127 | skb_queue_head_init(&cfhsi->qhead[i]); | 1127 | skb_queue_head_init(&cfhsi->qhead[i]); |
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c index c2dea4916e5d..76e1d3545105 100644 --- a/drivers/net/caif/caif_serial.c +++ b/drivers/net/caif/caif_serial.c | |||
| @@ -428,7 +428,7 @@ static void caifdev_setup(struct net_device *dev) | |||
| 428 | dev->flags = IFF_POINTOPOINT | IFF_NOARP; | 428 | dev->flags = IFF_POINTOPOINT | IFF_NOARP; |
| 429 | dev->mtu = CAIF_MAX_MTU; | 429 | dev->mtu = CAIF_MAX_MTU; |
| 430 | dev->priv_flags |= IFF_NO_QUEUE; | 430 | dev->priv_flags |= IFF_NO_QUEUE; |
| 431 | dev->destructor = free_netdev; | 431 | dev->needs_free_netdev = true; |
| 432 | skb_queue_head_init(&serdev->head); | 432 | skb_queue_head_init(&serdev->head); |
| 433 | serdev->common.link_select = CAIF_LINK_LOW_LATENCY; | 433 | serdev->common.link_select = CAIF_LINK_LOW_LATENCY; |
| 434 | serdev->common.use_frag = true; | 434 | serdev->common.use_frag = true; |
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c index 3a529fbe539f..fc21afe852b9 100644 --- a/drivers/net/caif/caif_spi.c +++ b/drivers/net/caif/caif_spi.c | |||
| @@ -712,7 +712,7 @@ static void cfspi_setup(struct net_device *dev) | |||
| 712 | dev->flags = IFF_NOARP | IFF_POINTOPOINT; | 712 | dev->flags = IFF_NOARP | IFF_POINTOPOINT; |
| 713 | dev->priv_flags |= IFF_NO_QUEUE; | 713 | dev->priv_flags |= IFF_NO_QUEUE; |
| 714 | dev->mtu = SPI_MAX_PAYLOAD_SIZE; | 714 | dev->mtu = SPI_MAX_PAYLOAD_SIZE; |
| 715 | dev->destructor = free_netdev; | 715 | dev->needs_free_netdev = true; |
| 716 | skb_queue_head_init(&cfspi->qhead); | 716 | skb_queue_head_init(&cfspi->qhead); |
| 717 | skb_queue_head_init(&cfspi->chead); | 717 | skb_queue_head_init(&cfspi->chead); |
| 718 | cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW; | 718 | cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW; |
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c index 6122768c8644..1794ea0420b7 100644 --- a/drivers/net/caif/caif_virtio.c +++ b/drivers/net/caif/caif_virtio.c | |||
| @@ -617,7 +617,7 @@ static void cfv_netdev_setup(struct net_device *netdev) | |||
| 617 | netdev->tx_queue_len = 100; | 617 | netdev->tx_queue_len = 100; |
| 618 | netdev->flags = IFF_POINTOPOINT | IFF_NOARP; | 618 | netdev->flags = IFF_POINTOPOINT | IFF_NOARP; |
| 619 | netdev->mtu = CFV_DEF_MTU_SIZE; | 619 | netdev->mtu = CFV_DEF_MTU_SIZE; |
| 620 | netdev->destructor = free_netdev; | 620 | netdev->needs_free_netdev = true; |
| 621 | } | 621 | } |
| 622 | 622 | ||
| 623 | /* Create debugfs counters for the device */ | 623 | /* Create debugfs counters for the device */ |
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 611d16a7061d..ae4ed03dc642 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c | |||
| @@ -391,6 +391,9 @@ void can_change_state(struct net_device *dev, struct can_frame *cf, | |||
| 391 | can_update_state_error_stats(dev, new_state); | 391 | can_update_state_error_stats(dev, new_state); |
| 392 | priv->state = new_state; | 392 | priv->state = new_state; |
| 393 | 393 | ||
| 394 | if (!cf) | ||
| 395 | return; | ||
| 396 | |||
| 394 | if (unlikely(new_state == CAN_STATE_BUS_OFF)) { | 397 | if (unlikely(new_state == CAN_STATE_BUS_OFF)) { |
| 395 | cf->can_id |= CAN_ERR_BUSOFF; | 398 | cf->can_id |= CAN_ERR_BUSOFF; |
| 396 | return; | 399 | return; |
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c index 0d57be5ea97b..85268be0c913 100644 --- a/drivers/net/can/peak_canfd/peak_canfd.c +++ b/drivers/net/can/peak_canfd/peak_canfd.c | |||
| @@ -489,7 +489,7 @@ int peak_canfd_handle_msgs_list(struct peak_canfd_priv *priv, | |||
| 489 | struct pucan_rx_msg *msg_list, int msg_count) | 489 | struct pucan_rx_msg *msg_list, int msg_count) |
| 490 | { | 490 | { |
| 491 | void *msg_ptr = msg_list; | 491 | void *msg_ptr = msg_list; |
| 492 | int i, msg_size; | 492 | int i, msg_size = 0; |
| 493 | 493 | ||
| 494 | for (i = 0; i < msg_count; i++) { | 494 | for (i = 0; i < msg_count; i++) { |
| 495 | msg_size = peak_canfd_handle_msg(priv, msg_ptr); | 495 | msg_size = peak_canfd_handle_msg(priv, msg_ptr); |
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index eb7173713bbc..6a6e896e52fa 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c | |||
| @@ -417,7 +417,7 @@ static int slc_open(struct net_device *dev) | |||
| 417 | static void slc_free_netdev(struct net_device *dev) | 417 | static void slc_free_netdev(struct net_device *dev) |
| 418 | { | 418 | { |
| 419 | int i = dev->base_addr; | 419 | int i = dev->base_addr; |
| 420 | free_netdev(dev); | 420 | |
| 421 | slcan_devs[i] = NULL; | 421 | slcan_devs[i] = NULL; |
| 422 | } | 422 | } |
| 423 | 423 | ||
| @@ -436,7 +436,8 @@ static const struct net_device_ops slc_netdev_ops = { | |||
| 436 | static void slc_setup(struct net_device *dev) | 436 | static void slc_setup(struct net_device *dev) |
| 437 | { | 437 | { |
| 438 | dev->netdev_ops = &slc_netdev_ops; | 438 | dev->netdev_ops = &slc_netdev_ops; |
| 439 | dev->destructor = slc_free_netdev; | 439 | dev->needs_free_netdev = true; |
| 440 | dev->priv_destructor = slc_free_netdev; | ||
| 440 | 441 | ||
| 441 | dev->hard_header_len = 0; | 442 | dev->hard_header_len = 0; |
| 442 | dev->addr_len = 0; | 443 | dev->addr_len = 0; |
| @@ -761,8 +762,6 @@ static void __exit slcan_exit(void) | |||
| 761 | if (sl->tty) { | 762 | if (sl->tty) { |
| 762 | printk(KERN_ERR "%s: tty discipline still running\n", | 763 | printk(KERN_ERR "%s: tty discipline still running\n", |
| 763 | dev->name); | 764 | dev->name); |
| 764 | /* Intentionally leak the control block. */ | ||
| 765 | dev->destructor = NULL; | ||
| 766 | } | 765 | } |
| 767 | 766 | ||
| 768 | unregister_netdev(dev); | 767 | unregister_netdev(dev); |
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index eecee7f8dfb7..afcc1312dbaf 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c | |||
| @@ -265,6 +265,8 @@ static int gs_cmd_reset(struct gs_usb *gsusb, struct gs_can *gsdev) | |||
| 265 | sizeof(*dm), | 265 | sizeof(*dm), |
| 266 | 1000); | 266 | 1000); |
| 267 | 267 | ||
| 268 | kfree(dm); | ||
| 269 | |||
| 268 | return rc; | 270 | return rc; |
| 269 | } | 271 | } |
| 270 | 272 | ||
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index 57913dbbae0a..1ca76e03e965 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c | |||
| @@ -908,8 +908,6 @@ static int peak_usb_probe(struct usb_interface *intf, | |||
| 908 | const struct peak_usb_adapter *peak_usb_adapter = NULL; | 908 | const struct peak_usb_adapter *peak_usb_adapter = NULL; |
| 909 | int i, err = -ENOMEM; | 909 | int i, err = -ENOMEM; |
| 910 | 910 | ||
| 911 | usb_dev = interface_to_usbdev(intf); | ||
| 912 | |||
| 913 | /* get corresponding PCAN-USB adapter */ | 911 | /* get corresponding PCAN-USB adapter */ |
| 914 | for (i = 0; i < ARRAY_SIZE(peak_usb_adapters_list); i++) | 912 | for (i = 0; i < ARRAY_SIZE(peak_usb_adapters_list); i++) |
| 915 | if (peak_usb_adapters_list[i]->device_id == usb_id_product) { | 913 | if (peak_usb_adapters_list[i]->device_id == usb_id_product) { |
| @@ -920,7 +918,7 @@ static int peak_usb_probe(struct usb_interface *intf, | |||
| 920 | if (!peak_usb_adapter) { | 918 | if (!peak_usb_adapter) { |
| 921 | /* should never come except device_id bad usage in this file */ | 919 | /* should never come except device_id bad usage in this file */ |
| 922 | pr_err("%s: didn't find device id. 0x%x in devices list\n", | 920 | pr_err("%s: didn't find device id. 0x%x in devices list\n", |
| 923 | PCAN_USB_DRIVER_NAME, usb_dev->descriptor.idProduct); | 921 | PCAN_USB_DRIVER_NAME, usb_id_product); |
| 924 | return -ENODEV; | 922 | return -ENODEV; |
| 925 | } | 923 | } |
| 926 | 924 | ||
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c index facca33d53e9..a8cb33264ff1 100644 --- a/drivers/net/can/vcan.c +++ b/drivers/net/can/vcan.c | |||
| @@ -152,7 +152,7 @@ static const struct net_device_ops vcan_netdev_ops = { | |||
| 152 | static void vcan_setup(struct net_device *dev) | 152 | static void vcan_setup(struct net_device *dev) |
| 153 | { | 153 | { |
| 154 | dev->type = ARPHRD_CAN; | 154 | dev->type = ARPHRD_CAN; |
| 155 | dev->mtu = CAN_MTU; | 155 | dev->mtu = CANFD_MTU; |
| 156 | dev->hard_header_len = 0; | 156 | dev->hard_header_len = 0; |
| 157 | dev->addr_len = 0; | 157 | dev->addr_len = 0; |
| 158 | dev->tx_queue_len = 0; | 158 | dev->tx_queue_len = 0; |
| @@ -163,7 +163,7 @@ static void vcan_setup(struct net_device *dev) | |||
| 163 | dev->flags |= IFF_ECHO; | 163 | dev->flags |= IFF_ECHO; |
| 164 | 164 | ||
| 165 | dev->netdev_ops = &vcan_netdev_ops; | 165 | dev->netdev_ops = &vcan_netdev_ops; |
| 166 | dev->destructor = free_netdev; | 166 | dev->needs_free_netdev = true; |
| 167 | } | 167 | } |
| 168 | 168 | ||
| 169 | static struct rtnl_link_ops vcan_link_ops __read_mostly = { | 169 | static struct rtnl_link_ops vcan_link_ops __read_mostly = { |
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c index 7fbb24795681..cfe889e8f172 100644 --- a/drivers/net/can/vxcan.c +++ b/drivers/net/can/vxcan.c | |||
| @@ -150,13 +150,13 @@ static const struct net_device_ops vxcan_netdev_ops = { | |||
| 150 | static void vxcan_setup(struct net_device *dev) | 150 | static void vxcan_setup(struct net_device *dev) |
| 151 | { | 151 | { |
| 152 | dev->type = ARPHRD_CAN; | 152 | dev->type = ARPHRD_CAN; |
| 153 | dev->mtu = CAN_MTU; | 153 | dev->mtu = CANFD_MTU; |
| 154 | dev->hard_header_len = 0; | 154 | dev->hard_header_len = 0; |
| 155 | dev->addr_len = 0; | 155 | dev->addr_len = 0; |
| 156 | dev->tx_queue_len = 0; | 156 | dev->tx_queue_len = 0; |
| 157 | dev->flags = (IFF_NOARP|IFF_ECHO); | 157 | dev->flags = (IFF_NOARP|IFF_ECHO); |
| 158 | dev->netdev_ops = &vxcan_netdev_ops; | 158 | dev->netdev_ops = &vxcan_netdev_ops; |
| 159 | dev->destructor = free_netdev; | 159 | dev->needs_free_netdev = true; |
| 160 | } | 160 | } |
| 161 | 161 | ||
| 162 | /* forward declaration for rtnl_create_link() */ | 162 | /* forward declaration for rtnl_create_link() */ |
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h index 96046bb12ca1..14c0be98e0a4 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.h +++ b/drivers/net/dsa/mv88e6xxx/global2.h | |||
| @@ -114,13 +114,13 @@ static inline int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip, | |||
| 114 | return -EOPNOTSUPP; | 114 | return -EOPNOTSUPP; |
| 115 | } | 115 | } |
| 116 | 116 | ||
| 117 | int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip, int src_dev, | 117 | static inline int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip, |
| 118 | int src_port, u16 data) | 118 | int src_dev, int src_port, u16 data) |
| 119 | { | 119 | { |
| 120 | return -EOPNOTSUPP; | 120 | return -EOPNOTSUPP; |
| 121 | } | 121 | } |
| 122 | 122 | ||
| 123 | int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip) | 123 | static inline int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip) |
| 124 | { | 124 | { |
| 125 | return -EOPNOTSUPP; | 125 | return -EOPNOTSUPP; |
| 126 | } | 126 | } |
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index 149244aac20a..9905b52fe293 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c | |||
| @@ -328,7 +328,6 @@ static void dummy_free_netdev(struct net_device *dev) | |||
| 328 | struct dummy_priv *priv = netdev_priv(dev); | 328 | struct dummy_priv *priv = netdev_priv(dev); |
| 329 | 329 | ||
| 330 | kfree(priv->vfinfo); | 330 | kfree(priv->vfinfo); |
| 331 | free_netdev(dev); | ||
| 332 | } | 331 | } |
| 333 | 332 | ||
| 334 | static void dummy_setup(struct net_device *dev) | 333 | static void dummy_setup(struct net_device *dev) |
| @@ -338,7 +337,8 @@ static void dummy_setup(struct net_device *dev) | |||
| 338 | /* Initialize the device structure. */ | 337 | /* Initialize the device structure. */ |
| 339 | dev->netdev_ops = &dummy_netdev_ops; | 338 | dev->netdev_ops = &dummy_netdev_ops; |
| 340 | dev->ethtool_ops = &dummy_ethtool_ops; | 339 | dev->ethtool_ops = &dummy_ethtool_ops; |
| 341 | dev->destructor = dummy_free_netdev; | 340 | dev->needs_free_netdev = true; |
| 341 | dev->priv_destructor = dummy_free_netdev; | ||
| 342 | 342 | ||
| 343 | /* Fill in device structure with ethernet-generic values. */ | 343 | /* Fill in device structure with ethernet-generic values. */ |
| 344 | dev->flags |= IFF_NOARP; | 344 | dev->flags |= IFF_NOARP; |
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index 08d11cede9c9..f5b237e0bd60 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c | |||
| @@ -61,6 +61,8 @@ | |||
| 61 | 61 | ||
| 62 | #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF | 62 | #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF |
| 63 | 63 | ||
| 64 | #define ENA_REGS_ADMIN_INTR_MASK 1 | ||
| 65 | |||
| 64 | /*****************************************************************************/ | 66 | /*****************************************************************************/ |
| 65 | /*****************************************************************************/ | 67 | /*****************************************************************************/ |
| 66 | /*****************************************************************************/ | 68 | /*****************************************************************************/ |
| @@ -232,11 +234,9 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu | |||
| 232 | tail_masked = admin_queue->sq.tail & queue_size_mask; | 234 | tail_masked = admin_queue->sq.tail & queue_size_mask; |
| 233 | 235 | ||
| 234 | /* In case of queue FULL */ | 236 | /* In case of queue FULL */ |
| 235 | cnt = admin_queue->sq.tail - admin_queue->sq.head; | 237 | cnt = atomic_read(&admin_queue->outstanding_cmds); |
| 236 | if (cnt >= admin_queue->q_depth) { | 238 | if (cnt >= admin_queue->q_depth) { |
| 237 | pr_debug("admin queue is FULL (tail %d head %d depth: %d)\n", | 239 | pr_debug("admin queue is full.\n"); |
| 238 | admin_queue->sq.tail, admin_queue->sq.head, | ||
| 239 | admin_queue->q_depth); | ||
| 240 | admin_queue->stats.out_of_space++; | 240 | admin_queue->stats.out_of_space++; |
| 241 | return ERR_PTR(-ENOSPC); | 241 | return ERR_PTR(-ENOSPC); |
| 242 | } | 242 | } |
| @@ -508,15 +508,20 @@ static int ena_com_comp_status_to_errno(u8 comp_status) | |||
| 508 | static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx, | 508 | static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx, |
| 509 | struct ena_com_admin_queue *admin_queue) | 509 | struct ena_com_admin_queue *admin_queue) |
| 510 | { | 510 | { |
| 511 | unsigned long flags; | 511 | unsigned long flags, timeout; |
| 512 | u32 start_time; | ||
| 513 | int ret; | 512 | int ret; |
| 514 | 513 | ||
| 515 | start_time = ((u32)jiffies_to_usecs(jiffies)); | 514 | timeout = jiffies + ADMIN_CMD_TIMEOUT_US; |
| 515 | |||
| 516 | while (1) { | ||
| 517 | spin_lock_irqsave(&admin_queue->q_lock, flags); | ||
| 518 | ena_com_handle_admin_completion(admin_queue); | ||
| 519 | spin_unlock_irqrestore(&admin_queue->q_lock, flags); | ||
| 520 | |||
| 521 | if (comp_ctx->status != ENA_CMD_SUBMITTED) | ||
| 522 | break; | ||
| 516 | 523 | ||
| 517 | while (comp_ctx->status == ENA_CMD_SUBMITTED) { | 524 | if (time_is_before_jiffies(timeout)) { |
| 518 | if ((((u32)jiffies_to_usecs(jiffies)) - start_time) > | ||
| 519 | ADMIN_CMD_TIMEOUT_US) { | ||
| 520 | pr_err("Wait for completion (polling) timeout\n"); | 525 | pr_err("Wait for completion (polling) timeout\n"); |
| 521 | /* ENA didn't have any completion */ | 526 | /* ENA didn't have any completion */ |
| 522 | spin_lock_irqsave(&admin_queue->q_lock, flags); | 527 | spin_lock_irqsave(&admin_queue->q_lock, flags); |
| @@ -528,10 +533,6 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c | |||
| 528 | goto err; | 533 | goto err; |
| 529 | } | 534 | } |
| 530 | 535 | ||
| 531 | spin_lock_irqsave(&admin_queue->q_lock, flags); | ||
| 532 | ena_com_handle_admin_completion(admin_queue); | ||
| 533 | spin_unlock_irqrestore(&admin_queue->q_lock, flags); | ||
| 534 | |||
| 535 | msleep(100); | 536 | msleep(100); |
| 536 | } | 537 | } |
| 537 | 538 | ||
| @@ -1455,6 +1456,12 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev) | |||
| 1455 | 1456 | ||
| 1456 | void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling) | 1457 | void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling) |
| 1457 | { | 1458 | { |
| 1459 | u32 mask_value = 0; | ||
| 1460 | |||
| 1461 | if (polling) | ||
| 1462 | mask_value = ENA_REGS_ADMIN_INTR_MASK; | ||
| 1463 | |||
| 1464 | writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF); | ||
| 1458 | ena_dev->admin_queue.polling = polling; | 1465 | ena_dev->admin_queue.polling = polling; |
| 1459 | } | 1466 | } |
| 1460 | 1467 | ||
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index 67b2338f8fb3..3ee55e2fd694 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c | |||
| @@ -80,7 +80,6 @@ static const struct ena_stats ena_stats_tx_strings[] = { | |||
| 80 | ENA_STAT_TX_ENTRY(tx_poll), | 80 | ENA_STAT_TX_ENTRY(tx_poll), |
| 81 | ENA_STAT_TX_ENTRY(doorbells), | 81 | ENA_STAT_TX_ENTRY(doorbells), |
| 82 | ENA_STAT_TX_ENTRY(prepare_ctx_err), | 82 | ENA_STAT_TX_ENTRY(prepare_ctx_err), |
| 83 | ENA_STAT_TX_ENTRY(missing_tx_comp), | ||
| 84 | ENA_STAT_TX_ENTRY(bad_req_id), | 83 | ENA_STAT_TX_ENTRY(bad_req_id), |
| 85 | }; | 84 | }; |
| 86 | 85 | ||
| @@ -94,6 +93,7 @@ static const struct ena_stats ena_stats_rx_strings[] = { | |||
| 94 | ENA_STAT_RX_ENTRY(dma_mapping_err), | 93 | ENA_STAT_RX_ENTRY(dma_mapping_err), |
| 95 | ENA_STAT_RX_ENTRY(bad_desc_num), | 94 | ENA_STAT_RX_ENTRY(bad_desc_num), |
| 96 | ENA_STAT_RX_ENTRY(rx_copybreak_pkt), | 95 | ENA_STAT_RX_ENTRY(rx_copybreak_pkt), |
| 96 | ENA_STAT_RX_ENTRY(empty_rx_ring), | ||
| 97 | }; | 97 | }; |
| 98 | 98 | ||
| 99 | static const struct ena_stats ena_stats_ena_com_strings[] = { | 99 | static const struct ena_stats ena_stats_ena_com_strings[] = { |
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 7c1214d78855..4f16ed38bcf3 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c | |||
| @@ -190,6 +190,7 @@ static void ena_init_io_rings(struct ena_adapter *adapter) | |||
| 190 | rxr->sgl_size = adapter->max_rx_sgl_size; | 190 | rxr->sgl_size = adapter->max_rx_sgl_size; |
| 191 | rxr->smoothed_interval = | 191 | rxr->smoothed_interval = |
| 192 | ena_com_get_nonadaptive_moderation_interval_rx(ena_dev); | 192 | ena_com_get_nonadaptive_moderation_interval_rx(ena_dev); |
| 193 | rxr->empty_rx_queue = 0; | ||
| 193 | } | 194 | } |
| 194 | } | 195 | } |
| 195 | 196 | ||
| @@ -1078,6 +1079,26 @@ inline void ena_adjust_intr_moderation(struct ena_ring *rx_ring, | |||
| 1078 | rx_ring->per_napi_bytes = 0; | 1079 | rx_ring->per_napi_bytes = 0; |
| 1079 | } | 1080 | } |
| 1080 | 1081 | ||
| 1082 | static inline void ena_unmask_interrupt(struct ena_ring *tx_ring, | ||
| 1083 | struct ena_ring *rx_ring) | ||
| 1084 | { | ||
| 1085 | struct ena_eth_io_intr_reg intr_reg; | ||
| 1086 | |||
| 1087 | /* Update intr register: rx intr delay, | ||
| 1088 | * tx intr delay and interrupt unmask | ||
| 1089 | */ | ||
| 1090 | ena_com_update_intr_reg(&intr_reg, | ||
| 1091 | rx_ring->smoothed_interval, | ||
| 1092 | tx_ring->smoothed_interval, | ||
| 1093 | true); | ||
| 1094 | |||
| 1095 | /* It is a shared MSI-X. | ||
| 1096 | * The Tx and Rx CQs both hold a pointer to it, | ||
| 1097 | * so we use one of them to reach the intr reg. | ||
| 1098 | */ | ||
| 1099 | ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg); | ||
| 1100 | } | ||
| 1101 | |||
| 1081 | static inline void ena_update_ring_numa_node(struct ena_ring *tx_ring, | 1102 | static inline void ena_update_ring_numa_node(struct ena_ring *tx_ring, |
| 1082 | struct ena_ring *rx_ring) | 1103 | struct ena_ring *rx_ring) |
| 1083 | { | 1104 | { |
| @@ -1108,7 +1129,6 @@ static int ena_io_poll(struct napi_struct *napi, int budget) | |||
| 1108 | { | 1129 | { |
| 1109 | struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi); | 1130 | struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi); |
| 1110 | struct ena_ring *tx_ring, *rx_ring; | 1131 | struct ena_ring *tx_ring, *rx_ring; |
| 1111 | struct ena_eth_io_intr_reg intr_reg; | ||
| 1112 | 1132 | ||
| 1113 | u32 tx_work_done; | 1133 | u32 tx_work_done; |
| 1114 | u32 rx_work_done; | 1134 | u32 rx_work_done; |
| @@ -1149,22 +1169,9 @@ static int ena_io_poll(struct napi_struct *napi, int budget) | |||
| 1149 | if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev)) | 1169 | if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev)) |
| 1150 | ena_adjust_intr_moderation(rx_ring, tx_ring); | 1170 | ena_adjust_intr_moderation(rx_ring, tx_ring); |
| 1151 | 1171 | ||
| 1152 | /* Update intr register: rx intr delay, | 1172 | ena_unmask_interrupt(tx_ring, rx_ring); |
| 1153 | * tx intr delay and interrupt unmask | ||
| 1154 | */ | ||
| 1155 | ena_com_update_intr_reg(&intr_reg, | ||
| 1156 | rx_ring->smoothed_interval, | ||
| 1157 | tx_ring->smoothed_interval, | ||
| 1158 | true); | ||
| 1159 | |||
| 1160 | /* It is a shared MSI-X. | ||
| 1161 | * Tx and Rx CQ have pointer to it. | ||
| 1162 | * So we use one of them to reach the intr reg | ||
| 1163 | */ | ||
| 1164 | ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg); | ||
| 1165 | } | 1173 | } |
| 1166 | 1174 | ||
| 1167 | |||
| 1168 | ena_update_ring_numa_node(tx_ring, rx_ring); | 1175 | ena_update_ring_numa_node(tx_ring, rx_ring); |
| 1169 | 1176 | ||
| 1170 | ret = rx_work_done; | 1177 | ret = rx_work_done; |
| @@ -1485,6 +1492,11 @@ static int ena_up_complete(struct ena_adapter *adapter) | |||
| 1485 | 1492 | ||
| 1486 | ena_napi_enable_all(adapter); | 1493 | ena_napi_enable_all(adapter); |
| 1487 | 1494 | ||
| 1495 | /* Enable completion queues interrupt */ | ||
| 1496 | for (i = 0; i < adapter->num_queues; i++) | ||
| 1497 | ena_unmask_interrupt(&adapter->tx_ring[i], | ||
| 1498 | &adapter->rx_ring[i]); | ||
| 1499 | |||
| 1488 | /* schedule napi in case we had pending packets | 1500 | /* schedule napi in case we had pending packets |
| 1489 | * from the last time we disable napi | 1501 | * from the last time we disable napi |
| 1490 | */ | 1502 | */ |
| @@ -1532,6 +1544,7 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid) | |||
| 1532 | "Failed to get TX queue handlers. TX queue num %d rc: %d\n", | 1544 | "Failed to get TX queue handlers. TX queue num %d rc: %d\n", |
| 1533 | qid, rc); | 1545 | qid, rc); |
| 1534 | ena_com_destroy_io_queue(ena_dev, ena_qid); | 1546 | ena_com_destroy_io_queue(ena_dev, ena_qid); |
| 1547 | return rc; | ||
| 1535 | } | 1548 | } |
| 1536 | 1549 | ||
| 1537 | ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node); | 1550 | ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node); |
| @@ -1596,6 +1609,7 @@ static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid) | |||
| 1596 | "Failed to get RX queue handlers. RX queue num %d rc: %d\n", | 1609 | "Failed to get RX queue handlers. RX queue num %d rc: %d\n", |
| 1597 | qid, rc); | 1610 | qid, rc); |
| 1598 | ena_com_destroy_io_queue(ena_dev, ena_qid); | 1611 | ena_com_destroy_io_queue(ena_dev, ena_qid); |
| 1612 | return rc; | ||
| 1599 | } | 1613 | } |
| 1600 | 1614 | ||
| 1601 | ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node); | 1615 | ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node); |
| @@ -1981,6 +1995,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 1981 | 1995 | ||
| 1982 | tx_info->tx_descs = nb_hw_desc; | 1996 | tx_info->tx_descs = nb_hw_desc; |
| 1983 | tx_info->last_jiffies = jiffies; | 1997 | tx_info->last_jiffies = jiffies; |
| 1998 | tx_info->print_once = 0; | ||
| 1984 | 1999 | ||
| 1985 | tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use, | 2000 | tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use, |
| 1986 | tx_ring->ring_size); | 2001 | tx_ring->ring_size); |
| @@ -2550,13 +2565,44 @@ err: | |||
| 2550 | "Reset attempt failed. Can not reset the device\n"); | 2565 | "Reset attempt failed. Can not reset the device\n"); |
| 2551 | } | 2566 | } |
| 2552 | 2567 | ||
| 2553 | static void check_for_missing_tx_completions(struct ena_adapter *adapter) | 2568 | static int check_missing_comp_in_queue(struct ena_adapter *adapter, |
| 2569 | struct ena_ring *tx_ring) | ||
| 2554 | { | 2570 | { |
| 2555 | struct ena_tx_buffer *tx_buf; | 2571 | struct ena_tx_buffer *tx_buf; |
| 2556 | unsigned long last_jiffies; | 2572 | unsigned long last_jiffies; |
| 2573 | u32 missed_tx = 0; | ||
| 2574 | int i; | ||
| 2575 | |||
| 2576 | for (i = 0; i < tx_ring->ring_size; i++) { | ||
| 2577 | tx_buf = &tx_ring->tx_buffer_info[i]; | ||
| 2578 | last_jiffies = tx_buf->last_jiffies; | ||
| 2579 | if (unlikely(last_jiffies && | ||
| 2580 | time_is_before_jiffies(last_jiffies + TX_TIMEOUT))) { | ||
| 2581 | if (!tx_buf->print_once) | ||
| 2582 | netif_notice(adapter, tx_err, adapter->netdev, | ||
| 2583 | "Found a Tx that wasn't completed on time, qid %d, index %d.\n", | ||
| 2584 | tx_ring->qid, i); | ||
| 2585 | |||
| 2586 | tx_buf->print_once = 1; | ||
| 2587 | missed_tx++; | ||
| 2588 | |||
| 2589 | if (unlikely(missed_tx > MAX_NUM_OF_TIMEOUTED_PACKETS)) { | ||
| 2590 | netif_err(adapter, tx_err, adapter->netdev, | ||
| 2591 | "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n", | ||
| 2592 | missed_tx, MAX_NUM_OF_TIMEOUTED_PACKETS); | ||
| 2593 | set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); | ||
| 2594 | return -EIO; | ||
| 2595 | } | ||
| 2596 | } | ||
| 2597 | } | ||
| 2598 | |||
| 2599 | return 0; | ||
| 2600 | } | ||
| 2601 | |||
| 2602 | static void check_for_missing_tx_completions(struct ena_adapter *adapter) | ||
| 2603 | { | ||
| 2557 | struct ena_ring *tx_ring; | 2604 | struct ena_ring *tx_ring; |
| 2558 | int i, j, budget; | 2605 | int i, budget, rc; |
| 2559 | u32 missed_tx; | ||
| 2560 | 2606 | ||
| 2561 | /* Make sure the driver doesn't turn the device in other process */ | 2607 | /* Make sure the driver doesn't turn the device in other process */ |
| 2562 | smp_rmb(); | 2608 | smp_rmb(); |
| @@ -2572,31 +2618,9 @@ static void check_for_missing_tx_completions(struct ena_adapter *adapter) | |||
| 2572 | for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) { | 2618 | for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) { |
| 2573 | tx_ring = &adapter->tx_ring[i]; | 2619 | tx_ring = &adapter->tx_ring[i]; |
| 2574 | 2620 | ||
| 2575 | for (j = 0; j < tx_ring->ring_size; j++) { | 2621 | rc = check_missing_comp_in_queue(adapter, tx_ring); |
| 2576 | tx_buf = &tx_ring->tx_buffer_info[j]; | 2622 | if (unlikely(rc)) |
| 2577 | last_jiffies = tx_buf->last_jiffies; | 2623 | return; |
| 2578 | if (unlikely(last_jiffies && time_is_before_jiffies(last_jiffies + TX_TIMEOUT))) { | ||
| 2579 | netif_notice(adapter, tx_err, adapter->netdev, | ||
| 2580 | "Found a Tx that wasn't completed on time, qid %d, index %d.\n", | ||
| 2581 | tx_ring->qid, j); | ||
| 2582 | |||
| 2583 | u64_stats_update_begin(&tx_ring->syncp); | ||
| 2584 | missed_tx = tx_ring->tx_stats.missing_tx_comp++; | ||
| 2585 | u64_stats_update_end(&tx_ring->syncp); | ||
| 2586 | |||
| 2587 | /* Clear last jiffies so the lost buffer won't | ||
| 2588 | * be counted twice. | ||
| 2589 | */ | ||
| 2590 | tx_buf->last_jiffies = 0; | ||
| 2591 | |||
| 2592 | if (unlikely(missed_tx > MAX_NUM_OF_TIMEOUTED_PACKETS)) { | ||
| 2593 | netif_err(adapter, tx_err, adapter->netdev, | ||
| 2594 | "The number of lost tx completion is above the threshold (%d > %d). Reset the device\n", | ||
| 2595 | missed_tx, MAX_NUM_OF_TIMEOUTED_PACKETS); | ||
| 2596 | set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); | ||
| 2597 | } | ||
| 2598 | } | ||
| 2599 | } | ||
| 2600 | 2624 | ||
| 2601 | budget--; | 2625 | budget--; |
| 2602 | if (!budget) | 2626 | if (!budget) |
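check_missing_comp_in_queue() above walks every slot of a Tx ring, flags buffers whose last_jiffies stamp is older than TX_TIMEOUT, warns only once per buffer via print_once, and requests a device reset when the count crosses a threshold. The same watchdog shape in a compressed, self-contained form; struct buf, stamp, warned, WD_TIMEOUT and WD_MAX_MISSED are hypothetical names, not the driver's:

    #include <linux/types.h>
    #include <linux/jiffies.h>
    #include <linux/printk.h>
    #include <linux/errno.h>

    struct buf {
        unsigned long stamp;    /* 0 means the buffer is not in flight */
        bool warned;            /* limits the log to one line per buffer */
    };

    #define WD_TIMEOUT      (5 * HZ)    /* illustrative per-buffer completion budget */
    #define WD_MAX_MISSED   32          /* illustrative reset threshold */

    /* Returns 0, or -EIO when the caller should trigger a device reset. */
    static int watchdog_scan(struct buf *bufs, int n)
    {
        int i, missed = 0;

        for (i = 0; i < n; i++) {
            if (!bufs[i].stamp ||
                !time_is_before_jiffies(bufs[i].stamp + WD_TIMEOUT))
                continue;                           /* idle, or still within budget */

            if (!bufs[i].warned)
                pr_notice("buffer %d not completed on time\n", i);
            bufs[i].warned = true;

            if (++missed > WD_MAX_MISSED)
                return -EIO;
        }
        return 0;
    }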
| @@ -2606,6 +2630,58 @@ static void check_for_missing_tx_completions(struct ena_adapter *adapter) | |||
| 2606 | adapter->last_monitored_tx_qid = i % adapter->num_queues; | 2630 | adapter->last_monitored_tx_qid = i % adapter->num_queues; |
| 2607 | } | 2631 | } |
| 2608 | 2632 | ||
| 2633 | /* trigger napi schedule after 2 consecutive detections */ | ||
| 2634 | #define EMPTY_RX_REFILL 2 | ||
| 2635 | /* For the rare case where the device runs out of Rx descriptors and the | ||
| 2636 | * napi handler failed to refill new Rx descriptors (due to a lack of memory | ||
| 2637 | * for example). | ||
| 2638 | * This case will lead to a deadlock: | ||
| 2639 | * The device won't send interrupts since all the new Rx packets will be dropped | ||
| 2640 | * The napi handler won't allocate new Rx descriptors, so the device won't be | ||
| 2641 | * able to deliver new Rx packets to the host. | ||
| 2642 | * | ||
| 2643 | * This scenario can happen when the kernel's vm.min_free_kbytes is too small. | ||
| 2644 | * It is recommended to have at least 512MB, with a minimum of 128MB for | ||
| 2645 | * constrained environments. | ||
| 2646 | * | ||
| 2647 | * When such a situation is detected - Reschedule napi | ||
| 2648 | */ | ||
| 2649 | static void check_for_empty_rx_ring(struct ena_adapter *adapter) | ||
| 2650 | { | ||
| 2651 | struct ena_ring *rx_ring; | ||
| 2652 | int i, refill_required; | ||
| 2653 | |||
| 2654 | if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) | ||
| 2655 | return; | ||
| 2656 | |||
| 2657 | if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) | ||
| 2658 | return; | ||
| 2659 | |||
| 2660 | for (i = 0; i < adapter->num_queues; i++) { | ||
| 2661 | rx_ring = &adapter->rx_ring[i]; | ||
| 2662 | |||
| 2663 | refill_required = | ||
| 2664 | ena_com_sq_empty_space(rx_ring->ena_com_io_sq); | ||
| 2665 | if (unlikely(refill_required == (rx_ring->ring_size - 1))) { | ||
| 2666 | rx_ring->empty_rx_queue++; | ||
| 2667 | |||
| 2668 | if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) { | ||
| 2669 | u64_stats_update_begin(&rx_ring->syncp); | ||
| 2670 | rx_ring->rx_stats.empty_rx_ring++; | ||
| 2671 | u64_stats_update_end(&rx_ring->syncp); | ||
| 2672 | |||
| 2673 | netif_err(adapter, drv, adapter->netdev, | ||
| 2674 | "trigger refill for ring %d\n", i); | ||
| 2675 | |||
| 2676 | napi_schedule(rx_ring->napi); | ||
| 2677 | rx_ring->empty_rx_queue = 0; | ||
| 2678 | } | ||
| 2679 | } else { | ||
| 2680 | rx_ring->empty_rx_queue = 0; | ||
| 2681 | } | ||
| 2682 | } | ||
| 2683 | } | ||
| 2684 | |||
| 2609 | /* Check for keep alive expiration */ | 2685 | /* Check for keep alive expiration */ |
| 2610 | static void check_for_missing_keep_alive(struct ena_adapter *adapter) | 2686 | static void check_for_missing_keep_alive(struct ena_adapter *adapter) |
| 2611 | { | 2687 | { |
| @@ -2660,6 +2736,8 @@ static void ena_timer_service(unsigned long data) | |||
| 2660 | 2736 | ||
| 2661 | check_for_missing_tx_completions(adapter); | 2737 | check_for_missing_tx_completions(adapter); |
| 2662 | 2738 | ||
| 2739 | check_for_empty_rx_ring(adapter); | ||
| 2740 | |||
| 2663 | if (debug_area) | 2741 | if (debug_area) |
| 2664 | ena_dump_stats_to_buf(adapter, debug_area); | 2742 | ena_dump_stats_to_buf(adapter, debug_area); |
| 2665 | 2743 | ||
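check_for_empty_rx_ring() above counts consecutive timer-service passes in which an Rx ring has no posted buffers and, after EMPTY_RX_REFILL detections, bumps the empty_rx_ring statistic and reschedules napi so the poll routine gets another chance to refill. A stripped-down sketch of that detect-and-reschedule logic; the free_space, ring_size and empty_cnt parameters are illustrative, not the driver's fields:

    #include <linux/netdevice.h>

    #define EMPTY_DETECTIONS_NEEDED 2   /* act on the second consecutive detection */

    /* Called periodically; kicks napi when the ring has stayed empty. */
    static void empty_ring_check(struct napi_struct *napi, int free_space,
                                 int ring_size, int *empty_cnt)
    {
        if (free_space != ring_size - 1) {          /* ring still holds posted buffers */
            *empty_cnt = 0;
            return;
        }

        if (++(*empty_cnt) >= EMPTY_DETECTIONS_NEEDED) {
            napi_schedule(napi);                    /* let the poll routine refill */
            *empty_cnt = 0;
        }
    }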
| @@ -2840,6 +2918,11 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev) | |||
| 2840 | { | 2918 | { |
| 2841 | int release_bars; | 2919 | int release_bars; |
| 2842 | 2920 | ||
| 2921 | if (ena_dev->mem_bar) | ||
| 2922 | devm_iounmap(&pdev->dev, ena_dev->mem_bar); | ||
| 2923 | |||
| 2924 | devm_iounmap(&pdev->dev, ena_dev->reg_bar); | ||
| 2925 | |||
| 2843 | release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; | 2926 | release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; |
| 2844 | pci_release_selected_regions(pdev, release_bars); | 2927 | pci_release_selected_regions(pdev, release_bars); |
| 2845 | } | 2928 | } |
| @@ -2927,8 +3010,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 2927 | goto err_free_ena_dev; | 3010 | goto err_free_ena_dev; |
| 2928 | } | 3011 | } |
| 2929 | 3012 | ||
| 2930 | ena_dev->reg_bar = ioremap(pci_resource_start(pdev, ENA_REG_BAR), | 3013 | ena_dev->reg_bar = devm_ioremap(&pdev->dev, |
| 2931 | pci_resource_len(pdev, ENA_REG_BAR)); | 3014 | pci_resource_start(pdev, ENA_REG_BAR), |
| 3015 | pci_resource_len(pdev, ENA_REG_BAR)); | ||
| 2932 | if (!ena_dev->reg_bar) { | 3016 | if (!ena_dev->reg_bar) { |
| 2933 | dev_err(&pdev->dev, "failed to remap regs bar\n"); | 3017 | dev_err(&pdev->dev, "failed to remap regs bar\n"); |
| 2934 | rc = -EFAULT; | 3018 | rc = -EFAULT; |
| @@ -2948,8 +3032,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 2948 | ena_set_push_mode(pdev, ena_dev, &get_feat_ctx); | 3032 | ena_set_push_mode(pdev, ena_dev, &get_feat_ctx); |
| 2949 | 3033 | ||
| 2950 | if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { | 3034 | if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { |
| 2951 | ena_dev->mem_bar = ioremap_wc(pci_resource_start(pdev, ENA_MEM_BAR), | 3035 | ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev, |
| 2952 | pci_resource_len(pdev, ENA_MEM_BAR)); | 3036 | pci_resource_start(pdev, ENA_MEM_BAR), |
| 3037 | pci_resource_len(pdev, ENA_MEM_BAR)); | ||
| 2953 | if (!ena_dev->mem_bar) { | 3038 | if (!ena_dev->mem_bar) { |
| 2954 | rc = -EFAULT; | 3039 | rc = -EFAULT; |
| 2955 | goto err_device_destroy; | 3040 | goto err_device_destroy; |
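The probe path above switches both BAR mappings to devm_ioremap()/devm_ioremap_wc(), and ena_release_bars() now unmaps them with devm_iounmap() before releasing the PCI regions, so the mappings never outlive the regions. A hedged sketch of the managed-mapping pattern; BAR index 0 and the helper names are illustrative:

    #include <linux/pci.h>
    #include <linux/device.h>
    #include <linux/io.h>

    /* Map BAR 0 with a device-managed mapping: it is torn down automatically
     * on driver detach, or earlier through devm_iounmap() when ordering matters.
     */
    static void __iomem *map_reg_bar(struct pci_dev *pdev)
    {
        return devm_ioremap(&pdev->dev,
                            pci_resource_start(pdev, 0),
                            pci_resource_len(pdev, 0));
    }

    static void unmap_reg_bar(struct pci_dev *pdev, void __iomem *bar)
    {
        if (bar)
            devm_iounmap(&pdev->dev, bar);  /* explicit unmap before releasing the regions */
    }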
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index 0e22bce6239d..a4d3d5e21068 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h | |||
| @@ -45,7 +45,7 @@ | |||
| 45 | 45 | ||
| 46 | #define DRV_MODULE_VER_MAJOR 1 | 46 | #define DRV_MODULE_VER_MAJOR 1 |
| 47 | #define DRV_MODULE_VER_MINOR 1 | 47 | #define DRV_MODULE_VER_MINOR 1 |
| 48 | #define DRV_MODULE_VER_SUBMINOR 2 | 48 | #define DRV_MODULE_VER_SUBMINOR 7 |
| 49 | 49 | ||
| 50 | #define DRV_MODULE_NAME "ena" | 50 | #define DRV_MODULE_NAME "ena" |
| 51 | #ifndef DRV_MODULE_VERSION | 51 | #ifndef DRV_MODULE_VERSION |
| @@ -146,7 +146,18 @@ struct ena_tx_buffer { | |||
| 146 | u32 tx_descs; | 146 | u32 tx_descs; |
| 147 | /* num of buffers used by this skb */ | 147 | /* num of buffers used by this skb */ |
| 148 | u32 num_of_bufs; | 148 | u32 num_of_bufs; |
| 149 | /* Save the last jiffies to detect missing tx packets */ | 149 | |
| 150 | /* Used when detecting missing tx completions to limit the number of prints */ | ||
| 151 | u32 print_once; | ||
| 152 | /* Save the last jiffies to detect missing tx packets | ||
| 153 | * | ||
| 154 | * Set to a non-zero value in ena_start_xmit and cleared to zero by | ||
| 155 | * the napi handler and the timer service routine. | ||
| 156 | * | ||
| 157 | * While this value is not protected by a lock, | ||
| 158 | * a given packet is not expected to be handled by ena_start_xmit | ||
| 159 | * and by napi/timer_service at the same time. | ||
| 160 | */ | ||
| 150 | unsigned long last_jiffies; | 161 | unsigned long last_jiffies; |
| 151 | struct ena_com_buf bufs[ENA_PKT_MAX_BUFS]; | 162 | struct ena_com_buf bufs[ENA_PKT_MAX_BUFS]; |
| 152 | } ____cacheline_aligned; | 163 | } ____cacheline_aligned; |
| @@ -170,7 +181,6 @@ struct ena_stats_tx { | |||
| 170 | u64 napi_comp; | 181 | u64 napi_comp; |
| 171 | u64 tx_poll; | 182 | u64 tx_poll; |
| 172 | u64 doorbells; | 183 | u64 doorbells; |
| 173 | u64 missing_tx_comp; | ||
| 174 | u64 bad_req_id; | 184 | u64 bad_req_id; |
| 175 | }; | 185 | }; |
| 176 | 186 | ||
| @@ -184,6 +194,7 @@ struct ena_stats_rx { | |||
| 184 | u64 dma_mapping_err; | 194 | u64 dma_mapping_err; |
| 185 | u64 bad_desc_num; | 195 | u64 bad_desc_num; |
| 186 | u64 rx_copybreak_pkt; | 196 | u64 rx_copybreak_pkt; |
| 197 | u64 empty_rx_ring; | ||
| 187 | }; | 198 | }; |
| 188 | 199 | ||
| 189 | struct ena_ring { | 200 | struct ena_ring { |
| @@ -231,6 +242,7 @@ struct ena_ring { | |||
| 231 | struct ena_stats_tx tx_stats; | 242 | struct ena_stats_tx tx_stats; |
| 232 | struct ena_stats_rx rx_stats; | 243 | struct ena_stats_rx rx_stats; |
| 233 | }; | 244 | }; |
| 245 | int empty_rx_queue; | ||
| 234 | } ____cacheline_aligned; | 246 | } ____cacheline_aligned; |
| 235 | 247 | ||
| 236 | struct ena_stats_dev { | 248 | struct ena_stats_dev { |
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c index b3bc87fe3764..0a98c369df20 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c | |||
| @@ -324,7 +324,7 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata, | |||
| 324 | struct xgbe_ring *ring, | 324 | struct xgbe_ring *ring, |
| 325 | struct xgbe_ring_data *rdata) | 325 | struct xgbe_ring_data *rdata) |
| 326 | { | 326 | { |
| 327 | int order, ret; | 327 | int ret; |
| 328 | 328 | ||
| 329 | if (!ring->rx_hdr_pa.pages) { | 329 | if (!ring->rx_hdr_pa.pages) { |
| 330 | ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0); | 330 | ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0); |
| @@ -333,9 +333,8 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata, | |||
| 333 | } | 333 | } |
| 334 | 334 | ||
| 335 | if (!ring->rx_buf_pa.pages) { | 335 | if (!ring->rx_buf_pa.pages) { |
| 336 | order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0); | ||
| 337 | ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC, | 336 | ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC, |
| 338 | order); | 337 | PAGE_ALLOC_COSTLY_ORDER); |
| 339 | if (ret) | 338 | if (ret) |
| 340 | return ret; | 339 | return ret; |
| 341 | } | 340 | } |
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h index b8e3d88f0879..a66aee51ab5b 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h | |||
| @@ -193,9 +193,6 @@ int hw_atl_utils_hw_get_regs(struct aq_hw_s *self, | |||
| 193 | struct aq_hw_caps_s *aq_hw_caps, | 193 | struct aq_hw_caps_s *aq_hw_caps, |
| 194 | u32 *regs_buff); | 194 | u32 *regs_buff); |
| 195 | 195 | ||
| 196 | int hw_atl_utils_hw_get_settings(struct aq_hw_s *self, | ||
| 197 | struct ethtool_cmd *cmd); | ||
| 198 | |||
| 199 | int hw_atl_utils_hw_set_power(struct aq_hw_s *self, | 196 | int hw_atl_utils_hw_set_power(struct aq_hw_s *self, |
| 200 | unsigned int power_state); | 197 | unsigned int power_state); |
| 201 | 198 | ||
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 099b374c1b17..5274501428e4 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c | |||
| @@ -2026,9 +2026,12 @@ static int bcm_sysport_probe(struct platform_device *pdev) | |||
| 2026 | priv->num_rx_desc_words = params->num_rx_desc_words; | 2026 | priv->num_rx_desc_words = params->num_rx_desc_words; |
| 2027 | 2027 | ||
| 2028 | priv->irq0 = platform_get_irq(pdev, 0); | 2028 | priv->irq0 = platform_get_irq(pdev, 0); |
| 2029 | if (!priv->is_lite) | 2029 | if (!priv->is_lite) { |
| 2030 | priv->irq1 = platform_get_irq(pdev, 1); | 2030 | priv->irq1 = platform_get_irq(pdev, 1); |
| 2031 | priv->wol_irq = platform_get_irq(pdev, 2); | 2031 | priv->wol_irq = platform_get_irq(pdev, 2); |
| 2032 | } else { | ||
| 2033 | priv->wol_irq = platform_get_irq(pdev, 1); | ||
| 2034 | } | ||
| 2032 | if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) { | 2035 | if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) { |
| 2033 | dev_err(&pdev->dev, "invalid interrupts\n"); | 2036 | dev_err(&pdev->dev, "invalid interrupts\n"); |
| 2034 | ret = -EINVAL; | 2037 | ret = -EINVAL; |
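The SYSTEMPORT probe above now requests different platform IRQ indices depending on whether the Lite variant is being driven, since that block exposes one fewer interrupt and its wake-on-LAN line sits at index 1. A small sketch of conditional platform_get_irq() indexing; struct my_priv and get_irqs() are hypothetical, only the index layout follows the hunk:

    #include <linux/platform_device.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    struct my_priv {
        int irq0, irq1, wol_irq;
        bool is_lite;
    };

    static int get_irqs(struct platform_device *pdev, struct my_priv *priv)
    {
        priv->irq0 = platform_get_irq(pdev, 0);
        if (!priv->is_lite) {
            priv->irq1 = platform_get_irq(pdev, 1);
            priv->wol_irq = platform_get_irq(pdev, 2);
        } else {
            priv->wol_irq = platform_get_irq(pdev, 1);  /* Lite: WoL is the second IRQ */
        }

        /* platform_get_irq() reports failure with a non-positive value */
        if (priv->irq0 <= 0 || (!priv->is_lite && priv->irq1 <= 0))
            return -EINVAL;
        return 0;
    }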
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index eccb3d1b6abb..f619c4cac51f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | |||
| @@ -1926,7 +1926,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, | |||
| 1926 | } | 1926 | } |
| 1927 | 1927 | ||
| 1928 | /* select a non-FCoE queue */ | 1928 | /* select a non-FCoE queue */ |
| 1929 | return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp); | 1929 | return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos); |
| 1930 | } | 1930 | } |
| 1931 | 1931 | ||
| 1932 | void bnx2x_set_num_queues(struct bnx2x *bp) | 1932 | void bnx2x_set_num_queues(struct bnx2x *bp) |
| @@ -3883,15 +3883,26 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 3883 | /* when transmitting in a vf, start bd must hold the ethertype | 3883 | /* when transmitting in a vf, start bd must hold the ethertype |
| 3884 | * for fw to enforce it | 3884 | * for fw to enforce it |
| 3885 | */ | 3885 | */ |
| 3886 | u16 vlan_tci = 0; | ||
| 3886 | #ifndef BNX2X_STOP_ON_ERROR | 3887 | #ifndef BNX2X_STOP_ON_ERROR |
| 3887 | if (IS_VF(bp)) | 3888 | if (IS_VF(bp)) { |
| 3888 | #endif | 3889 | #endif |
| 3889 | tx_start_bd->vlan_or_ethertype = | 3890 | /* Still need to consider inband vlan for enforced */ |
| 3890 | cpu_to_le16(ntohs(eth->h_proto)); | 3891 | if (__vlan_get_tag(skb, &vlan_tci)) { |
| 3892 | tx_start_bd->vlan_or_ethertype = | ||
| 3893 | cpu_to_le16(ntohs(eth->h_proto)); | ||
| 3894 | } else { | ||
| 3895 | tx_start_bd->bd_flags.as_bitfield |= | ||
| 3896 | (X_ETH_INBAND_VLAN << | ||
| 3897 | ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); | ||
| 3898 | tx_start_bd->vlan_or_ethertype = | ||
| 3899 | cpu_to_le16(vlan_tci); | ||
| 3900 | } | ||
| 3891 | #ifndef BNX2X_STOP_ON_ERROR | 3901 | #ifndef BNX2X_STOP_ON_ERROR |
| 3892 | else | 3902 | } else { |
| 3893 | /* used by FW for packet accounting */ | 3903 | /* used by FW for packet accounting */ |
| 3894 | tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); | 3904 | tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); |
| 3905 | } | ||
| 3895 | #endif | 3906 | #endif |
| 3896 | } | 3907 | } |
| 3897 | 3908 | ||
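For VF transmits, the hunk above now asks __vlan_get_tag() whether the frame itself carries a VLAN header; if it does, the TCI goes into the start BD with the inband-VLAN mode flag, otherwise the Ethertype is used as before. A minimal sketch of that tag lookup; tx_vlan_tci() and the untagged fallback of 0 are illustrative:

    #include <linux/if_vlan.h>
    #include <linux/skbuff.h>

    /* Return the inband VLAN TCI, or 0 when the frame is untagged. */
    static u16 tx_vlan_tci(const struct sk_buff *skb)
    {
        u16 tci;

        /* __vlan_get_tag() parses a VLAN header present in the frame data;
         * it returns 0 on success and an error for untagged frames.
         */
        if (__vlan_get_tag(skb, &tci))
            return 0;   /* untagged: the caller falls back to the Ethertype */

        return tci;
    }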
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index a851f95c307a..349a46593abf 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
| @@ -12729,7 +12729,7 @@ static int bnx2x_set_mc_list(struct bnx2x *bp) | |||
| 12729 | } else { | 12729 | } else { |
| 12730 | /* If no mc addresses are required, flush the configuration */ | 12730 | /* If no mc addresses are required, flush the configuration */ |
| 12731 | rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); | 12731 | rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); |
| 12732 | if (rc) | 12732 | if (rc < 0) |
| 12733 | BNX2X_ERR("Failed to clear multicast configuration %d\n", | 12733 | BNX2X_ERR("Failed to clear multicast configuration %d\n", |
| 12734 | rc); | 12734 | rc); |
| 12735 | } | 12735 | } |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index bdfd53b46bc5..9ca994d0bab6 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | |||
| @@ -901,6 +901,8 @@ static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf) | |||
| 901 | /* release VF resources */ | 901 | /* release VF resources */ |
| 902 | bnx2x_vf_free_resc(bp, vf); | 902 | bnx2x_vf_free_resc(bp, vf); |
| 903 | 903 | ||
| 904 | vf->malicious = false; | ||
| 905 | |||
| 904 | /* re-open the mailbox */ | 906 | /* re-open the mailbox */ |
| 905 | bnx2x_vf_enable_mbx(bp, vf->abs_vfid); | 907 | bnx2x_vf_enable_mbx(bp, vf->abs_vfid); |
| 906 | return; | 908 | return; |
| @@ -1822,9 +1824,11 @@ get_vf: | |||
| 1822 | vf->abs_vfid, qidx); | 1824 | vf->abs_vfid, qidx); |
| 1823 | bnx2x_vf_handle_rss_update_eqe(bp, vf); | 1825 | bnx2x_vf_handle_rss_update_eqe(bp, vf); |
| 1824 | case EVENT_RING_OPCODE_VF_FLR: | 1826 | case EVENT_RING_OPCODE_VF_FLR: |
| 1825 | case EVENT_RING_OPCODE_MALICIOUS_VF: | ||
| 1826 | /* Do nothing for now */ | 1827 | /* Do nothing for now */ |
| 1827 | return 0; | 1828 | return 0; |
| 1829 | case EVENT_RING_OPCODE_MALICIOUS_VF: | ||
| 1830 | vf->malicious = true; | ||
| 1831 | return 0; | ||
| 1828 | } | 1832 | } |
| 1829 | 1833 | ||
| 1830 | return 0; | 1834 | return 0; |
| @@ -1905,6 +1909,13 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) | |||
| 1905 | continue; | 1909 | continue; |
| 1906 | } | 1910 | } |
| 1907 | 1911 | ||
| 1912 | if (vf->malicious) { | ||
| 1913 | DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS), | ||
| 1914 | "vf %d malicious so no stats for it\n", | ||
| 1915 | vf->abs_vfid); | ||
| 1916 | continue; | ||
| 1917 | } | ||
| 1918 | |||
| 1908 | DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS), | 1919 | DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS), |
| 1909 | "add addresses for vf %d\n", vf->abs_vfid); | 1920 | "add addresses for vf %d\n", vf->abs_vfid); |
| 1910 | for_each_vfq(vf, j) { | 1921 | for_each_vfq(vf, j) { |
| @@ -3042,7 +3053,7 @@ void bnx2x_vf_pci_dealloc(struct bnx2x *bp) | |||
| 3042 | { | 3053 | { |
| 3043 | BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, | 3054 | BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, |
| 3044 | sizeof(struct bnx2x_vf_mbx_msg)); | 3055 | sizeof(struct bnx2x_vf_mbx_msg)); |
| 3045 | BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping, | 3056 | BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping, |
| 3046 | sizeof(union pf_vf_bulletin)); | 3057 | sizeof(union pf_vf_bulletin)); |
| 3047 | } | 3058 | } |
| 3048 | 3059 | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index 888d0b6632e8..53466f6cebab 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | |||
| @@ -141,6 +141,7 @@ struct bnx2x_virtf { | |||
| 141 | #define VF_RESET 3 /* VF FLR'd, pending cleanup */ | 141 | #define VF_RESET 3 /* VF FLR'd, pending cleanup */ |
| 142 | 142 | ||
| 143 | bool flr_clnup_stage; /* true during flr cleanup */ | 143 | bool flr_clnup_stage; /* true during flr cleanup */ |
| 144 | bool malicious; /* true if FW indicated so, until FLR */ | ||
| 144 | 145 | ||
| 145 | /* dma */ | 146 | /* dma */ |
| 146 | dma_addr_t fw_stat_map; | 147 | dma_addr_t fw_stat_map; |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 03f55daecb20..74e8e215524d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | |||
| @@ -1301,10 +1301,11 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, | |||
| 1301 | cp_cons = NEXT_CMP(cp_cons); | 1301 | cp_cons = NEXT_CMP(cp_cons); |
| 1302 | } | 1302 | } |
| 1303 | 1303 | ||
| 1304 | if (unlikely(agg_bufs > MAX_SKB_FRAGS)) { | 1304 | if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) { |
| 1305 | bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); | 1305 | bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); |
| 1306 | netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n", | 1306 | if (agg_bufs > MAX_SKB_FRAGS) |
| 1307 | agg_bufs, (int)MAX_SKB_FRAGS); | 1307 | netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n", |
| 1308 | agg_bufs, (int)MAX_SKB_FRAGS); | ||
| 1308 | return NULL; | 1309 | return NULL; |
| 1309 | } | 1310 | } |
| 1310 | 1311 | ||
| @@ -1562,6 +1563,45 @@ next_rx_no_prod: | |||
| 1562 | return rc; | 1563 | return rc; |
| 1563 | } | 1564 | } |
| 1564 | 1565 | ||
| 1566 | /* In netpoll mode, if we are using a combined completion ring, we need to | ||
| 1567 | * discard the rx packets and recycle the buffers. | ||
| 1568 | */ | ||
| 1569 | static int bnxt_force_rx_discard(struct bnxt *bp, struct bnxt_napi *bnapi, | ||
| 1570 | u32 *raw_cons, u8 *event) | ||
| 1571 | { | ||
| 1572 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; | ||
| 1573 | u32 tmp_raw_cons = *raw_cons; | ||
| 1574 | struct rx_cmp_ext *rxcmp1; | ||
| 1575 | struct rx_cmp *rxcmp; | ||
| 1576 | u16 cp_cons; | ||
| 1577 | u8 cmp_type; | ||
| 1578 | |||
| 1579 | cp_cons = RING_CMP(tmp_raw_cons); | ||
| 1580 | rxcmp = (struct rx_cmp *) | ||
| 1581 | &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; | ||
| 1582 | |||
| 1583 | tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); | ||
| 1584 | cp_cons = RING_CMP(tmp_raw_cons); | ||
| 1585 | rxcmp1 = (struct rx_cmp_ext *) | ||
| 1586 | &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; | ||
| 1587 | |||
| 1588 | if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) | ||
| 1589 | return -EBUSY; | ||
| 1590 | |||
| 1591 | cmp_type = RX_CMP_TYPE(rxcmp); | ||
| 1592 | if (cmp_type == CMP_TYPE_RX_L2_CMP) { | ||
| 1593 | rxcmp1->rx_cmp_cfa_code_errors_v2 |= | ||
| 1594 | cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); | ||
| 1595 | } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { | ||
| 1596 | struct rx_tpa_end_cmp_ext *tpa_end1; | ||
| 1597 | |||
| 1598 | tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1; | ||
| 1599 | tpa_end1->rx_tpa_end_cmp_errors_v2 |= | ||
| 1600 | cpu_to_le32(RX_TPA_END_CMP_ERRORS); | ||
| 1601 | } | ||
| 1602 | return bnxt_rx_pkt(bp, bnapi, raw_cons, event); | ||
| 1603 | } | ||
| 1604 | |||
| 1565 | #define BNXT_GET_EVENT_PORT(data) \ | 1605 | #define BNXT_GET_EVENT_PORT(data) \ |
| 1566 | ((data) & \ | 1606 | ((data) & \ |
| 1567 | ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK) | 1607 | ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK) |
| @@ -1744,7 +1784,11 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) | |||
| 1744 | if (unlikely(tx_pkts > bp->tx_wake_thresh)) | 1784 | if (unlikely(tx_pkts > bp->tx_wake_thresh)) |
| 1745 | rx_pkts = budget; | 1785 | rx_pkts = budget; |
| 1746 | } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { | 1786 | } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { |
| 1747 | rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event); | 1787 | if (likely(budget)) |
| 1788 | rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event); | ||
| 1789 | else | ||
| 1790 | rc = bnxt_force_rx_discard(bp, bnapi, &raw_cons, | ||
| 1791 | &event); | ||
| 1748 | if (likely(rc >= 0)) | 1792 | if (likely(rc >= 0)) |
| 1749 | rx_pkts += rc; | 1793 | rx_pkts += rc; |
| 1750 | else if (rc == -EBUSY) /* partial completion */ | 1794 | else if (rc == -EBUSY) /* partial completion */ |
| @@ -6663,12 +6707,11 @@ static void bnxt_poll_controller(struct net_device *dev) | |||
| 6663 | struct bnxt *bp = netdev_priv(dev); | 6707 | struct bnxt *bp = netdev_priv(dev); |
| 6664 | int i; | 6708 | int i; |
| 6665 | 6709 | ||
| 6666 | for (i = 0; i < bp->cp_nr_rings; i++) { | 6710 | /* Only process tx rings/combined rings in netpoll mode. */ |
| 6667 | struct bnxt_irq *irq = &bp->irq_tbl[i]; | 6711 | for (i = 0; i < bp->tx_nr_rings; i++) { |
| 6712 | struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; | ||
| 6668 | 6713 | ||
| 6669 | disable_irq(irq->vector); | 6714 | napi_schedule(&txr->bnapi->napi); |
| 6670 | irq->handler(irq->vector, bp->bnapi[i]); | ||
| 6671 | enable_irq(irq->vector); | ||
| 6672 | } | 6715 | } |
| 6673 | } | 6716 | } |
| 6674 | #endif | 6717 | #endif |
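The netpoll hook above no longer disables each vector and calls the IRQ handler directly; it simply schedules the napi instance behind every Tx (or combined) ring and lets the regular poll routine drain completions. A hedged sketch of that style of helper, as it might be called from an ndo_poll_controller implementation; the ring layout is hypothetical:

    #include <linux/netdevice.h>

    struct my_ring {
        struct napi_struct napi;
    };

    /* Kick napi for each ring instead of poking the interrupt vectors. */
    static void my_poll_rings(struct my_ring *rings, int nr_rings)
    {
        int i;

        for (i = 0; i < nr_rings; i++)
            napi_schedule(&rings[i].napi);
    }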
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 3ef42dbc6327..d46a85041083 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h | |||
| @@ -374,12 +374,16 @@ struct rx_tpa_end_cmp_ext { | |||
| 374 | 374 | ||
| 375 | __le32 rx_tpa_end_cmp_errors_v2; | 375 | __le32 rx_tpa_end_cmp_errors_v2; |
| 376 | #define RX_TPA_END_CMP_V2 (0x1 << 0) | 376 | #define RX_TPA_END_CMP_V2 (0x1 << 0) |
| 377 | #define RX_TPA_END_CMP_ERRORS (0x7fff << 1) | 377 | #define RX_TPA_END_CMP_ERRORS (0x3 << 1) |
| 378 | #define RX_TPA_END_CMPL_ERRORS_SHIFT 1 | 378 | #define RX_TPA_END_CMPL_ERRORS_SHIFT 1 |
| 379 | 379 | ||
| 380 | u32 rx_tpa_end_cmp_start_opaque; | 380 | u32 rx_tpa_end_cmp_start_opaque; |
| 381 | }; | 381 | }; |
| 382 | 382 | ||
| 383 | #define TPA_END_ERRORS(rx_tpa_end_ext) \ | ||
| 384 | ((rx_tpa_end_ext)->rx_tpa_end_cmp_errors_v2 & \ | ||
| 385 | cpu_to_le32(RX_TPA_END_CMP_ERRORS)) | ||
| 386 | |||
| 383 | #define DB_IDX_MASK 0xffffff | 387 | #define DB_IDX_MASK 0xffffff |
| 384 | #define DB_IDX_VALID (0x1 << 26) | 388 | #define DB_IDX_VALID (0x1 << 26) |
| 385 | #define DB_IRQ_DIS (0x1 << 27) | 389 | #define DB_IRQ_DIS (0x1 << 27) |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 38a5c6764bb5..53309f659951 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | |||
| @@ -2171,9 +2171,10 @@ static int cxgb_up(struct adapter *adap) | |||
| 2171 | { | 2171 | { |
| 2172 | int err; | 2172 | int err; |
| 2173 | 2173 | ||
| 2174 | mutex_lock(&uld_mutex); | ||
| 2174 | err = setup_sge_queues(adap); | 2175 | err = setup_sge_queues(adap); |
| 2175 | if (err) | 2176 | if (err) |
| 2176 | goto out; | 2177 | goto rel_lock; |
| 2177 | err = setup_rss(adap); | 2178 | err = setup_rss(adap); |
| 2178 | if (err) | 2179 | if (err) |
| 2179 | goto freeq; | 2180 | goto freeq; |
| @@ -2196,23 +2197,28 @@ static int cxgb_up(struct adapter *adap) | |||
| 2196 | if (err) | 2197 | if (err) |
| 2197 | goto irq_err; | 2198 | goto irq_err; |
| 2198 | } | 2199 | } |
| 2200 | |||
| 2199 | enable_rx(adap); | 2201 | enable_rx(adap); |
| 2200 | t4_sge_start(adap); | 2202 | t4_sge_start(adap); |
| 2201 | t4_intr_enable(adap); | 2203 | t4_intr_enable(adap); |
| 2202 | adap->flags |= FULL_INIT_DONE; | 2204 | adap->flags |= FULL_INIT_DONE; |
| 2205 | mutex_unlock(&uld_mutex); | ||
| 2206 | |||
| 2203 | notify_ulds(adap, CXGB4_STATE_UP); | 2207 | notify_ulds(adap, CXGB4_STATE_UP); |
| 2204 | #if IS_ENABLED(CONFIG_IPV6) | 2208 | #if IS_ENABLED(CONFIG_IPV6) |
| 2205 | update_clip(adap); | 2209 | update_clip(adap); |
| 2206 | #endif | 2210 | #endif |
| 2207 | /* Initialize hash mac addr list*/ | 2211 | /* Initialize hash mac addr list*/ |
| 2208 | INIT_LIST_HEAD(&adap->mac_hlist); | 2212 | INIT_LIST_HEAD(&adap->mac_hlist); |
| 2209 | out: | ||
| 2210 | return err; | 2213 | return err; |
| 2214 | |||
| 2211 | irq_err: | 2215 | irq_err: |
| 2212 | dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err); | 2216 | dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err); |
| 2213 | freeq: | 2217 | freeq: |
| 2214 | t4_free_sge_resources(adap); | 2218 | t4_free_sge_resources(adap); |
| 2215 | goto out; | 2219 | rel_lock: |
| 2220 | mutex_unlock(&uld_mutex); | ||
| 2221 | return err; | ||
| 2216 | } | 2222 | } |
| 2217 | 2223 | ||
| 2218 | static void cxgb_down(struct adapter *adapter) | 2224 | static void cxgb_down(struct adapter *adapter) |
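cxgb_up() above now takes uld_mutex before queue setup and releases it on every exit path, adding a rel_lock label so the error unwinding (freeq falling through to rel_lock) still drops the lock, while the success path unlocks before notifying the ULDs. A generic sketch of that lock-plus-unwind shape; the names and failure flags are hypothetical:

    #include <linux/mutex.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    static DEFINE_MUTEX(setup_mutex);

    static int bring_up(bool fail_queues, bool fail_rss)
    {
        int err = 0;

        mutex_lock(&setup_mutex);

        if (fail_queues) {          /* stage 1 (queue allocation) failed */
            err = -ENOMEM;
            goto rel_lock;
        }

        if (fail_rss) {             /* stage 2 failed: undo stage 1 first */
            err = -ENOMEM;
            goto free_queues;
        }

        mutex_unlock(&setup_mutex); /* success path drops the lock before returning */
        return 0;

    free_queues:
        /* free whatever stage 1 allocated */
    rel_lock:
        mutex_unlock(&setup_mutex);
        return err;
    }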
| @@ -2771,6 +2777,9 @@ void t4_fatal_err(struct adapter *adap) | |||
| 2771 | { | 2777 | { |
| 2772 | int port; | 2778 | int port; |
| 2773 | 2779 | ||
| 2780 | if (pci_channel_offline(adap->pdev)) | ||
| 2781 | return; | ||
| 2782 | |||
| 2774 | /* Disable the SGE since ULDs are going to free resources that | 2783 | /* Disable the SGE since ULDs are going to free resources that |
| 2775 | * could be exposed to the adapter. RDMA MWs for example... | 2784 | * could be exposed to the adapter. RDMA MWs for example... |
| 2776 | */ | 2785 | */ |
| @@ -3882,9 +3891,10 @@ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev, | |||
| 3882 | spin_lock(&adap->stats_lock); | 3891 | spin_lock(&adap->stats_lock); |
| 3883 | for_each_port(adap, i) { | 3892 | for_each_port(adap, i) { |
| 3884 | struct net_device *dev = adap->port[i]; | 3893 | struct net_device *dev = adap->port[i]; |
| 3885 | 3894 | if (dev) { | |
| 3886 | netif_device_detach(dev); | 3895 | netif_device_detach(dev); |
| 3887 | netif_carrier_off(dev); | 3896 | netif_carrier_off(dev); |
| 3897 | } | ||
| 3888 | } | 3898 | } |
| 3889 | spin_unlock(&adap->stats_lock); | 3899 | spin_unlock(&adap->stats_lock); |
| 3890 | disable_interrupts(adap); | 3900 | disable_interrupts(adap); |
| @@ -3963,12 +3973,13 @@ static void eeh_resume(struct pci_dev *pdev) | |||
| 3963 | rtnl_lock(); | 3973 | rtnl_lock(); |
| 3964 | for_each_port(adap, i) { | 3974 | for_each_port(adap, i) { |
| 3965 | struct net_device *dev = adap->port[i]; | 3975 | struct net_device *dev = adap->port[i]; |
| 3966 | 3976 | if (dev) { | |
| 3967 | if (netif_running(dev)) { | 3977 | if (netif_running(dev)) { |
| 3968 | link_start(dev); | 3978 | link_start(dev); |
| 3969 | cxgb_set_rxmode(dev); | 3979 | cxgb_set_rxmode(dev); |
| 3980 | } | ||
| 3981 | netif_device_attach(dev); | ||
| 3970 | } | 3982 | } |
| 3971 | netif_device_attach(dev); | ||
| 3972 | } | 3983 | } |
| 3973 | rtnl_unlock(); | 3984 | rtnl_unlock(); |
| 3974 | } | 3985 | } |
| @@ -4516,7 +4527,7 @@ static void dummy_setup(struct net_device *dev) | |||
| 4516 | /* Initialize the device structure. */ | 4527 | /* Initialize the device structure. */ |
| 4517 | dev->netdev_ops = &cxgb4_mgmt_netdev_ops; | 4528 | dev->netdev_ops = &cxgb4_mgmt_netdev_ops; |
| 4518 | dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops; | 4529 | dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops; |
| 4519 | dev->destructor = free_netdev; | 4530 | dev->needs_free_netdev = true; |
| 4520 | } | 4531 | } |
| 4521 | 4532 | ||
| 4522 | static int config_mgmt_dev(struct pci_dev *pdev) | 4533 | static int config_mgmt_dev(struct pci_dev *pdev) |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index aded42b96f6d..3a34aa629f7d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | |||
| @@ -4557,8 +4557,13 @@ void t4_intr_enable(struct adapter *adapter) | |||
| 4557 | */ | 4557 | */ |
| 4558 | void t4_intr_disable(struct adapter *adapter) | 4558 | void t4_intr_disable(struct adapter *adapter) |
| 4559 | { | 4559 | { |
| 4560 | u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A); | 4560 | u32 whoami, pf; |
| 4561 | u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ? | 4561 | |
| 4562 | if (pci_channel_offline(adapter->pdev)) | ||
| 4563 | return; | ||
| 4564 | |||
| 4565 | whoami = t4_read_reg(adapter, PL_WHOAMI_A); | ||
| 4566 | pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ? | ||
| 4562 | SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami); | 4567 | SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami); |
| 4563 | 4568 | ||
| 4564 | t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0); | 4569 | t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0); |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h index 3549d3876278..f2d623a7aee0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h | |||
| @@ -37,7 +37,7 @@ | |||
| 37 | 37 | ||
| 38 | #define T4FW_VERSION_MAJOR 0x01 | 38 | #define T4FW_VERSION_MAJOR 0x01 |
| 39 | #define T4FW_VERSION_MINOR 0x10 | 39 | #define T4FW_VERSION_MINOR 0x10 |
| 40 | #define T4FW_VERSION_MICRO 0x2B | 40 | #define T4FW_VERSION_MICRO 0x2D |
| 41 | #define T4FW_VERSION_BUILD 0x00 | 41 | #define T4FW_VERSION_BUILD 0x00 |
| 42 | 42 | ||
| 43 | #define T4FW_MIN_VERSION_MAJOR 0x01 | 43 | #define T4FW_MIN_VERSION_MAJOR 0x01 |
| @@ -46,7 +46,7 @@ | |||
| 46 | 46 | ||
| 47 | #define T5FW_VERSION_MAJOR 0x01 | 47 | #define T5FW_VERSION_MAJOR 0x01 |
| 48 | #define T5FW_VERSION_MINOR 0x10 | 48 | #define T5FW_VERSION_MINOR 0x10 |
| 49 | #define T5FW_VERSION_MICRO 0x2B | 49 | #define T5FW_VERSION_MICRO 0x2D |
| 50 | #define T5FW_VERSION_BUILD 0x00 | 50 | #define T5FW_VERSION_BUILD 0x00 |
| 51 | 51 | ||
| 52 | #define T5FW_MIN_VERSION_MAJOR 0x00 | 52 | #define T5FW_MIN_VERSION_MAJOR 0x00 |
| @@ -55,7 +55,7 @@ | |||
| 55 | 55 | ||
| 56 | #define T6FW_VERSION_MAJOR 0x01 | 56 | #define T6FW_VERSION_MAJOR 0x01 |
| 57 | #define T6FW_VERSION_MINOR 0x10 | 57 | #define T6FW_VERSION_MINOR 0x10 |
| 58 | #define T6FW_VERSION_MICRO 0x2B | 58 | #define T6FW_VERSION_MICRO 0x2D |
| 59 | #define T6FW_VERSION_BUILD 0x00 | 59 | #define T6FW_VERSION_BUILD 0x00 |
| 60 | 60 | ||
| 61 | #define T6FW_MIN_VERSION_MAJOR 0x00 | 61 | #define T6FW_MIN_VERSION_MAJOR 0x00 |
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index e863ba74d005..8bb0db990c8f 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c | |||
| @@ -739,6 +739,8 @@ static int ethoc_open(struct net_device *dev) | |||
| 739 | if (ret) | 739 | if (ret) |
| 740 | return ret; | 740 | return ret; |
| 741 | 741 | ||
| 742 | napi_enable(&priv->napi); | ||
| 743 | |||
| 742 | ethoc_init_ring(priv, dev->mem_start); | 744 | ethoc_init_ring(priv, dev->mem_start); |
| 743 | ethoc_reset(priv); | 745 | ethoc_reset(priv); |
| 744 | 746 | ||
| @@ -754,7 +756,6 @@ static int ethoc_open(struct net_device *dev) | |||
| 754 | priv->old_duplex = -1; | 756 | priv->old_duplex = -1; |
| 755 | 757 | ||
| 756 | phy_start(dev->phydev); | 758 | phy_start(dev->phydev); |
| 757 | napi_enable(&priv->napi); | ||
| 758 | 759 | ||
| 759 | if (netif_msg_ifup(priv)) { | 760 | if (netif_msg_ifup(priv)) { |
| 760 | dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n", | 761 | dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n", |
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 9a520e4f0df9..290ad0563320 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | |||
| @@ -2647,7 +2647,7 @@ static int dpaa_eth_probe(struct platform_device *pdev) | |||
| 2647 | priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */ | 2647 | priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */ |
| 2648 | 2648 | ||
| 2649 | /* device used for DMA mapping */ | 2649 | /* device used for DMA mapping */ |
| 2650 | arch_setup_dma_ops(dev, 0, 0, NULL, false); | 2650 | set_dma_ops(dev, get_dma_ops(&pdev->dev)); |
| 2651 | err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40)); | 2651 | err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40)); |
| 2652 | if (err) { | 2652 | if (err) { |
| 2653 | dev_err(dev, "dma_coerce_mask_and_coherent() failed\n"); | 2653 | dev_err(dev, "dma_coerce_mask_and_coherent() failed\n"); |
diff --git a/drivers/net/ethernet/freescale/fman/Kconfig b/drivers/net/ethernet/freescale/fman/Kconfig index dc0850b3b517..8870a9a798ca 100644 --- a/drivers/net/ethernet/freescale/fman/Kconfig +++ b/drivers/net/ethernet/freescale/fman/Kconfig | |||
| @@ -2,6 +2,7 @@ config FSL_FMAN | |||
| 2 | tristate "FMan support" | 2 | tristate "FMan support" |
| 3 | depends on FSL_SOC || ARCH_LAYERSCAPE || COMPILE_TEST | 3 | depends on FSL_SOC || ARCH_LAYERSCAPE || COMPILE_TEST |
| 4 | select GENERIC_ALLOCATOR | 4 | select GENERIC_ALLOCATOR |
| 5 | depends on HAS_DMA | ||
| 5 | select PHYLIB | 6 | select PHYLIB |
| 6 | default n | 7 | default n |
| 7 | help | 8 | help |
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index 0b31f8502ada..6e67d22fd0d5 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c | |||
| @@ -623,6 +623,8 @@ static struct platform_device *dpaa_eth_add_device(int fman_id, | |||
| 623 | goto no_mem; | 623 | goto no_mem; |
| 624 | } | 624 | } |
| 625 | 625 | ||
| 626 | set_dma_ops(&pdev->dev, get_dma_ops(priv->dev)); | ||
| 627 | |||
| 626 | ret = platform_device_add_data(pdev, &data, sizeof(data)); | 628 | ret = platform_device_add_data(pdev, &data, sizeof(data)); |
| 627 | if (ret) | 629 | if (ret) |
| 628 | goto err; | 630 | goto err; |
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index 446c7b374ff5..a10de1e9c157 100644 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c | |||
| @@ -381,7 +381,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) | |||
| 381 | { | 381 | { |
| 382 | const struct of_device_id *id = | 382 | const struct of_device_id *id = |
| 383 | of_match_device(fsl_pq_mdio_match, &pdev->dev); | 383 | of_match_device(fsl_pq_mdio_match, &pdev->dev); |
| 384 | const struct fsl_pq_mdio_data *data = id->data; | 384 | const struct fsl_pq_mdio_data *data; |
| 385 | struct device_node *np = pdev->dev.of_node; | 385 | struct device_node *np = pdev->dev.of_node; |
| 386 | struct resource res; | 386 | struct resource res; |
| 387 | struct device_node *tbi; | 387 | struct device_node *tbi; |
| @@ -389,6 +389,13 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) | |||
| 389 | struct mii_bus *new_bus; | 389 | struct mii_bus *new_bus; |
| 390 | int err; | 390 | int err; |
| 391 | 391 | ||
| 392 | if (!id) { | ||
| 393 | dev_err(&pdev->dev, "Failed to match device\n"); | ||
| 394 | return -ENODEV; | ||
| 395 | } | ||
| 396 | |||
| 397 | data = id->data; | ||
| 398 | |||
| 392 | dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible); | 399 | dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible); |
| 393 | 400 | ||
| 394 | new_bus = mdiobus_alloc_size(sizeof(*priv)); | 401 | new_bus = mdiobus_alloc_size(sizeof(*priv)); |
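The fsl_pq_mdio change above stops dereferencing the of_device_id before checking it: of_match_device() can return NULL, so id->data is only read after the match is confirmed. A short sketch of the guarded lookup; my_probe_cfg() and its arguments are illustrative:

    #include <linux/of_device.h>
    #include <linux/platform_device.h>
    #include <linux/err.h>
    #include <linux/errno.h>

    static const void *my_probe_cfg(struct platform_device *pdev,
                                    const struct of_device_id *match_table)
    {
        const struct of_device_id *id;

        id = of_match_device(match_table, &pdev->dev);
        if (!id) {
            dev_err(&pdev->dev, "failed to match device\n");
            return ERR_PTR(-ENODEV);
        }

        return id->data;    /* safe: only dereferenced after the NULL check */
    }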
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index b8fab149690f..e95795b3c841 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | |||
| @@ -288,9 +288,15 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en) | |||
| 288 | 288 | ||
| 289 | /* Force 1000M Link, Default is 0x0200 */ | 289 | /* Force 1000M Link, Default is 0x0200 */ |
| 290 | phy_write(phy_dev, 7, 0x20C); | 290 | phy_write(phy_dev, 7, 0x20C); |
| 291 | phy_write(phy_dev, HNS_PHY_PAGE_REG, 0); | ||
| 292 | 291 | ||
| 293 | /* Enable PHY loop-back */ | 292 | /* Powerup Fiber */ |
| 293 | phy_write(phy_dev, HNS_PHY_PAGE_REG, 1); | ||
| 294 | val = phy_read(phy_dev, COPPER_CONTROL_REG); | ||
| 295 | val &= ~PHY_POWER_DOWN; | ||
| 296 | phy_write(phy_dev, COPPER_CONTROL_REG, val); | ||
| 297 | |||
| 298 | /* Enable Phy Loopback */ | ||
| 299 | phy_write(phy_dev, HNS_PHY_PAGE_REG, 0); | ||
| 294 | val = phy_read(phy_dev, COPPER_CONTROL_REG); | 300 | val = phy_read(phy_dev, COPPER_CONTROL_REG); |
| 295 | val |= PHY_LOOP_BACK; | 301 | val |= PHY_LOOP_BACK; |
| 296 | val &= ~PHY_POWER_DOWN; | 302 | val &= ~PHY_POWER_DOWN; |
| @@ -299,6 +305,12 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en) | |||
| 299 | phy_write(phy_dev, HNS_PHY_PAGE_REG, 0xFA); | 305 | phy_write(phy_dev, HNS_PHY_PAGE_REG, 0xFA); |
| 300 | phy_write(phy_dev, 1, 0x400); | 306 | phy_write(phy_dev, 1, 0x400); |
| 301 | phy_write(phy_dev, 7, 0x200); | 307 | phy_write(phy_dev, 7, 0x200); |
| 308 | |||
| 309 | phy_write(phy_dev, HNS_PHY_PAGE_REG, 1); | ||
| 310 | val = phy_read(phy_dev, COPPER_CONTROL_REG); | ||
| 311 | val |= PHY_POWER_DOWN; | ||
| 312 | phy_write(phy_dev, COPPER_CONTROL_REG, val); | ||
| 313 | |||
| 302 | phy_write(phy_dev, HNS_PHY_PAGE_REG, 0); | 314 | phy_write(phy_dev, HNS_PHY_PAGE_REG, 0); |
| 303 | phy_write(phy_dev, 9, 0xF00); | 315 | phy_write(phy_dev, 9, 0xF00); |
| 304 | 316 | ||
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 508923f39ccf..259e69a52ec5 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c | |||
| @@ -343,6 +343,7 @@ static int emac_reset(struct emac_instance *dev) | |||
| 343 | { | 343 | { |
| 344 | struct emac_regs __iomem *p = dev->emacp; | 344 | struct emac_regs __iomem *p = dev->emacp; |
| 345 | int n = 20; | 345 | int n = 20; |
| 346 | bool __maybe_unused try_internal_clock = false; | ||
| 346 | 347 | ||
| 347 | DBG(dev, "reset" NL); | 348 | DBG(dev, "reset" NL); |
| 348 | 349 | ||
| @@ -355,6 +356,7 @@ static int emac_reset(struct emac_instance *dev) | |||
| 355 | } | 356 | } |
| 356 | 357 | ||
| 357 | #ifdef CONFIG_PPC_DCR_NATIVE | 358 | #ifdef CONFIG_PPC_DCR_NATIVE |
| 359 | do_retry: | ||
| 358 | /* | 360 | /* |
| 359 | * PPC460EX/GT Embedded Processor Advanced User's Manual | 361 | * PPC460EX/GT Embedded Processor Advanced User's Manual |
| 360 | * section 28.10.1 Mode Register 0 (EMACx_MR0) states: | 362 | * section 28.10.1 Mode Register 0 (EMACx_MR0) states: |
| @@ -362,10 +364,19 @@ static int emac_reset(struct emac_instance *dev) | |||
| 362 | * of the EMAC. If none is present, select the internal clock | 364 | * of the EMAC. If none is present, select the internal clock |
| 363 | * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1). | 365 | * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1). |
| 364 | * After a soft reset, select the external clock. | 366 | * After a soft reset, select the external clock. |
| 367 | * | ||
| 368 | * The AR8035-A PHY on the Meraki MR24 does not provide a TX Clk if the | ||
| 369 | * ethernet cable is not attached. This causes the reset to time out | ||
| 370 | * and the PHY detection code in emac_init_phy() is unable to | ||
| 371 | * communicate and detect the AR8035-A PHY. As a result, the emac | ||
| 372 | * driver bails out early and the user has no ethernet. | ||
| 373 | * In order to stay compatible with existing configurations, the | ||
| 374 | * driver will temporarily switch to the internal clock, after | ||
| 375 | * the first reset fails. | ||
| 365 | */ | 376 | */ |
| 366 | if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { | 377 | if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { |
| 367 | if (dev->phy_address == 0xffffffff && | 378 | if (try_internal_clock || (dev->phy_address == 0xffffffff && |
| 368 | dev->phy_map == 0xffffffff) { | 379 | dev->phy_map == 0xffffffff)) { |
| 369 | /* No PHY: select internal loop clock before reset */ | 380 | /* No PHY: select internal loop clock before reset */ |
| 370 | dcri_clrset(SDR0, SDR0_ETH_CFG, | 381 | dcri_clrset(SDR0, SDR0_ETH_CFG, |
| 371 | 0, SDR0_ETH_CFG_ECS << dev->cell_index); | 382 | 0, SDR0_ETH_CFG_ECS << dev->cell_index); |
| @@ -383,8 +394,15 @@ static int emac_reset(struct emac_instance *dev) | |||
| 383 | 394 | ||
| 384 | #ifdef CONFIG_PPC_DCR_NATIVE | 395 | #ifdef CONFIG_PPC_DCR_NATIVE |
| 385 | if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { | 396 | if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { |
| 386 | if (dev->phy_address == 0xffffffff && | 397 | if (!n && !try_internal_clock) { |
| 387 | dev->phy_map == 0xffffffff) { | 398 | /* first attempt has timed out. */ |
| 399 | n = 20; | ||
| 400 | try_internal_clock = true; | ||
| 401 | goto do_retry; | ||
| 402 | } | ||
| 403 | |||
| 404 | if (try_internal_clock || (dev->phy_address == 0xffffffff && | ||
| 405 | dev->phy_map == 0xffffffff)) { | ||
| 388 | /* No PHY: restore external clock source after reset */ | 406 | /* No PHY: restore external clock source after reset */ |
| 389 | dcri_clrset(SDR0, SDR0_ETH_CFG, | 407 | dcri_clrset(SDR0, SDR0_ETH_CFG, |
| 390 | SDR0_ETH_CFG_ECS << dev->cell_index, 0); | 408 | SDR0_ETH_CFG_ECS << dev->cell_index, 0); |
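The emac_reset() change above retries the soft reset once with the internal clock selected when the first, externally clocked attempt times out, driven by a try_internal_clock flag and a do_retry label; existing setups still get the external clock on the first pass. A generic sketch of that retry-once-with-fallback shape; the callback and names are hypothetical:

    #include <linux/types.h>

    /* try_reset() returns 0 on success or a negative error on timeout. */
    static int reset_with_fallback(int (*try_reset)(void *hw, bool internal_clk),
                                   void *hw)
    {
        bool internal_clk = false;
        int err;

    retry:
        err = try_reset(hw, internal_clk);
        if (err && !internal_clk) {
            internal_clk = true;    /* switch clock source and retry exactly once */
            goto retry;
        }
        return err;
    }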
| @@ -2460,20 +2478,24 @@ static int emac_mii_bus_reset(struct mii_bus *bus) | |||
| 2460 | return emac_reset(dev); | 2478 | return emac_reset(dev); |
| 2461 | } | 2479 | } |
| 2462 | 2480 | ||
| 2481 | static int emac_mdio_phy_start_aneg(struct mii_phy *phy, | ||
| 2482 | struct phy_device *phy_dev) | ||
| 2483 | { | ||
| 2484 | phy_dev->autoneg = phy->autoneg; | ||
| 2485 | phy_dev->speed = phy->speed; | ||
| 2486 | phy_dev->duplex = phy->duplex; | ||
| 2487 | phy_dev->advertising = phy->advertising; | ||
| 2488 | return phy_start_aneg(phy_dev); | ||
| 2489 | } | ||
| 2490 | |||
| 2463 | static int emac_mdio_setup_aneg(struct mii_phy *phy, u32 advertise) | 2491 | static int emac_mdio_setup_aneg(struct mii_phy *phy, u32 advertise) |
| 2464 | { | 2492 | { |
| 2465 | struct net_device *ndev = phy->dev; | 2493 | struct net_device *ndev = phy->dev; |
| 2466 | struct emac_instance *dev = netdev_priv(ndev); | 2494 | struct emac_instance *dev = netdev_priv(ndev); |
| 2467 | 2495 | ||
| 2468 | dev->phy.autoneg = AUTONEG_ENABLE; | ||
| 2469 | dev->phy.speed = SPEED_1000; | ||
| 2470 | dev->phy.duplex = DUPLEX_FULL; | ||
| 2471 | dev->phy.advertising = advertise; | ||
| 2472 | phy->autoneg = AUTONEG_ENABLE; | 2496 | phy->autoneg = AUTONEG_ENABLE; |
| 2473 | phy->speed = dev->phy.speed; | ||
| 2474 | phy->duplex = dev->phy.duplex; | ||
| 2475 | phy->advertising = advertise; | 2497 | phy->advertising = advertise; |
| 2476 | return phy_start_aneg(dev->phy_dev); | 2498 | return emac_mdio_phy_start_aneg(phy, dev->phy_dev); |
| 2477 | } | 2499 | } |
| 2478 | 2500 | ||
| 2479 | static int emac_mdio_setup_forced(struct mii_phy *phy, int speed, int fd) | 2501 | static int emac_mdio_setup_forced(struct mii_phy *phy, int speed, int fd) |
| @@ -2481,13 +2503,10 @@ static int emac_mdio_setup_forced(struct mii_phy *phy, int speed, int fd) | |||
| 2481 | struct net_device *ndev = phy->dev; | 2503 | struct net_device *ndev = phy->dev; |
| 2482 | struct emac_instance *dev = netdev_priv(ndev); | 2504 | struct emac_instance *dev = netdev_priv(ndev); |
| 2483 | 2505 | ||
| 2484 | dev->phy.autoneg = AUTONEG_DISABLE; | ||
| 2485 | dev->phy.speed = speed; | ||
| 2486 | dev->phy.duplex = fd; | ||
| 2487 | phy->autoneg = AUTONEG_DISABLE; | 2506 | phy->autoneg = AUTONEG_DISABLE; |
| 2488 | phy->speed = speed; | 2507 | phy->speed = speed; |
| 2489 | phy->duplex = fd; | 2508 | phy->duplex = fd; |
| 2490 | return phy_start_aneg(dev->phy_dev); | 2509 | return emac_mdio_phy_start_aneg(phy, dev->phy_dev); |
| 2491 | } | 2510 | } |
| 2492 | 2511 | ||
| 2493 | static int emac_mdio_poll_link(struct mii_phy *phy) | 2512 | static int emac_mdio_poll_link(struct mii_phy *phy) |
| @@ -2509,16 +2528,17 @@ static int emac_mdio_read_link(struct mii_phy *phy) | |||
| 2509 | { | 2528 | { |
| 2510 | struct net_device *ndev = phy->dev; | 2529 | struct net_device *ndev = phy->dev; |
| 2511 | struct emac_instance *dev = netdev_priv(ndev); | 2530 | struct emac_instance *dev = netdev_priv(ndev); |
| 2531 | struct phy_device *phy_dev = dev->phy_dev; | ||
| 2512 | int res; | 2532 | int res; |
| 2513 | 2533 | ||
| 2514 | res = phy_read_status(dev->phy_dev); | 2534 | res = phy_read_status(phy_dev); |
| 2515 | if (res) | 2535 | if (res) |
| 2516 | return res; | 2536 | return res; |
| 2517 | 2537 | ||
| 2518 | dev->phy.speed = phy->speed; | 2538 | phy->speed = phy_dev->speed; |
| 2519 | dev->phy.duplex = phy->duplex; | 2539 | phy->duplex = phy_dev->duplex; |
| 2520 | dev->phy.pause = phy->pause; | 2540 | phy->pause = phy_dev->pause; |
| 2521 | dev->phy.asym_pause = phy->asym_pause; | 2541 | phy->asym_pause = phy_dev->asym_pause; |
| 2522 | return 0; | 2542 | return 0; |
| 2523 | } | 2543 | } |
| 2524 | 2544 | ||
| @@ -2528,13 +2548,6 @@ static int emac_mdio_init_phy(struct mii_phy *phy) | |||
| 2528 | struct emac_instance *dev = netdev_priv(ndev); | 2548 | struct emac_instance *dev = netdev_priv(ndev); |
| 2529 | 2549 | ||
| 2530 | phy_start(dev->phy_dev); | 2550 | phy_start(dev->phy_dev); |
| 2531 | dev->phy.autoneg = phy->autoneg; | ||
| 2532 | dev->phy.speed = phy->speed; | ||
| 2533 | dev->phy.duplex = phy->duplex; | ||
| 2534 | dev->phy.advertising = phy->advertising; | ||
| 2535 | dev->phy.pause = phy->pause; | ||
| 2536 | dev->phy.asym_pause = phy->asym_pause; | ||
| 2537 | |||
| 2538 | return phy_init_hw(dev->phy_dev); | 2551 | return phy_init_hw(dev->phy_dev); |
| 2539 | } | 2552 | } |
| 2540 | 2553 | ||
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 4f2d329dba99..c0fbeb387db4 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c | |||
| @@ -81,7 +81,7 @@ | |||
| 81 | static const char ibmvnic_driver_name[] = "ibmvnic"; | 81 | static const char ibmvnic_driver_name[] = "ibmvnic"; |
| 82 | static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver"; | 82 | static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver"; |
| 83 | 83 | ||
| 84 | MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>"); | 84 | MODULE_AUTHOR("Santiago Leon"); |
| 85 | MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver"); | 85 | MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver"); |
| 86 | MODULE_LICENSE("GPL"); | 86 | MODULE_LICENSE("GPL"); |
| 87 | MODULE_VERSION(IBMVNIC_DRIVER_VERSION); | 87 | MODULE_VERSION(IBMVNIC_DRIVER_VERSION); |
| @@ -1468,6 +1468,11 @@ static void ibmvnic_netpoll_controller(struct net_device *dev) | |||
| 1468 | } | 1468 | } |
| 1469 | #endif | 1469 | #endif |
| 1470 | 1470 | ||
| 1471 | static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu) | ||
| 1472 | { | ||
| 1473 | return -EOPNOTSUPP; | ||
| 1474 | } | ||
| 1475 | |||
| 1471 | static const struct net_device_ops ibmvnic_netdev_ops = { | 1476 | static const struct net_device_ops ibmvnic_netdev_ops = { |
| 1472 | .ndo_open = ibmvnic_open, | 1477 | .ndo_open = ibmvnic_open, |
| 1473 | .ndo_stop = ibmvnic_close, | 1478 | .ndo_stop = ibmvnic_close, |
| @@ -1479,6 +1484,7 @@ static const struct net_device_ops ibmvnic_netdev_ops = { | |||
| 1479 | #ifdef CONFIG_NET_POLL_CONTROLLER | 1484 | #ifdef CONFIG_NET_POLL_CONTROLLER |
| 1480 | .ndo_poll_controller = ibmvnic_netpoll_controller, | 1485 | .ndo_poll_controller = ibmvnic_netpoll_controller, |
| 1481 | #endif | 1486 | #endif |
| 1487 | .ndo_change_mtu = ibmvnic_change_mtu, | ||
| 1482 | }; | 1488 | }; |
| 1483 | 1489 | ||
| 1484 | /* ethtool functions */ | 1490 | /* ethtool functions */ |
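The ibmvnic hunks add a .ndo_change_mtu hook that simply rejects MTU changes with -EOPNOTSUPP instead of letting the core path accept them. A stand-alone sketch of that "explicitly unsupported op" shape, with a toy function-pointer table standing in for struct net_device_ops:

#include <errno.h>
#include <stdio.h>

/* Toy stand-in for struct net_device_ops: only the hook we care about. */
struct toy_netdev_ops {
    int (*change_mtu)(int new_mtu);
};

/* Reject every MTU change, as the ibmvnic stub does. */
static int toy_change_mtu(int new_mtu)
{
    (void)new_mtu;
    return -EOPNOTSUPP;
}

static const struct toy_netdev_ops toy_ops = {
    .change_mtu = toy_change_mtu,
};

int main(void)
{
    int err = toy_ops.change_mtu(9000);

    printf("change_mtu(9000) -> %d (%s)\n", err,
           err == -EOPNOTSUPP ? "operation not supported" : "unexpected");
    return 0;
}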
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index cdde3cc28fb5..44d9610f7a15 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h | |||
| @@ -399,6 +399,7 @@ struct i40e_pf { | |||
| 399 | #define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(1) | 399 | #define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(1) |
| 400 | #define I40E_FLAG_MSI_ENABLED BIT_ULL(2) | 400 | #define I40E_FLAG_MSI_ENABLED BIT_ULL(2) |
| 401 | #define I40E_FLAG_MSIX_ENABLED BIT_ULL(3) | 401 | #define I40E_FLAG_MSIX_ENABLED BIT_ULL(3) |
| 402 | #define I40E_FLAG_HW_ATR_EVICT_ENABLED BIT_ULL(4) | ||
| 402 | #define I40E_FLAG_RSS_ENABLED BIT_ULL(6) | 403 | #define I40E_FLAG_RSS_ENABLED BIT_ULL(6) |
| 403 | #define I40E_FLAG_VMDQ_ENABLED BIT_ULL(7) | 404 | #define I40E_FLAG_VMDQ_ENABLED BIT_ULL(7) |
| 404 | #define I40E_FLAG_IWARP_ENABLED BIT_ULL(10) | 405 | #define I40E_FLAG_IWARP_ENABLED BIT_ULL(10) |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 7a8eb486b9ea..894c8e57ba00 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c | |||
| @@ -224,7 +224,7 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = { | |||
| 224 | I40E_PRIV_FLAG("LinkPolling", I40E_FLAG_LINK_POLLING_ENABLED, 0), | 224 | I40E_PRIV_FLAG("LinkPolling", I40E_FLAG_LINK_POLLING_ENABLED, 0), |
| 225 | I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENABLED, 0), | 225 | I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENABLED, 0), |
| 226 | I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0), | 226 | I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0), |
| 227 | I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_CAPABLE, 0), | 227 | I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_ENABLED, 0), |
| 228 | I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0), | 228 | I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0), |
| 229 | }; | 229 | }; |
| 230 | 230 | ||
| @@ -4092,7 +4092,7 @@ flags_complete: | |||
| 4092 | 4092 | ||
| 4093 | /* Only allow ATR evict on hardware that is capable of handling it */ | 4093 | /* Only allow ATR evict on hardware that is capable of handling it */ |
| 4094 | if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) | 4094 | if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) |
| 4095 | pf->flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE; | 4095 | pf->flags &= ~I40E_FLAG_HW_ATR_EVICT_ENABLED; |
| 4096 | 4096 | ||
| 4097 | if (changed_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT) { | 4097 | if (changed_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT) { |
| 4098 | u16 sw_flags = 0, valid_flags = 0; | 4098 | u16 sw_flags = 0, valid_flags = 0; |
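The i40e ethtool hunks above switch the "hw-atr-eviction" private flag from the hardware-capability bit to a new I40E_FLAG_HW_ATR_EVICT_ENABLED bit, separating what the NIC can do from what the user has turned on. A stand-alone sketch of that capable/enabled split; the bit positions and helper names are illustrative, not i40e's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FLAG_ATR_EVICT_CAPABLE (1ULL << 4)  /* what the hardware supports   */
#define FLAG_ATR_EVICT_ENABLED (1ULL << 5)  /* what the user has enabled    */

/* At init, the feature defaults to on, but only if the HW is capable. */
static void init_flags(uint64_t *flags, bool hw_supports_evict)
{
    if (hw_supports_evict)
        *flags |= FLAG_ATR_EVICT_CAPABLE;
    if (*flags & FLAG_ATR_EVICT_CAPABLE)
        *flags |= FLAG_ATR_EVICT_ENABLED;
}

/* The private flag only flips the ENABLED bit; CAPABLE never changes. */
static int set_evict_enabled(uint64_t *flags, bool enable)
{
    if (!(*flags & FLAG_ATR_EVICT_CAPABLE))
        return -1;                          /* not capable: refuse */
    if (enable)
        *flags |= FLAG_ATR_EVICT_ENABLED;
    else
        *flags &= ~FLAG_ATR_EVICT_ENABLED;
    return 0;
}

int main(void)
{
    uint64_t flags = 0;

    init_flags(&flags, true);
    printf("enabled after init: %d\n", !!(flags & FLAG_ATR_EVICT_ENABLED));
    set_evict_enabled(&flags, false);
    printf("enabled after ethtool off: %d\n", !!(flags & FLAG_ATR_EVICT_ENABLED));
    return 0;
}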
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index d5c9c9e06ff5..a7a4b28b4144 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c | |||
| @@ -295,7 +295,7 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id) | |||
| 295 | **/ | 295 | **/ |
| 296 | void i40e_service_event_schedule(struct i40e_pf *pf) | 296 | void i40e_service_event_schedule(struct i40e_pf *pf) |
| 297 | { | 297 | { |
| 298 | if (!test_bit(__I40E_VSI_DOWN, pf->state) && | 298 | if (!test_bit(__I40E_DOWN, pf->state) && |
| 299 | !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) | 299 | !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) |
| 300 | queue_work(i40e_wq, &pf->service_task); | 300 | queue_work(i40e_wq, &pf->service_task); |
| 301 | } | 301 | } |
| @@ -3611,7 +3611,7 @@ static irqreturn_t i40e_intr(int irq, void *data) | |||
| 3611 | * this is not a performance path and napi_schedule() | 3611 | * this is not a performance path and napi_schedule() |
| 3612 | * can deal with rescheduling. | 3612 | * can deal with rescheduling. |
| 3613 | */ | 3613 | */ |
| 3614 | if (!test_bit(__I40E_VSI_DOWN, pf->state)) | 3614 | if (!test_bit(__I40E_DOWN, pf->state)) |
| 3615 | napi_schedule_irqoff(&q_vector->napi); | 3615 | napi_schedule_irqoff(&q_vector->napi); |
| 3616 | } | 3616 | } |
| 3617 | 3617 | ||
| @@ -3687,7 +3687,7 @@ static irqreturn_t i40e_intr(int irq, void *data) | |||
| 3687 | enable_intr: | 3687 | enable_intr: |
| 3688 | /* re-enable interrupt causes */ | 3688 | /* re-enable interrupt causes */ |
| 3689 | wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask); | 3689 | wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask); |
| 3690 | if (!test_bit(__I40E_VSI_DOWN, pf->state)) { | 3690 | if (!test_bit(__I40E_DOWN, pf->state)) { |
| 3691 | i40e_service_event_schedule(pf); | 3691 | i40e_service_event_schedule(pf); |
| 3692 | i40e_irq_dynamic_enable_icr0(pf, false); | 3692 | i40e_irq_dynamic_enable_icr0(pf, false); |
| 3693 | } | 3693 | } |
| @@ -6203,7 +6203,7 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) | |||
| 6203 | { | 6203 | { |
| 6204 | 6204 | ||
| 6205 | /* if interface is down do nothing */ | 6205 | /* if interface is down do nothing */ |
| 6206 | if (test_bit(__I40E_VSI_DOWN, pf->state)) | 6206 | if (test_bit(__I40E_DOWN, pf->state)) |
| 6207 | return; | 6207 | return; |
| 6208 | 6208 | ||
| 6209 | if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) | 6209 | if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) |
| @@ -6344,7 +6344,7 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf) | |||
| 6344 | int i; | 6344 | int i; |
| 6345 | 6345 | ||
| 6346 | /* if interface is down do nothing */ | 6346 | /* if interface is down do nothing */ |
| 6347 | if (test_bit(__I40E_VSI_DOWN, pf->state) || | 6347 | if (test_bit(__I40E_DOWN, pf->state) || |
| 6348 | test_bit(__I40E_CONFIG_BUSY, pf->state)) | 6348 | test_bit(__I40E_CONFIG_BUSY, pf->state)) |
| 6349 | return; | 6349 | return; |
| 6350 | 6350 | ||
| @@ -6399,9 +6399,9 @@ static void i40e_reset_subtask(struct i40e_pf *pf) | |||
| 6399 | reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED); | 6399 | reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED); |
| 6400 | clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state); | 6400 | clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state); |
| 6401 | } | 6401 | } |
| 6402 | if (test_bit(__I40E_VSI_DOWN_REQUESTED, pf->state)) { | 6402 | if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) { |
| 6403 | reset_flags |= BIT(__I40E_VSI_DOWN_REQUESTED); | 6403 | reset_flags |= BIT(__I40E_DOWN_REQUESTED); |
| 6404 | clear_bit(__I40E_VSI_DOWN_REQUESTED, pf->state); | 6404 | clear_bit(__I40E_DOWN_REQUESTED, pf->state); |
| 6405 | } | 6405 | } |
| 6406 | 6406 | ||
| 6407 | /* If there's a recovery already waiting, it takes | 6407 | /* If there's a recovery already waiting, it takes |
| @@ -6415,7 +6415,7 @@ static void i40e_reset_subtask(struct i40e_pf *pf) | |||
| 6415 | 6415 | ||
| 6416 | /* If we're already down or resetting, just bail */ | 6416 | /* If we're already down or resetting, just bail */ |
| 6417 | if (reset_flags && | 6417 | if (reset_flags && |
| 6418 | !test_bit(__I40E_VSI_DOWN, pf->state) && | 6418 | !test_bit(__I40E_DOWN, pf->state) && |
| 6419 | !test_bit(__I40E_CONFIG_BUSY, pf->state)) { | 6419 | !test_bit(__I40E_CONFIG_BUSY, pf->state)) { |
| 6420 | rtnl_lock(); | 6420 | rtnl_lock(); |
| 6421 | i40e_do_reset(pf, reset_flags, true); | 6421 | i40e_do_reset(pf, reset_flags, true); |
| @@ -7002,7 +7002,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) | |||
| 7002 | u32 val; | 7002 | u32 val; |
| 7003 | int v; | 7003 | int v; |
| 7004 | 7004 | ||
| 7005 | if (test_bit(__I40E_VSI_DOWN, pf->state)) | 7005 | if (test_bit(__I40E_DOWN, pf->state)) |
| 7006 | goto clear_recovery; | 7006 | goto clear_recovery; |
| 7007 | dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n"); | 7007 | dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n"); |
| 7008 | 7008 | ||
| @@ -8821,11 +8821,12 @@ static int i40e_sw_init(struct i40e_pf *pf) | |||
| 8821 | (pf->hw.aq.api_min_ver > 4))) { | 8821 | (pf->hw.aq.api_min_ver > 4))) { |
| 8822 | /* Supported in FW API version higher than 1.4 */ | 8822 | /* Supported in FW API version higher than 1.4 */ |
| 8823 | pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; | 8823 | pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; |
| 8824 | pf->flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE; | ||
| 8825 | } else { | ||
| 8826 | pf->flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE; | ||
| 8827 | } | 8824 | } |
| 8828 | 8825 | ||
| 8826 | /* Enable HW ATR eviction if possible */ | ||
| 8827 | if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) | ||
| 8828 | pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED; | ||
| 8829 | |||
| 8829 | pf->eeprom_version = 0xDEAD; | 8830 | pf->eeprom_version = 0xDEAD; |
| 8830 | pf->lan_veb = I40E_NO_VEB; | 8831 | pf->lan_veb = I40E_NO_VEB; |
| 8831 | pf->lan_vsi = I40E_NO_VSI; | 8832 | pf->lan_vsi = I40E_NO_VSI; |
| @@ -9767,7 +9768,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi) | |||
| 9767 | return -ENODEV; | 9768 | return -ENODEV; |
| 9768 | } | 9769 | } |
| 9769 | if (vsi == pf->vsi[pf->lan_vsi] && | 9770 | if (vsi == pf->vsi[pf->lan_vsi] && |
| 9770 | !test_bit(__I40E_VSI_DOWN, pf->state)) { | 9771 | !test_bit(__I40E_DOWN, pf->state)) { |
| 9771 | dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); | 9772 | dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); |
| 9772 | return -ENODEV; | 9773 | return -ENODEV; |
| 9773 | } | 9774 | } |
| @@ -11003,7 +11004,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 11003 | } | 11004 | } |
| 11004 | pf->next_vsi = 0; | 11005 | pf->next_vsi = 0; |
| 11005 | pf->pdev = pdev; | 11006 | pf->pdev = pdev; |
| 11006 | set_bit(__I40E_VSI_DOWN, pf->state); | 11007 | set_bit(__I40E_DOWN, pf->state); |
| 11007 | 11008 | ||
| 11008 | hw = &pf->hw; | 11009 | hw = &pf->hw; |
| 11009 | hw->back = pf; | 11010 | hw->back = pf; |
| @@ -11293,7 +11294,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 11293 | * before setting up the misc vector or we get a race and the vector | 11294 | * before setting up the misc vector or we get a race and the vector |
| 11294 | * ends up disabled forever. | 11295 | * ends up disabled forever. |
| 11295 | */ | 11296 | */ |
| 11296 | clear_bit(__I40E_VSI_DOWN, pf->state); | 11297 | clear_bit(__I40E_DOWN, pf->state); |
| 11297 | 11298 | ||
| 11298 | /* In case of MSIX we are going to setup the misc vector right here | 11299 | /* In case of MSIX we are going to setup the misc vector right here |
| 11299 | * to handle admin queue events etc. In case of legacy and MSI | 11300 | * to handle admin queue events etc. In case of legacy and MSI |
| @@ -11448,7 +11449,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 11448 | 11449 | ||
| 11449 | /* Unwind what we've done if something failed in the setup */ | 11450 | /* Unwind what we've done if something failed in the setup */ |
| 11450 | err_vsis: | 11451 | err_vsis: |
| 11451 | set_bit(__I40E_VSI_DOWN, pf->state); | 11452 | set_bit(__I40E_DOWN, pf->state); |
| 11452 | i40e_clear_interrupt_scheme(pf); | 11453 | i40e_clear_interrupt_scheme(pf); |
| 11453 | kfree(pf->vsi); | 11454 | kfree(pf->vsi); |
| 11454 | err_switch_setup: | 11455 | err_switch_setup: |
| @@ -11500,7 +11501,7 @@ static void i40e_remove(struct pci_dev *pdev) | |||
| 11500 | 11501 | ||
| 11501 | /* no more scheduling of any task */ | 11502 | /* no more scheduling of any task */ |
| 11502 | set_bit(__I40E_SUSPENDED, pf->state); | 11503 | set_bit(__I40E_SUSPENDED, pf->state); |
| 11503 | set_bit(__I40E_VSI_DOWN, pf->state); | 11504 | set_bit(__I40E_DOWN, pf->state); |
| 11504 | if (pf->service_timer.data) | 11505 | if (pf->service_timer.data) |
| 11505 | del_timer_sync(&pf->service_timer); | 11506 | del_timer_sync(&pf->service_timer); |
| 11506 | if (pf->service_task.func) | 11507 | if (pf->service_task.func) |
| @@ -11740,7 +11741,7 @@ static void i40e_shutdown(struct pci_dev *pdev) | |||
| 11740 | struct i40e_hw *hw = &pf->hw; | 11741 | struct i40e_hw *hw = &pf->hw; |
| 11741 | 11742 | ||
| 11742 | set_bit(__I40E_SUSPENDED, pf->state); | 11743 | set_bit(__I40E_SUSPENDED, pf->state); |
| 11743 | set_bit(__I40E_VSI_DOWN, pf->state); | 11744 | set_bit(__I40E_DOWN, pf->state); |
| 11744 | rtnl_lock(); | 11745 | rtnl_lock(); |
| 11745 | i40e_prep_for_reset(pf, true); | 11746 | i40e_prep_for_reset(pf, true); |
| 11746 | rtnl_unlock(); | 11747 | rtnl_unlock(); |
| @@ -11789,7 +11790,7 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state) | |||
| 11789 | int retval = 0; | 11790 | int retval = 0; |
| 11790 | 11791 | ||
| 11791 | set_bit(__I40E_SUSPENDED, pf->state); | 11792 | set_bit(__I40E_SUSPENDED, pf->state); |
| 11792 | set_bit(__I40E_VSI_DOWN, pf->state); | 11793 | set_bit(__I40E_DOWN, pf->state); |
| 11793 | 11794 | ||
| 11794 | if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE)) | 11795 | if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE)) |
| 11795 | i40e_enable_mc_magic_wake(pf); | 11796 | i40e_enable_mc_magic_wake(pf); |
| @@ -11841,7 +11842,7 @@ static int i40e_resume(struct pci_dev *pdev) | |||
| 11841 | 11842 | ||
| 11842 | /* handling the reset will rebuild the device state */ | 11843 | /* handling the reset will rebuild the device state */ |
| 11843 | if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) { | 11844 | if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) { |
| 11844 | clear_bit(__I40E_VSI_DOWN, pf->state); | 11845 | clear_bit(__I40E_DOWN, pf->state); |
| 11845 | rtnl_lock(); | 11846 | rtnl_lock(); |
| 11846 | i40e_reset_and_rebuild(pf, false, true); | 11847 | i40e_reset_and_rebuild(pf, false, true); |
| 11847 | rtnl_unlock(); | 11848 | rtnl_unlock(); |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 29321a6167a6..77115c25d96f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c | |||
| @@ -1854,7 +1854,8 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, | |||
| 1854 | #if (PAGE_SIZE < 8192) | 1854 | #if (PAGE_SIZE < 8192) |
| 1855 | unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; | 1855 | unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; |
| 1856 | #else | 1856 | #else |
| 1857 | unsigned int truesize = SKB_DATA_ALIGN(size); | 1857 | unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + |
| 1858 | SKB_DATA_ALIGN(I40E_SKB_PAD + size); | ||
| 1858 | #endif | 1859 | #endif |
| 1859 | struct sk_buff *skb; | 1860 | struct sk_buff *skb; |
| 1860 | 1861 | ||
| @@ -2340,7 +2341,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, | |||
| 2340 | /* Due to lack of space, no more new filters can be programmed */ | 2341 | /* Due to lack of space, no more new filters can be programmed */ |
| 2341 | if (th->syn && (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED)) | 2342 | if (th->syn && (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED)) |
| 2342 | return; | 2343 | return; |
| 2343 | if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) { | 2344 | if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) { |
| 2344 | /* HW ATR eviction will take care of removing filters on FIN | 2345 | /* HW ATR eviction will take care of removing filters on FIN |
| 2345 | * and RST packets. | 2346 | * and RST packets. |
| 2346 | */ | 2347 | */ |
| @@ -2402,7 +2403,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, | |||
| 2402 | I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & | 2403 | I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & |
| 2403 | I40E_TXD_FLTR_QW1_CNTINDEX_MASK; | 2404 | I40E_TXD_FLTR_QW1_CNTINDEX_MASK; |
| 2404 | 2405 | ||
| 2405 | if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) | 2406 | if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) |
| 2406 | dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK; | 2407 | dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK; |
| 2407 | 2408 | ||
| 2408 | fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype); | 2409 | fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype); |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 95c23fbaa211..0fb38ca78900 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | |||
| @@ -3017,10 +3017,12 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, | |||
| 3017 | VLAN_VID_MASK)); | 3017 | VLAN_VID_MASK)); |
| 3018 | } | 3018 | } |
| 3019 | 3019 | ||
| 3020 | spin_unlock_bh(&vsi->mac_filter_hash_lock); | ||
| 3020 | if (vlan_id || qos) | 3021 | if (vlan_id || qos) |
| 3021 | ret = i40e_vsi_add_pvid(vsi, vlanprio); | 3022 | ret = i40e_vsi_add_pvid(vsi, vlanprio); |
| 3022 | else | 3023 | else |
| 3023 | i40e_vsi_remove_pvid(vsi); | 3024 | i40e_vsi_remove_pvid(vsi); |
| 3025 | spin_lock_bh(&vsi->mac_filter_hash_lock); | ||
| 3024 | 3026 | ||
| 3025 | if (vlan_id) { | 3027 | if (vlan_id) { |
| 3026 | dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n", | 3028 | dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n", |
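The i40e_ndo_set_vf_port_vlan() hunk releases mac_filter_hash_lock around the PVID add/remove calls and re-takes it afterwards, presumably because those helpers acquire the same lock or otherwise must not run under it. The self-deadlock being avoided can be sketched in user space with a non-recursive mutex; the function names are invented for the illustration:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t filter_lock = PTHREAD_MUTEX_INITIALIZER;

/* Helper assumed to take the same lock internally: calling it with
 * filter_lock already held would deadlock on a non-recursive mutex. */
static void update_pvid(int vlan)
{
    pthread_mutex_lock(&filter_lock);
    printf("pvid set to %d\n", vlan);
    pthread_mutex_unlock(&filter_lock);
}

static void configure_vf_vlan(int vlan)
{
    pthread_mutex_lock(&filter_lock);
    /* ... edit the filter table under the lock ... */

    pthread_mutex_unlock(&filter_lock);   /* drop it around the helper */
    update_pvid(vlan);
    pthread_mutex_lock(&filter_lock);     /* re-take it for the rest */

    /* ... more filter-table work ... */
    pthread_mutex_unlock(&filter_lock);
}

int main(void)
{
    configure_vf_vlan(100);
    return 0;
}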
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index dfe241a12ad0..12b02e530503 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c | |||
| @@ -1190,7 +1190,8 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, | |||
| 1190 | #if (PAGE_SIZE < 8192) | 1190 | #if (PAGE_SIZE < 8192) |
| 1191 | unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; | 1191 | unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; |
| 1192 | #else | 1192 | #else |
| 1193 | unsigned int truesize = SKB_DATA_ALIGN(size); | 1193 | unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + |
| 1194 | SKB_DATA_ALIGN(I40E_SKB_PAD + size); | ||
| 1194 | #endif | 1195 | #endif |
| 1195 | struct sk_buff *skb; | 1196 | struct sk_buff *skb; |
| 1196 | 1197 | ||
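Both the i40e and i40evf i40e_build_skb() hunks recompute truesize on large-page systems as the cache-aligned headroom-plus-payload region plus the aligned skb_shared_info that build_skb() places at the end of the buffer, rather than aligning the payload alone. The arithmetic can be checked stand-alone; the 64-byte alignment, 192-byte pad and 320-byte shared-info size below are assumptions for the example, not the kernel's exact values:

#include <stdio.h>

#define CACHE_LINE       64                           /* assumed SMP_CACHE_BYTES */
#define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))
#define SKB_PAD          192                          /* assumed I40E_SKB_PAD    */
#define SHARED_INFO_SIZE 320                          /* assumed skb_shared_info */

/* Old formula: only the payload was aligned, undercounting the headroom
 * and the shared-info tail that the page buffer actually carries. */
static unsigned int truesize_old(unsigned int size)
{
    return ALIGN_UP(size, CACHE_LINE);
}

/* New formula: aligned(shared_info) + aligned(headroom + payload). */
static unsigned int truesize_new(unsigned int size)
{
    return ALIGN_UP(SHARED_INFO_SIZE, CACHE_LINE) +
           ALIGN_UP(SKB_PAD + size, CACHE_LINE);
}

int main(void)
{
    unsigned int size = 1536;       /* a typical received frame */

    printf("old truesize: %u\n", truesize_old(size));
    printf("new truesize: %u\n", truesize_new(size));
    return 0;
}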
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 9b875d776b29..33c901622ed5 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c | |||
| @@ -3719,7 +3719,7 @@ static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv, | |||
| 3719 | dma_addr_t *dma_addr, | 3719 | dma_addr_t *dma_addr, |
| 3720 | phys_addr_t *phys_addr) | 3720 | phys_addr_t *phys_addr) |
| 3721 | { | 3721 | { |
| 3722 | int cpu = smp_processor_id(); | 3722 | int cpu = get_cpu(); |
| 3723 | 3723 | ||
| 3724 | *dma_addr = mvpp2_percpu_read(priv, cpu, | 3724 | *dma_addr = mvpp2_percpu_read(priv, cpu, |
| 3725 | MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); | 3725 | MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); |
| @@ -3740,6 +3740,8 @@ static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv, | |||
| 3740 | if (sizeof(phys_addr_t) == 8) | 3740 | if (sizeof(phys_addr_t) == 8) |
| 3741 | *phys_addr |= (u64)phys_addr_highbits << 32; | 3741 | *phys_addr |= (u64)phys_addr_highbits << 32; |
| 3742 | } | 3742 | } |
| 3743 | |||
| 3744 | put_cpu(); | ||
| 3743 | } | 3745 | } |
| 3744 | 3746 | ||
| 3745 | /* Free all buffers from the pool */ | 3747 | /* Free all buffers from the pool */ |
| @@ -3920,18 +3922,12 @@ static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool) | |||
| 3920 | return bm; | 3922 | return bm; |
| 3921 | } | 3923 | } |
| 3922 | 3924 | ||
| 3923 | /* Get pool number from a BM cookie */ | ||
| 3924 | static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie) | ||
| 3925 | { | ||
| 3926 | return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF; | ||
| 3927 | } | ||
| 3928 | |||
| 3929 | /* Release buffer to BM */ | 3925 | /* Release buffer to BM */ |
| 3930 | static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, | 3926 | static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, |
| 3931 | dma_addr_t buf_dma_addr, | 3927 | dma_addr_t buf_dma_addr, |
| 3932 | phys_addr_t buf_phys_addr) | 3928 | phys_addr_t buf_phys_addr) |
| 3933 | { | 3929 | { |
| 3934 | int cpu = smp_processor_id(); | 3930 | int cpu = get_cpu(); |
| 3935 | 3931 | ||
| 3936 | if (port->priv->hw_version == MVPP22) { | 3932 | if (port->priv->hw_version == MVPP22) { |
| 3937 | u32 val = 0; | 3933 | u32 val = 0; |
| @@ -3958,15 +3954,15 @@ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, | |||
| 3958 | MVPP2_BM_VIRT_RLS_REG, buf_phys_addr); | 3954 | MVPP2_BM_VIRT_RLS_REG, buf_phys_addr); |
| 3959 | mvpp2_percpu_write(port->priv, cpu, | 3955 | mvpp2_percpu_write(port->priv, cpu, |
| 3960 | MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr); | 3956 | MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr); |
| 3957 | |||
| 3958 | put_cpu(); | ||
| 3961 | } | 3959 | } |
| 3962 | 3960 | ||
| 3963 | /* Refill BM pool */ | 3961 | /* Refill BM pool */ |
| 3964 | static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm, | 3962 | static void mvpp2_pool_refill(struct mvpp2_port *port, int pool, |
| 3965 | dma_addr_t dma_addr, | 3963 | dma_addr_t dma_addr, |
| 3966 | phys_addr_t phys_addr) | 3964 | phys_addr_t phys_addr) |
| 3967 | { | 3965 | { |
| 3968 | int pool = mvpp2_bm_cookie_pool_get(bm); | ||
| 3969 | |||
| 3970 | mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); | 3966 | mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); |
| 3971 | } | 3967 | } |
| 3972 | 3968 | ||
| @@ -4186,8 +4182,6 @@ static void mvpp22_port_mii_set(struct mvpp2_port *port) | |||
| 4186 | { | 4182 | { |
| 4187 | u32 val; | 4183 | u32 val; |
| 4188 | 4184 | ||
| 4189 | return; | ||
| 4190 | |||
| 4191 | /* Only GOP port 0 has an XLG MAC */ | 4185 | /* Only GOP port 0 has an XLG MAC */ |
| 4192 | if (port->gop_id == 0) { | 4186 | if (port->gop_id == 0) { |
| 4193 | val = readl(port->base + MVPP22_XLG_CTRL3_REG); | 4187 | val = readl(port->base + MVPP22_XLG_CTRL3_REG); |
| @@ -4515,21 +4509,6 @@ static void mvpp2_rxq_offset_set(struct mvpp2_port *port, | |||
| 4515 | mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); | 4509 | mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); |
| 4516 | } | 4510 | } |
| 4517 | 4511 | ||
| 4518 | /* Obtain BM cookie information from descriptor */ | ||
| 4519 | static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port, | ||
| 4520 | struct mvpp2_rx_desc *rx_desc) | ||
| 4521 | { | ||
| 4522 | int cpu = smp_processor_id(); | ||
| 4523 | int pool; | ||
| 4524 | |||
| 4525 | pool = (mvpp2_rxdesc_status_get(port, rx_desc) & | ||
| 4526 | MVPP2_RXD_BM_POOL_ID_MASK) >> | ||
| 4527 | MVPP2_RXD_BM_POOL_ID_OFFS; | ||
| 4528 | |||
| 4529 | return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) | | ||
| 4530 | ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS); | ||
| 4531 | } | ||
| 4532 | |||
| 4533 | /* Tx descriptors helper methods */ | 4512 | /* Tx descriptors helper methods */ |
| 4534 | 4513 | ||
| 4535 | /* Get pointer to next Tx descriptor to be processed (send) by HW */ | 4514 | /* Get pointer to next Tx descriptor to be processed (send) by HW */ |
| @@ -4757,7 +4736,7 @@ static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port) | |||
| 4757 | static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, | 4736 | static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, |
| 4758 | struct mvpp2_rx_queue *rxq) | 4737 | struct mvpp2_rx_queue *rxq) |
| 4759 | { | 4738 | { |
| 4760 | int cpu = smp_processor_id(); | 4739 | int cpu = get_cpu(); |
| 4761 | 4740 | ||
| 4762 | if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK) | 4741 | if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK) |
| 4763 | rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK; | 4742 | rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK; |
| @@ -4765,6 +4744,8 @@ static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, | |||
| 4765 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); | 4744 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); |
| 4766 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG, | 4745 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG, |
| 4767 | rxq->pkts_coal); | 4746 | rxq->pkts_coal); |
| 4747 | |||
| 4748 | put_cpu(); | ||
| 4768 | } | 4749 | } |
| 4769 | 4750 | ||
| 4770 | static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz) | 4751 | static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz) |
| @@ -4945,7 +4926,7 @@ static int mvpp2_rxq_init(struct mvpp2_port *port, | |||
| 4945 | mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); | 4926 | mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); |
| 4946 | 4927 | ||
| 4947 | /* Set Rx descriptors queue starting address - indirect access */ | 4928 | /* Set Rx descriptors queue starting address - indirect access */ |
| 4948 | cpu = smp_processor_id(); | 4929 | cpu = get_cpu(); |
| 4949 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); | 4930 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); |
| 4950 | if (port->priv->hw_version == MVPP21) | 4931 | if (port->priv->hw_version == MVPP21) |
| 4951 | rxq_dma = rxq->descs_dma; | 4932 | rxq_dma = rxq->descs_dma; |
| @@ -4954,6 +4935,7 @@ static int mvpp2_rxq_init(struct mvpp2_port *port, | |||
| 4954 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); | 4935 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); |
| 4955 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); | 4936 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); |
| 4956 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0); | 4937 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0); |
| 4938 | put_cpu(); | ||
| 4957 | 4939 | ||
| 4958 | /* Set Offset */ | 4940 | /* Set Offset */ |
| 4959 | mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD); | 4941 | mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD); |
| @@ -4980,9 +4962,13 @@ static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port, | |||
| 4980 | 4962 | ||
| 4981 | for (i = 0; i < rx_received; i++) { | 4963 | for (i = 0; i < rx_received; i++) { |
| 4982 | struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); | 4964 | struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); |
| 4983 | u32 bm = mvpp2_bm_cookie_build(port, rx_desc); | 4965 | u32 status = mvpp2_rxdesc_status_get(port, rx_desc); |
| 4966 | int pool; | ||
| 4967 | |||
| 4968 | pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >> | ||
| 4969 | MVPP2_RXD_BM_POOL_ID_OFFS; | ||
| 4984 | 4970 | ||
| 4985 | mvpp2_pool_refill(port, bm, | 4971 | mvpp2_pool_refill(port, pool, |
| 4986 | mvpp2_rxdesc_dma_addr_get(port, rx_desc), | 4972 | mvpp2_rxdesc_dma_addr_get(port, rx_desc), |
| 4987 | mvpp2_rxdesc_cookie_get(port, rx_desc)); | 4973 | mvpp2_rxdesc_cookie_get(port, rx_desc)); |
| 4988 | } | 4974 | } |
| @@ -5012,10 +4998,11 @@ static void mvpp2_rxq_deinit(struct mvpp2_port *port, | |||
| 5012 | * free descriptor number | 4998 | * free descriptor number |
| 5013 | */ | 4999 | */ |
| 5014 | mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); | 5000 | mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); |
| 5015 | cpu = smp_processor_id(); | 5001 | cpu = get_cpu(); |
| 5016 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); | 5002 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); |
| 5017 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0); | 5003 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0); |
| 5018 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0); | 5004 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0); |
| 5005 | put_cpu(); | ||
| 5019 | } | 5006 | } |
| 5020 | 5007 | ||
| 5021 | /* Create and initialize a Tx queue */ | 5008 | /* Create and initialize a Tx queue */ |
| @@ -5038,7 +5025,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port, | |||
| 5038 | txq->last_desc = txq->size - 1; | 5025 | txq->last_desc = txq->size - 1; |
| 5039 | 5026 | ||
| 5040 | /* Set Tx descriptors queue starting address - indirect access */ | 5027 | /* Set Tx descriptors queue starting address - indirect access */ |
| 5041 | cpu = smp_processor_id(); | 5028 | cpu = get_cpu(); |
| 5042 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); | 5029 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); |
| 5043 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, | 5030 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, |
| 5044 | txq->descs_dma); | 5031 | txq->descs_dma); |
| @@ -5063,6 +5050,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port, | |||
| 5063 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, | 5050 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, |
| 5064 | MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | | 5051 | MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | |
| 5065 | MVPP2_PREF_BUF_THRESH(desc_per_txq / 2)); | 5052 | MVPP2_PREF_BUF_THRESH(desc_per_txq / 2)); |
| 5053 | put_cpu(); | ||
| 5066 | 5054 | ||
| 5067 | /* WRR / EJP configuration - indirect access */ | 5055 | /* WRR / EJP configuration - indirect access */ |
| 5068 | tx_port_num = mvpp2_egress_port(port); | 5056 | tx_port_num = mvpp2_egress_port(port); |
| @@ -5133,10 +5121,11 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port, | |||
| 5133 | mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0); | 5121 | mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0); |
| 5134 | 5122 | ||
| 5135 | /* Set Tx descriptors queue starting address and size */ | 5123 | /* Set Tx descriptors queue starting address and size */ |
| 5136 | cpu = smp_processor_id(); | 5124 | cpu = get_cpu(); |
| 5137 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); | 5125 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); |
| 5138 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0); | 5126 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0); |
| 5139 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0); | 5127 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0); |
| 5128 | put_cpu(); | ||
| 5140 | } | 5129 | } |
| 5141 | 5130 | ||
| 5142 | /* Cleanup Tx ports */ | 5131 | /* Cleanup Tx ports */ |
| @@ -5146,7 +5135,7 @@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) | |||
| 5146 | int delay, pending, cpu; | 5135 | int delay, pending, cpu; |
| 5147 | u32 val; | 5136 | u32 val; |
| 5148 | 5137 | ||
| 5149 | cpu = smp_processor_id(); | 5138 | cpu = get_cpu(); |
| 5150 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); | 5139 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); |
| 5151 | val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG); | 5140 | val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG); |
| 5152 | val |= MVPP2_TXQ_DRAIN_EN_MASK; | 5141 | val |= MVPP2_TXQ_DRAIN_EN_MASK; |
| @@ -5173,6 +5162,7 @@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) | |||
| 5173 | 5162 | ||
| 5174 | val &= ~MVPP2_TXQ_DRAIN_EN_MASK; | 5163 | val &= ~MVPP2_TXQ_DRAIN_EN_MASK; |
| 5175 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val); | 5164 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val); |
| 5165 | put_cpu(); | ||
| 5176 | 5166 | ||
| 5177 | for_each_present_cpu(cpu) { | 5167 | for_each_present_cpu(cpu) { |
| 5178 | txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); | 5168 | txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); |
| @@ -5420,7 +5410,7 @@ static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status, | |||
| 5420 | 5410 | ||
| 5421 | /* Reuse skb if possible, or allocate a new skb and add it to BM pool */ | 5411 | /* Reuse skb if possible, or allocate a new skb and add it to BM pool */ |
| 5422 | static int mvpp2_rx_refill(struct mvpp2_port *port, | 5412 | static int mvpp2_rx_refill(struct mvpp2_port *port, |
| 5423 | struct mvpp2_bm_pool *bm_pool, u32 bm) | 5413 | struct mvpp2_bm_pool *bm_pool, int pool) |
| 5424 | { | 5414 | { |
| 5425 | dma_addr_t dma_addr; | 5415 | dma_addr_t dma_addr; |
| 5426 | phys_addr_t phys_addr; | 5416 | phys_addr_t phys_addr; |
| @@ -5432,7 +5422,7 @@ static int mvpp2_rx_refill(struct mvpp2_port *port, | |||
| 5432 | if (!buf) | 5422 | if (!buf) |
| 5433 | return -ENOMEM; | 5423 | return -ENOMEM; |
| 5434 | 5424 | ||
| 5435 | mvpp2_pool_refill(port, bm, dma_addr, phys_addr); | 5425 | mvpp2_pool_refill(port, pool, dma_addr, phys_addr); |
| 5436 | 5426 | ||
| 5437 | return 0; | 5427 | return 0; |
| 5438 | } | 5428 | } |
| @@ -5490,7 +5480,7 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, | |||
| 5490 | unsigned int frag_size; | 5480 | unsigned int frag_size; |
| 5491 | dma_addr_t dma_addr; | 5481 | dma_addr_t dma_addr; |
| 5492 | phys_addr_t phys_addr; | 5482 | phys_addr_t phys_addr; |
| 5493 | u32 bm, rx_status; | 5483 | u32 rx_status; |
| 5494 | int pool, rx_bytes, err; | 5484 | int pool, rx_bytes, err; |
| 5495 | void *data; | 5485 | void *data; |
| 5496 | 5486 | ||
| @@ -5502,8 +5492,8 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, | |||
| 5502 | phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc); | 5492 | phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc); |
| 5503 | data = (void *)phys_to_virt(phys_addr); | 5493 | data = (void *)phys_to_virt(phys_addr); |
| 5504 | 5494 | ||
| 5505 | bm = mvpp2_bm_cookie_build(port, rx_desc); | 5495 | pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >> |
| 5506 | pool = mvpp2_bm_cookie_pool_get(bm); | 5496 | MVPP2_RXD_BM_POOL_ID_OFFS; |
| 5507 | bm_pool = &port->priv->bm_pools[pool]; | 5497 | bm_pool = &port->priv->bm_pools[pool]; |
| 5508 | 5498 | ||
| 5509 | /* In case of an error, release the requested buffer pointer | 5499 | /* In case of an error, release the requested buffer pointer |
| @@ -5516,7 +5506,7 @@ err_drop_frame: | |||
| 5516 | dev->stats.rx_errors++; | 5506 | dev->stats.rx_errors++; |
| 5517 | mvpp2_rx_error(port, rx_desc); | 5507 | mvpp2_rx_error(port, rx_desc); |
| 5518 | /* Return the buffer to the pool */ | 5508 | /* Return the buffer to the pool */ |
| 5519 | mvpp2_pool_refill(port, bm, dma_addr, phys_addr); | 5509 | mvpp2_pool_refill(port, pool, dma_addr, phys_addr); |
| 5520 | continue; | 5510 | continue; |
| 5521 | } | 5511 | } |
| 5522 | 5512 | ||
| @@ -5531,7 +5521,7 @@ err_drop_frame: | |||
| 5531 | goto err_drop_frame; | 5521 | goto err_drop_frame; |
| 5532 | } | 5522 | } |
| 5533 | 5523 | ||
| 5534 | err = mvpp2_rx_refill(port, bm_pool, bm); | 5524 | err = mvpp2_rx_refill(port, bm_pool, pool); |
| 5535 | if (err) { | 5525 | if (err) { |
| 5536 | netdev_err(port->dev, "failed to refill BM pools\n"); | 5526 | netdev_err(port->dev, "failed to refill BM pools\n"); |
| 5537 | goto err_drop_frame; | 5527 | goto err_drop_frame; |
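Throughout the mvpp2 RX paths above, the intermediate "BM cookie" is gone and the buffer-manager pool is read directly from the RX descriptor status word with a mask and shift. A stand-alone sketch of that extraction; the 3-bit field at offset 16 is an assumed layout for the example, not necessarily the real MVPP2_RXD_BM_POOL_ID encoding:

#include <stdint.h>
#include <stdio.h>

#define RXD_BM_POOL_ID_OFFS 16                             /* assumed field offset */
#define RXD_BM_POOL_ID_MASK (0x7u << RXD_BM_POOL_ID_OFFS)  /* assumed 3-bit field  */

/* Pull the pool number straight out of the RX descriptor status word. */
static int pool_from_status(uint32_t status)
{
    return (status & RXD_BM_POOL_ID_MASK) >> RXD_BM_POOL_ID_OFFS;
}

int main(void)
{
    uint32_t status = (5u << RXD_BM_POOL_ID_OFFS) | 0x3;   /* pool 5 + other bits */

    printf("pool = %d\n", pool_from_status(status));
    return 0;
}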
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index ae5fdc2df654..ffbcb27c05e5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | |||
| @@ -1562,11 +1562,6 @@ static int mlx4_en_flow_replace(struct net_device *dev, | |||
| 1562 | qpn = priv->drop_qp.qpn; | 1562 | qpn = priv->drop_qp.qpn; |
| 1563 | else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) { | 1563 | else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) { |
| 1564 | qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1); | 1564 | qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1); |
| 1565 | if (qpn < priv->rss_map.base_qpn || | ||
| 1566 | qpn >= priv->rss_map.base_qpn + priv->rx_ring_num) { | ||
| 1567 | en_warn(priv, "rxnfc: QP (0x%x) doesn't exist\n", qpn); | ||
| 1568 | return -EINVAL; | ||
| 1569 | } | ||
| 1570 | } else { | 1565 | } else { |
| 1571 | if (cmd->fs.ring_cookie >= priv->rx_ring_num) { | 1566 | if (cmd->fs.ring_cookie >= priv->rx_ring_num) { |
| 1572 | en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n", | 1567 | en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n", |
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index 1a670b681555..0710b3677464 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c | |||
| @@ -35,6 +35,7 @@ | |||
| 35 | #include <linux/etherdevice.h> | 35 | #include <linux/etherdevice.h> |
| 36 | 36 | ||
| 37 | #include <linux/mlx4/cmd.h> | 37 | #include <linux/mlx4/cmd.h> |
| 38 | #include <linux/mlx4/qp.h> | ||
| 38 | #include <linux/export.h> | 39 | #include <linux/export.h> |
| 39 | 40 | ||
| 40 | #include "mlx4.h" | 41 | #include "mlx4.h" |
| @@ -985,16 +986,21 @@ int mlx4_flow_attach(struct mlx4_dev *dev, | |||
| 985 | if (IS_ERR(mailbox)) | 986 | if (IS_ERR(mailbox)) |
| 986 | return PTR_ERR(mailbox); | 987 | return PTR_ERR(mailbox); |
| 987 | 988 | ||
| 989 | if (!mlx4_qp_lookup(dev, rule->qpn)) { | ||
| 990 | mlx4_err_rule(dev, "QP doesn't exist\n", rule); | ||
| 991 | ret = -EINVAL; | ||
| 992 | goto out; | ||
| 993 | } | ||
| 994 | |||
| 988 | trans_rule_ctrl_to_hw(rule, mailbox->buf); | 995 | trans_rule_ctrl_to_hw(rule, mailbox->buf); |
| 989 | 996 | ||
| 990 | size += sizeof(struct mlx4_net_trans_rule_hw_ctrl); | 997 | size += sizeof(struct mlx4_net_trans_rule_hw_ctrl); |
| 991 | 998 | ||
| 992 | list_for_each_entry(cur, &rule->list, list) { | 999 | list_for_each_entry(cur, &rule->list, list) { |
| 993 | ret = parse_trans_rule(dev, cur, mailbox->buf + size); | 1000 | ret = parse_trans_rule(dev, cur, mailbox->buf + size); |
| 994 | if (ret < 0) { | 1001 | if (ret < 0) |
| 995 | mlx4_free_cmd_mailbox(dev, mailbox); | 1002 | goto out; |
| 996 | return ret; | 1003 | |
| 997 | } | ||
| 998 | size += ret; | 1004 | size += ret; |
| 999 | } | 1005 | } |
| 1000 | 1006 | ||
| @@ -1021,6 +1027,7 @@ int mlx4_flow_attach(struct mlx4_dev *dev, | |||
| 1021 | } | 1027 | } |
| 1022 | } | 1028 | } |
| 1023 | 1029 | ||
| 1030 | out: | ||
| 1024 | mlx4_free_cmd_mailbox(dev, mailbox); | 1031 | mlx4_free_cmd_mailbox(dev, mailbox); |
| 1025 | 1032 | ||
| 1026 | return ret; | 1033 | return ret; |
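The mlx4_flow_attach() hunks validate the rule's QPN before any parsing and convert the early-return error path into a single "goto out" exit, so the command mailbox is freed on every path. The single-exit cleanup shape can be sketched stand-alone (names and error codes are placeholders):

#include <stdio.h>
#include <stdlib.h>

static int qp_exists(unsigned int qpn) { return qpn != 0; }     /* placeholder check */
static int parse_rule_part(char *buf)  { return buf ? 4 : -1; } /* placeholder parse */

static int attach_flow(unsigned int qpn)
{
    char *mailbox = malloc(256);
    int ret = 0;

    if (!mailbox)
        return -1;

    if (!qp_exists(qpn)) {          /* validate before doing any work */
        ret = -1;
        goto out;
    }

    ret = parse_rule_part(mailbox);
    if (ret < 0)
        goto out;                   /* every failure funnels through 'out' */

    ret = 0;                        /* ... issue the firmware command ... */
out:
    free(mailbox);                  /* mailbox is released on all paths */
    return ret;
}

int main(void)
{
    printf("attach_flow(0) = %d\n", attach_flow(0));
    printf("attach_flow(7) = %d\n", attach_flow(7));
    return 0;
}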
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 2d6abd4662b1..5a310d313e94 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c | |||
| @@ -384,6 +384,19 @@ static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn) | |||
| 384 | __mlx4_qp_free_icm(dev, qpn); | 384 | __mlx4_qp_free_icm(dev, qpn); |
| 385 | } | 385 | } |
| 386 | 386 | ||
| 387 | struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn) | ||
| 388 | { | ||
| 389 | struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; | ||
| 390 | struct mlx4_qp *qp; | ||
| 391 | |||
| 392 | spin_lock(&qp_table->lock); | ||
| 393 | |||
| 394 | qp = __mlx4_qp_lookup(dev, qpn); | ||
| 395 | |||
| 396 | spin_unlock(&qp_table->lock); | ||
| 397 | return qp; | ||
| 398 | } | ||
| 399 | |||
| 387 | int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp) | 400 | int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp) |
| 388 | { | 401 | { |
| 389 | struct mlx4_priv *priv = mlx4_priv(dev); | 402 | struct mlx4_priv *priv = mlx4_priv(dev); |
| @@ -471,6 +484,12 @@ int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, | |||
| 471 | } | 484 | } |
| 472 | 485 | ||
| 473 | if (attr & MLX4_UPDATE_QP_QOS_VPORT) { | 486 | if (attr & MLX4_UPDATE_QP_QOS_VPORT) { |
| 487 | if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP)) { | ||
| 488 | mlx4_warn(dev, "Granular QoS per VF is not enabled\n"); | ||
| 489 | err = -EOPNOTSUPP; | ||
| 490 | goto out; | ||
| 491 | } | ||
| 492 | |||
| 474 | qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP; | 493 | qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP; |
| 475 | cmd->qp_context.qos_vport = params->qos_vport; | 494 | cmd->qp_context.qos_vport = params->qos_vport; |
| 476 | } | 495 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 07516545474f..812783865205 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | |||
| @@ -5255,6 +5255,13 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave) | |||
| 5255 | mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); | 5255 | mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); |
| 5256 | } | 5256 | } |
| 5257 | 5257 | ||
| 5258 | static void update_qos_vpp(struct mlx4_update_qp_context *ctx, | ||
| 5259 | struct mlx4_vf_immed_vlan_work *work) | ||
| 5260 | { | ||
| 5261 | ctx->qp_mask |= cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_QOS_VPP); | ||
| 5262 | ctx->qp_context.qos_vport = work->qos_vport; | ||
| 5263 | } | ||
| 5264 | |||
| 5258 | void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work) | 5265 | void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work) |
| 5259 | { | 5266 | { |
| 5260 | struct mlx4_vf_immed_vlan_work *work = | 5267 | struct mlx4_vf_immed_vlan_work *work = |
| @@ -5369,11 +5376,10 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work) | |||
| 5369 | qp->sched_queue & 0xC7; | 5376 | qp->sched_queue & 0xC7; |
| 5370 | upd_context->qp_context.pri_path.sched_queue |= | 5377 | upd_context->qp_context.pri_path.sched_queue |= |
| 5371 | ((work->qos & 0x7) << 3); | 5378 | ((work->qos & 0x7) << 3); |
| 5372 | upd_context->qp_mask |= | 5379 | |
| 5373 | cpu_to_be64(1ULL << | 5380 | if (dev->caps.flags2 & |
| 5374 | MLX4_UPD_QP_MASK_QOS_VPP); | 5381 | MLX4_DEV_CAP_FLAG2_QOS_VPP) |
| 5375 | upd_context->qp_context.qos_vport = | 5382 | update_qos_vpp(upd_context, work); |
| 5376 | work->qos_vport; | ||
| 5377 | } | 5383 | } |
| 5378 | 5384 | ||
| 5379 | err = mlx4_cmd(dev, mailbox->dma, | 5385 | err = mlx4_cmd(dev, mailbox->dma, |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 2fd044b23875..944fc1742464 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h | |||
| @@ -458,13 +458,15 @@ struct mlx5e_mpw_info { | |||
| 458 | 458 | ||
| 459 | struct mlx5e_rx_am_stats { | 459 | struct mlx5e_rx_am_stats { |
| 460 | int ppms; /* packets per msec */ | 460 | int ppms; /* packets per msec */ |
| 461 | int bpms; /* bytes per msec */ | ||
| 461 | int epms; /* events per msec */ | 462 | int epms; /* events per msec */ |
| 462 | }; | 463 | }; |
| 463 | 464 | ||
| 464 | struct mlx5e_rx_am_sample { | 465 | struct mlx5e_rx_am_sample { |
| 465 | ktime_t time; | 466 | ktime_t time; |
| 466 | unsigned int pkt_ctr; | 467 | u32 pkt_ctr; |
| 467 | u16 event_ctr; | 468 | u32 byte_ctr; |
| 469 | u16 event_ctr; | ||
| 468 | }; | 470 | }; |
| 469 | 471 | ||
| 470 | struct mlx5e_rx_am { /* Adaptive Moderation */ | 472 | struct mlx5e_rx_am { /* Adaptive Moderation */ |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 8209affa75c3..16486dff1493 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | |||
| @@ -1242,11 +1242,11 @@ static int mlx5e_get_ts_info(struct net_device *dev, | |||
| 1242 | SOF_TIMESTAMPING_RX_HARDWARE | | 1242 | SOF_TIMESTAMPING_RX_HARDWARE | |
| 1243 | SOF_TIMESTAMPING_RAW_HARDWARE; | 1243 | SOF_TIMESTAMPING_RAW_HARDWARE; |
| 1244 | 1244 | ||
| 1245 | info->tx_types = (BIT(1) << HWTSTAMP_TX_OFF) | | 1245 | info->tx_types = BIT(HWTSTAMP_TX_OFF) | |
| 1246 | (BIT(1) << HWTSTAMP_TX_ON); | 1246 | BIT(HWTSTAMP_TX_ON); |
| 1247 | 1247 | ||
| 1248 | info->rx_filters = (BIT(1) << HWTSTAMP_FILTER_NONE) | | 1248 | info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | |
| 1249 | (BIT(1) << HWTSTAMP_FILTER_ALL); | 1249 | BIT(HWTSTAMP_FILTER_ALL); |
| 1250 | 1250 | ||
| 1251 | return 0; | 1251 | return 0; |
| 1252 | } | 1252 | } |
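The mlx5e_get_ts_info() fix replaces "(BIT(1) << flag)" with "BIT(flag)". Since BIT(1) is 2, the old expression set bit flag+1 instead of bit flag, shifting the advertised timestamping masks by one. A quick stand-alone check, using the UAPI values HWTSTAMP_TX_OFF = 0 and HWTSTAMP_TX_ON = 1 as plain constants:

#include <stdio.h>

#define BIT(n) (1UL << (n))

#define HWTSTAMP_TX_OFF 0   /* values as in include/uapi/linux/net_tstamp.h */
#define HWTSTAMP_TX_ON  1

int main(void)
{
    unsigned long buggy = (BIT(1) << HWTSTAMP_TX_OFF) | (BIT(1) << HWTSTAMP_TX_ON);
    unsigned long fixed = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);

    printf("buggy mask: 0x%lx (bits 1 and 2 set)\n", buggy);   /* 0x6 */
    printf("fixed mask: 0x%lx (bits 0 and 1 set)\n", fixed);   /* 0x3 */
    return 0;
}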
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 41cd22a223dc..277f4de30375 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | |||
| @@ -4241,7 +4241,8 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev, | |||
| 4241 | return netdev; | 4241 | return netdev; |
| 4242 | 4242 | ||
| 4243 | err_cleanup_nic: | 4243 | err_cleanup_nic: |
| 4244 | profile->cleanup(priv); | 4244 | if (profile->cleanup) |
| 4245 | profile->cleanup(priv); | ||
| 4245 | free_netdev(netdev); | 4246 | free_netdev(netdev); |
| 4246 | 4247 | ||
| 4247 | return NULL; | 4248 | return NULL; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 79462c0368a0..46984a52a94b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | |||
| @@ -791,6 +791,8 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev, | |||
| 791 | params->tx_max_inline = mlx5e_get_max_inline_cap(mdev); | 791 | params->tx_max_inline = mlx5e_get_max_inline_cap(mdev); |
| 792 | params->num_tc = 1; | 792 | params->num_tc = 1; |
| 793 | params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; | 793 | params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; |
| 794 | |||
| 795 | mlx5_query_min_inline(mdev, ¶ms->tx_min_inline_mode); | ||
| 794 | } | 796 | } |
| 795 | 797 | ||
| 796 | static void mlx5e_build_rep_netdev(struct net_device *netdev) | 798 | static void mlx5e_build_rep_netdev(struct net_device *netdev) |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c index 02dd3a95ed8f..acf32fe952cd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c | |||
| @@ -183,28 +183,27 @@ static void mlx5e_am_exit_parking(struct mlx5e_rx_am *am) | |||
| 183 | mlx5e_am_step(am); | 183 | mlx5e_am_step(am); |
| 184 | } | 184 | } |
| 185 | 185 | ||
| 186 | #define IS_SIGNIFICANT_DIFF(val, ref) \ | ||
| 187 | (((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */ | ||
| 188 | |||
| 186 | static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr, | 189 | static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr, |
| 187 | struct mlx5e_rx_am_stats *prev) | 190 | struct mlx5e_rx_am_stats *prev) |
| 188 | { | 191 | { |
| 189 | int diff; | 192 | if (!prev->bpms) |
| 190 | 193 | return curr->bpms ? MLX5E_AM_STATS_BETTER : | |
| 191 | if (!prev->ppms) | ||
| 192 | return curr->ppms ? MLX5E_AM_STATS_BETTER : | ||
| 193 | MLX5E_AM_STATS_SAME; | 194 | MLX5E_AM_STATS_SAME; |
| 194 | 195 | ||
| 195 | diff = curr->ppms - prev->ppms; | 196 | if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms)) |
| 196 | if (((100 * abs(diff)) / prev->ppms) > 10) /* more than 10% diff */ | 197 | return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER : |
| 197 | return (diff > 0) ? MLX5E_AM_STATS_BETTER : | 198 | MLX5E_AM_STATS_WORSE; |
| 198 | MLX5E_AM_STATS_WORSE; | ||
| 199 | 199 | ||
| 200 | if (!prev->epms) | 200 | if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms)) |
| 201 | return curr->epms ? MLX5E_AM_STATS_WORSE : | 201 | return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER : |
| 202 | MLX5E_AM_STATS_SAME; | 202 | MLX5E_AM_STATS_WORSE; |
| 203 | 203 | ||
| 204 | diff = curr->epms - prev->epms; | 204 | if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms)) |
| 205 | if (((100 * abs(diff)) / prev->epms) > 10) /* more than 10% diff */ | 205 | return (curr->epms < prev->epms) ? MLX5E_AM_STATS_BETTER : |
| 206 | return (diff < 0) ? MLX5E_AM_STATS_BETTER : | 206 | MLX5E_AM_STATS_WORSE; |
| 207 | MLX5E_AM_STATS_WORSE; | ||
| 208 | 207 | ||
| 209 | return MLX5E_AM_STATS_SAME; | 208 | return MLX5E_AM_STATS_SAME; |
| 210 | } | 209 | } |
| @@ -266,10 +265,13 @@ static void mlx5e_am_sample(struct mlx5e_rq *rq, | |||
| 266 | { | 265 | { |
| 267 | s->time = ktime_get(); | 266 | s->time = ktime_get(); |
| 268 | s->pkt_ctr = rq->stats.packets; | 267 | s->pkt_ctr = rq->stats.packets; |
| 268 | s->byte_ctr = rq->stats.bytes; | ||
| 269 | s->event_ctr = rq->cq.event_ctr; | 269 | s->event_ctr = rq->cq.event_ctr; |
| 270 | } | 270 | } |
| 271 | 271 | ||
| 272 | #define MLX5E_AM_NEVENTS 64 | 272 | #define MLX5E_AM_NEVENTS 64 |
| 273 | #define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) | ||
| 274 | #define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1)) | ||
| 273 | 275 | ||
| 274 | static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start, | 276 | static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start, |
| 275 | struct mlx5e_rx_am_sample *end, | 277 | struct mlx5e_rx_am_sample *end, |
| @@ -277,13 +279,17 @@ static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start, | |||
| 277 | { | 279 | { |
| 278 | /* u32 holds up to 71 minutes, should be enough */ | 280 | /* u32 holds up to 71 minutes, should be enough */ |
| 279 | u32 delta_us = ktime_us_delta(end->time, start->time); | 281 | u32 delta_us = ktime_us_delta(end->time, start->time); |
| 280 | unsigned int npkts = end->pkt_ctr - start->pkt_ctr; | 282 | u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr); |
| 283 | u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr, | ||
| 284 | start->byte_ctr); | ||
| 281 | 285 | ||
| 282 | if (!delta_us) | 286 | if (!delta_us) |
| 283 | return; | 287 | return; |
| 284 | 288 | ||
| 285 | curr_stats->ppms = (npkts * USEC_PER_MSEC) / delta_us; | 289 | curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us); |
| 286 | curr_stats->epms = (MLX5E_AM_NEVENTS * USEC_PER_MSEC) / delta_us; | 290 | curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us); |
| 291 | curr_stats->epms = DIV_ROUND_UP(MLX5E_AM_NEVENTS * USEC_PER_MSEC, | ||
| 292 | delta_us); | ||
| 287 | } | 293 | } |
| 288 | 294 | ||
| 289 | void mlx5e_rx_am_work(struct work_struct *work) | 295 | void mlx5e_rx_am_work(struct work_struct *work) |
| @@ -308,7 +314,8 @@ void mlx5e_rx_am(struct mlx5e_rq *rq) | |||
| 308 | 314 | ||
| 309 | switch (am->state) { | 315 | switch (am->state) { |
| 310 | case MLX5E_AM_MEASURE_IN_PROGRESS: | 316 | case MLX5E_AM_MEASURE_IN_PROGRESS: |
| 311 | nevents = rq->cq.event_ctr - am->start_sample.event_ctr; | 317 | nevents = BIT_GAP(BITS_PER_TYPE(u16), rq->cq.event_ctr, |
| 318 | am->start_sample.event_ctr); | ||
| 312 | if (nevents < MLX5E_AM_NEVENTS) | 319 | if (nevents < MLX5E_AM_NEVENTS) |
| 313 | break; | 320 | break; |
| 314 | mlx5e_am_sample(rq, &end_sample); | 321 | mlx5e_am_sample(rq, &end_sample); |
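The adaptive-moderation sampling now also records a byte counter and computes all counter deltas with BIT_GAP(), which stays correct when a fixed-width counter wraps (the CQ event counter is only 16 bits), while the per-msec rates are rounded up with DIV_ROUND_UP(). A stand-alone check of both pieces of arithmetic:

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)             (1ULL << (n))
#define BITS_PER_TYPE(type)    (sizeof(type) * 8)
#define BIT_GAP(bits, end, start) \
    ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1))
#define DIV_ROUND_UP(n, d)     (((n) + (d) - 1) / (d))

int main(void)
{
    /* 16-bit event counter that wrapped from 65530 to 30: 36 events. */
    uint16_t start = 65530, end = 30;
    uint64_t nevents = BIT_GAP(BITS_PER_TYPE(uint16_t), end, start);

    /* Packets per msec over a 2500 us window, rounded up as the driver does. */
    uint64_t npkts = 1000, delta_us = 2500;
    uint64_t ppms = DIV_ROUND_UP(npkts * 1000, delta_us);

    printf("events across the wrap: %llu\n", (unsigned long long)nevents);
    printf("packets per msec:       %llu\n", (unsigned long long)ppms);
    return 0;
}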
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 53e4992d6511..f81c3aa60b46 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | |||
| @@ -417,20 +417,13 @@ struct mlx5e_stats { | |||
| 417 | }; | 417 | }; |
| 418 | 418 | ||
| 419 | static const struct counter_desc mlx5e_pme_status_desc[] = { | 419 | static const struct counter_desc mlx5e_pme_status_desc[] = { |
| 420 | { "module_plug", 0 }, | ||
| 421 | { "module_unplug", 8 }, | 420 | { "module_unplug", 8 }, |
| 422 | }; | 421 | }; |
| 423 | 422 | ||
| 424 | static const struct counter_desc mlx5e_pme_error_desc[] = { | 423 | static const struct counter_desc mlx5e_pme_error_desc[] = { |
| 425 | { "module_pwr_budget_exd", 0 }, /* power budget exceed */ | 424 | { "module_bus_stuck", 16 }, /* bus stuck (I2C or data shorted) */ |
| 426 | { "module_long_range", 8 }, /* long range for non MLNX cable */ | 425 | { "module_high_temp", 48 }, /* high temperature */ |
| 427 | { "module_bus_stuck", 16 }, /* bus stuck (I2C or data shorted) */ | ||
| 428 | { "module_no_eeprom", 24 }, /* no eeprom/retry time out */ | ||
| 429 | { "module_enforce_part", 32 }, /* enforce part number list */ | ||
| 430 | { "module_unknown_id", 40 }, /* unknown identifier */ | ||
| 431 | { "module_high_temp", 48 }, /* high temperature */ | ||
| 432 | { "module_bad_shorted", 56 }, /* bad or shorted cable/module */ | 426 | { "module_bad_shorted", 56 }, /* bad or shorted cable/module */ |
| 433 | { "module_unknown_status", 64 }, | ||
| 434 | }; | 427 | }; |
| 435 | 428 | ||
| 436 | #endif /* __MLX5_EN_STATS_H__ */ | 429 | #endif /* __MLX5_EN_STATS_H__ */ |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index ec63158ab643..9df9fc0d26f5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | |||
| @@ -895,7 +895,6 @@ static struct mlx5_fields fields[] = { | |||
| 895 | {MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0, 2, offsetof(struct pedit_headers, eth.h_source[4])}, | 895 | {MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0, 2, offsetof(struct pedit_headers, eth.h_source[4])}, |
| 896 | {MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE, 2, offsetof(struct pedit_headers, eth.h_proto)}, | 896 | {MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE, 2, offsetof(struct pedit_headers, eth.h_proto)}, |
| 897 | 897 | ||
| 898 | {MLX5_ACTION_IN_FIELD_OUT_IP_DSCP, 1, offsetof(struct pedit_headers, ip4.tos)}, | ||
| 899 | {MLX5_ACTION_IN_FIELD_OUT_IP_TTL, 1, offsetof(struct pedit_headers, ip4.ttl)}, | 898 | {MLX5_ACTION_IN_FIELD_OUT_IP_TTL, 1, offsetof(struct pedit_headers, ip4.ttl)}, |
| 900 | {MLX5_ACTION_IN_FIELD_OUT_SIPV4, 4, offsetof(struct pedit_headers, ip4.saddr)}, | 899 | {MLX5_ACTION_IN_FIELD_OUT_SIPV4, 4, offsetof(struct pedit_headers, ip4.saddr)}, |
| 901 | {MLX5_ACTION_IN_FIELD_OUT_DIPV4, 4, offsetof(struct pedit_headers, ip4.daddr)}, | 900 | {MLX5_ACTION_IN_FIELD_OUT_DIPV4, 4, offsetof(struct pedit_headers, ip4.daddr)}, |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index f991f669047e..a53e982a6863 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | |||
| @@ -906,21 +906,34 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode) | |||
| 906 | return 0; | 906 | return 0; |
| 907 | } | 907 | } |
| 908 | 908 | ||
| 909 | int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode) | 909 | static int mlx5_devlink_eswitch_check(struct devlink *devlink) |
| 910 | { | 910 | { |
| 911 | struct mlx5_core_dev *dev; | 911 | struct mlx5_core_dev *dev = devlink_priv(devlink); |
| 912 | u16 cur_mlx5_mode, mlx5_mode = 0; | ||
| 913 | 912 | ||
| 914 | dev = devlink_priv(devlink); | 913 | if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) |
| 914 | return -EOPNOTSUPP; | ||
| 915 | 915 | ||
| 916 | if (!MLX5_CAP_GEN(dev, vport_group_manager)) | 916 | if (!MLX5_CAP_GEN(dev, vport_group_manager)) |
| 917 | return -EOPNOTSUPP; | 917 | return -EOPNOTSUPP; |
| 918 | 918 | ||
| 919 | cur_mlx5_mode = dev->priv.eswitch->mode; | 919 | if (dev->priv.eswitch->mode == SRIOV_NONE) |
| 920 | |||
| 921 | if (cur_mlx5_mode == SRIOV_NONE) | ||
| 922 | return -EOPNOTSUPP; | 920 | return -EOPNOTSUPP; |
| 923 | 921 | ||
| 922 | return 0; | ||
| 923 | } | ||
| 924 | |||
| 925 | int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode) | ||
| 926 | { | ||
| 927 | struct mlx5_core_dev *dev = devlink_priv(devlink); | ||
| 928 | u16 cur_mlx5_mode, mlx5_mode = 0; | ||
| 929 | int err; | ||
| 930 | |||
| 931 | err = mlx5_devlink_eswitch_check(devlink); | ||
| 932 | if (err) | ||
| 933 | return err; | ||
| 934 | |||
| 935 | cur_mlx5_mode = dev->priv.eswitch->mode; | ||
| 936 | |||
| 924 | if (esw_mode_from_devlink(mode, &mlx5_mode)) | 937 | if (esw_mode_from_devlink(mode, &mlx5_mode)) |
| 925 | return -EINVAL; | 938 | return -EINVAL; |
| 926 | 939 | ||
| @@ -937,15 +950,12 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode) | |||
| 937 | 950 | ||
| 938 | int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) | 951 | int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) |
| 939 | { | 952 | { |
| 940 | struct mlx5_core_dev *dev; | 953 | struct mlx5_core_dev *dev = devlink_priv(devlink); |
| 941 | 954 | int err; | |
| 942 | dev = devlink_priv(devlink); | ||
| 943 | |||
| 944 | if (!MLX5_CAP_GEN(dev, vport_group_manager)) | ||
| 945 | return -EOPNOTSUPP; | ||
| 946 | 955 | ||
| 947 | if (dev->priv.eswitch->mode == SRIOV_NONE) | 956 | err = mlx5_devlink_eswitch_check(devlink); |
| 948 | return -EOPNOTSUPP; | 957 | if (err) |
| 958 | return err; | ||
| 949 | 959 | ||
| 950 | return esw_mode_to_devlink(dev->priv.eswitch->mode, mode); | 960 | return esw_mode_to_devlink(dev->priv.eswitch->mode, mode); |
| 951 | } | 961 | } |
| @@ -954,15 +964,12 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode) | |||
| 954 | { | 964 | { |
| 955 | struct mlx5_core_dev *dev = devlink_priv(devlink); | 965 | struct mlx5_core_dev *dev = devlink_priv(devlink); |
| 956 | struct mlx5_eswitch *esw = dev->priv.eswitch; | 966 | struct mlx5_eswitch *esw = dev->priv.eswitch; |
| 957 | int num_vports = esw->enabled_vports; | ||
| 958 | int err, vport; | 967 | int err, vport; |
| 959 | u8 mlx5_mode; | 968 | u8 mlx5_mode; |
| 960 | 969 | ||
| 961 | if (!MLX5_CAP_GEN(dev, vport_group_manager)) | 970 | err = mlx5_devlink_eswitch_check(devlink); |
| 962 | return -EOPNOTSUPP; | 971 | if (err) |
| 963 | 972 | return err; | |
| 964 | if (esw->mode == SRIOV_NONE) | ||
| 965 | return -EOPNOTSUPP; | ||
| 966 | 973 | ||
| 967 | switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) { | 974 | switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) { |
| 968 | case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: | 975 | case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: |
| @@ -985,7 +992,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode) | |||
| 985 | if (err) | 992 | if (err) |
| 986 | goto out; | 993 | goto out; |
| 987 | 994 | ||
| 988 | for (vport = 1; vport < num_vports; vport++) { | 995 | for (vport = 1; vport < esw->enabled_vports; vport++) { |
| 989 | err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode); | 996 | err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode); |
| 990 | if (err) { | 997 | if (err) { |
| 991 | esw_warn(dev, "Failed to set min inline on vport %d\n", | 998 | esw_warn(dev, "Failed to set min inline on vport %d\n", |
| @@ -1010,12 +1017,11 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode) | |||
| 1010 | { | 1017 | { |
| 1011 | struct mlx5_core_dev *dev = devlink_priv(devlink); | 1018 | struct mlx5_core_dev *dev = devlink_priv(devlink); |
| 1012 | struct mlx5_eswitch *esw = dev->priv.eswitch; | 1019 | struct mlx5_eswitch *esw = dev->priv.eswitch; |
| 1020 | int err; | ||
| 1013 | 1021 | ||
| 1014 | if (!MLX5_CAP_GEN(dev, vport_group_manager)) | 1022 | err = mlx5_devlink_eswitch_check(devlink); |
| 1015 | return -EOPNOTSUPP; | 1023 | if (err) |
| 1016 | 1024 | return err; | |
| 1017 | if (esw->mode == SRIOV_NONE) | ||
| 1018 | return -EOPNOTSUPP; | ||
| 1019 | 1025 | ||
| 1020 | return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode); | 1026 | return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode); |
| 1021 | } | 1027 | } |
| @@ -1062,11 +1068,9 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap) | |||
| 1062 | struct mlx5_eswitch *esw = dev->priv.eswitch; | 1068 | struct mlx5_eswitch *esw = dev->priv.eswitch; |
| 1063 | int err; | 1069 | int err; |
| 1064 | 1070 | ||
| 1065 | if (!MLX5_CAP_GEN(dev, vport_group_manager)) | 1071 | err = mlx5_devlink_eswitch_check(devlink); |
| 1066 | return -EOPNOTSUPP; | 1072 | if (err) |
| 1067 | 1073 | return err; | |
| 1068 | if (esw->mode == SRIOV_NONE) | ||
| 1069 | return -EOPNOTSUPP; | ||
| 1070 | 1074 | ||
| 1071 | if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE && | 1075 | if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE && |
| 1072 | (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) || | 1076 | (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) || |
| @@ -1105,12 +1109,11 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap) | |||
| 1105 | { | 1109 | { |
| 1106 | struct mlx5_core_dev *dev = devlink_priv(devlink); | 1110 | struct mlx5_core_dev *dev = devlink_priv(devlink); |
| 1107 | struct mlx5_eswitch *esw = dev->priv.eswitch; | 1111 | struct mlx5_eswitch *esw = dev->priv.eswitch; |
| 1112 | int err; | ||
| 1108 | 1113 | ||
| 1109 | if (!MLX5_CAP_GEN(dev, vport_group_manager)) | 1114 | err = mlx5_devlink_eswitch_check(devlink); |
| 1110 | return -EOPNOTSUPP; | 1115 | if (err) |
| 1111 | 1116 | return err; | |
| 1112 | if (esw->mode == SRIOV_NONE) | ||
| 1113 | return -EOPNOTSUPP; | ||
| 1114 | 1117 | ||
| 1115 | *encap = esw->offloads.encap; | 1118 | *encap = esw->offloads.encap; |
| 1116 | return 0; | 1119 | return 0; |
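The devlink eswitch callbacks above previously repeated the same vport_group_manager and SRIOV_NONE guards; the patch folds them, together with a new port-type check, into mlx5_devlink_eswitch_check(). A rough standalone sketch of that shared-precondition pattern (the structure and fields are illustrative, not the mlx5 types):

    #include <errno.h>
    #include <stdio.h>

    struct dev { int is_eth; int is_manager; int sriov_enabled; int mode; };

    /* one place holds the preconditions and their return code */
    static int eswitch_check(const struct dev *d)
    {
        if (!d->is_eth || !d->is_manager || !d->sriov_enabled)
            return -EOPNOTSUPP;
        return 0;
    }

    static int eswitch_mode_get(const struct dev *d, int *mode)
    {
        int err = eswitch_check(d);
        if (err)
            return err;
        *mode = d->mode;
        return 0;
    }

    static int eswitch_mode_set(struct dev *d, int mode)
    {
        int err = eswitch_check(d);
        if (err)
            return err;
        d->mode = mode;
        return 0;
    }

    int main(void)
    {
        struct dev d = { 1, 1, 0, 0 };
        printf("%d\n", eswitch_mode_set(&d, 1));   /* rejected: SR-IOV off */
        d.sriov_enabled = 1;
        printf("%d %d\n", eswitch_mode_set(&d, 1), eswitch_mode_get(&d, &d.mode));
        return 0;
    }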
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 0e487e8ca634..8f5125ccd8d4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | |||
| @@ -862,7 +862,7 @@ struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace | |||
| 862 | ft_attr.level = level; | 862 | ft_attr.level = level; |
| 863 | ft_attr.prio = prio; | 863 | ft_attr.prio = prio; |
| 864 | 864 | ||
| 865 | return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, 0); | 865 | return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, vport); |
| 866 | } | 866 | } |
| 867 | 867 | ||
| 868 | struct mlx5_flow_table* | 868 | struct mlx5_flow_table* |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 44f59b1d6f0f..f27f84ffbc85 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c | |||
| @@ -275,10 +275,8 @@ static void poll_health(unsigned long data) | |||
| 275 | struct mlx5_core_health *health = &dev->priv.health; | 275 | struct mlx5_core_health *health = &dev->priv.health; |
| 276 | u32 count; | 276 | u32 count; |
| 277 | 277 | ||
| 278 | if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { | 278 | if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) |
| 279 | mod_timer(&health->timer, get_next_poll_jiffies()); | 279 | goto out; |
| 280 | return; | ||
| 281 | } | ||
| 282 | 280 | ||
| 283 | count = ioread32be(health->health_counter); | 281 | count = ioread32be(health->health_counter); |
| 284 | if (count == health->prev) | 282 | if (count == health->prev) |
| @@ -290,8 +288,6 @@ static void poll_health(unsigned long data) | |||
| 290 | if (health->miss_counter == MAX_MISSES) { | 288 | if (health->miss_counter == MAX_MISSES) { |
| 291 | dev_err(&dev->pdev->dev, "device's health compromised - reached miss count\n"); | 289 | dev_err(&dev->pdev->dev, "device's health compromised - reached miss count\n"); |
| 292 | print_health_info(dev); | 290 | print_health_info(dev); |
| 293 | } else { | ||
| 294 | mod_timer(&health->timer, get_next_poll_jiffies()); | ||
| 295 | } | 291 | } |
| 296 | 292 | ||
| 297 | if (in_fatal(dev) && !health->sick) { | 293 | if (in_fatal(dev) && !health->sick) { |
| @@ -305,6 +301,9 @@ static void poll_health(unsigned long data) | |||
| 305 | "new health works are not permitted at this stage\n"); | 301 | "new health works are not permitted at this stage\n"); |
| 306 | spin_unlock(&health->wq_lock); | 302 | spin_unlock(&health->wq_lock); |
| 307 | } | 303 | } |
| 304 | |||
| 305 | out: | ||
| 306 | mod_timer(&health->timer, get_next_poll_jiffies()); | ||
| 308 | } | 307 | } |
| 309 | 308 | ||
| 310 | void mlx5_start_health_poll(struct mlx5_core_dev *dev) | 309 | void mlx5_start_health_poll(struct mlx5_core_dev *dev) |
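The poll_health() change above drops the scattered mod_timer() calls and re-arms the timer from a single exit point, so the health poll keeps running even in the internal-error and miss-count paths. A toy standalone sketch of the single re-arm point (plain C, not the driver code):

    #include <stdio.h>

    static int device_in_error;
    static unsigned int polls;

    static void rearm(void) { printf("next poll scheduled\n"); }

    static void poll_health(void)
    {
        if (device_in_error)
            goto out;                /* skip the checks, but still re-arm */

        if (++polls % 5 == 0)
            printf("periodic health report\n");

    out:
        rearm();                     /* the only place the timer is re-queued */
    }

    int main(void)
    {
        poll_health();
        device_in_error = 1;
        poll_health();               /* the error path also re-arms now */
        return 0;
    }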
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index fe5546bb4153..13be264587f1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c | |||
| @@ -175,8 +175,9 @@ static struct mlx5_profile profile[] = { | |||
| 175 | }, | 175 | }, |
| 176 | }; | 176 | }; |
| 177 | 177 | ||
| 178 | #define FW_INIT_TIMEOUT_MILI 2000 | 178 | #define FW_INIT_TIMEOUT_MILI 2000 |
| 179 | #define FW_INIT_WAIT_MS 2 | 179 | #define FW_INIT_WAIT_MS 2 |
| 180 | #define FW_PRE_INIT_TIMEOUT_MILI 10000 | ||
| 180 | 181 | ||
| 181 | static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili) | 182 | static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili) |
| 182 | { | 183 | { |
| @@ -537,8 +538,10 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) | |||
| 537 | /* disable cmdif checksum */ | 538 | /* disable cmdif checksum */ |
| 538 | MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0); | 539 | MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0); |
| 539 | 540 | ||
| 540 | /* If the HCA supports 4K UARs use it */ | 541 | /* Enable 4K UAR only when HCA supports it and page size is bigger |
| 541 | if (MLX5_CAP_GEN_MAX(dev, uar_4k)) | 542 | * than 4K. |
| 543 | */ | ||
| 544 | if (MLX5_CAP_GEN_MAX(dev, uar_4k) && PAGE_SIZE > 4096) | ||
| 542 | MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1); | 545 | MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1); |
| 543 | 546 | ||
| 544 | MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12); | 547 | MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12); |
| @@ -621,10 +624,9 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i) | |||
| 621 | cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node), | 624 | cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node), |
| 622 | priv->irq_info[i].mask); | 625 | priv->irq_info[i].mask); |
| 623 | 626 | ||
| 624 | #ifdef CONFIG_SMP | 627 | if (IS_ENABLED(CONFIG_SMP) && |
| 625 | if (irq_set_affinity_hint(irq, priv->irq_info[i].mask)) | 628 | irq_set_affinity_hint(irq, priv->irq_info[i].mask)) |
| 626 | mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq); | 629 | mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq); |
| 627 | #endif | ||
| 628 | 630 | ||
| 629 | return 0; | 631 | return 0; |
| 630 | } | 632 | } |
| @@ -1012,6 +1014,15 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, | |||
| 1012 | */ | 1014 | */ |
| 1013 | dev->state = MLX5_DEVICE_STATE_UP; | 1015 | dev->state = MLX5_DEVICE_STATE_UP; |
| 1014 | 1016 | ||
| 1017 | /* wait for firmware to accept initialization segments configurations | ||
| 1018 | */ | ||
| 1019 | err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI); | ||
| 1020 | if (err) { | ||
| 1021 | dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n", | ||
| 1022 | FW_PRE_INIT_TIMEOUT_MILI); | ||
| 1023 | goto out; | ||
| 1024 | } | ||
| 1025 | |||
| 1015 | err = mlx5_cmd_init(dev); | 1026 | err = mlx5_cmd_init(dev); |
| 1016 | if (err) { | 1027 | if (err) { |
| 1017 | dev_err(&pdev->dev, "Failed initializing command interface, aborting\n"); | 1028 | dev_err(&pdev->dev, "Failed initializing command interface, aborting\n"); |
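Besides the new pre-init firmware wait and the PAGE_SIZE > 4096 guard for 4K UARs, the affinity-hint hunk above swaps an #ifdef CONFIG_SMP block for IS_ENABLED(CONFIG_SMP), which keeps the code compiled and type-checked in both configurations. A trimmed-down user-space rendition of the IS_ENABLED() mechanism (simplified: enabled options are assumed to be defined to 1 and the tristate/module case is ignored):

    #include <stdio.h>

    /* Turns "CONFIG_FOO defined to 1 / not defined" into a constant 0 or 1
     * usable inside an ordinary if (), so dead branches still get compiled. */
    #define __ARG_PLACEHOLDER_1 0,
    #define __take_second_arg(__ignored, val, ...) val
    #define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
    #define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
    #define IS_ENABLED(option) ___is_defined(option)

    #define CONFIG_SMP 1          /* comment this out to take the other branch */

    static int set_affinity_hint(void) { return -1; /* pretend it failed */ }

    int main(void)
    {
        if (IS_ENABLED(CONFIG_SMP) && set_affinity_hint())
            printf("irq_set_affinity_hint failed\n");
        else
            printf("nothing to do (or it worked)\n");
        return 0;
    }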
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 9f89c4137d21..0744452a0b18 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | |||
| @@ -3334,6 +3334,9 @@ static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev, | |||
| 3334 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev); | 3334 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev); |
| 3335 | u16 vid = vlan_dev_vlan_id(vlan_dev); | 3335 | u16 vid = vlan_dev_vlan_id(vlan_dev); |
| 3336 | 3336 | ||
| 3337 | if (netif_is_bridge_port(vlan_dev)) | ||
| 3338 | return 0; | ||
| 3339 | |||
| 3337 | if (mlxsw_sp_port_dev_check(real_dev)) | 3340 | if (mlxsw_sp_port_dev_check(real_dev)) |
| 3338 | return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event, | 3341 | return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event, |
| 3339 | vid); | 3342 | vid); |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 483241b4b05d..a672f6a860dc 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c | |||
| @@ -2956,7 +2956,7 @@ static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn, | |||
| 2956 | qed_wr(p_hwfn, | 2956 | qed_wr(p_hwfn, |
| 2957 | p_ptt, | 2957 | p_ptt, |
| 2958 | s_storm_defs[storm_id].cm_ctx_wr_addr, | 2958 | s_storm_defs[storm_id].cm_ctx_wr_addr, |
| 2959 | BIT(9) | lid); | 2959 | (i << 9) | lid); |
| 2960 | *(dump_buf + offset) = qed_rd(p_hwfn, | 2960 | *(dump_buf + offset) = qed_rd(p_hwfn, |
| 2961 | p_ptt, | 2961 | p_ptt, |
| 2962 | rd_reg_addr); | 2962 | rd_reg_addr); |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 537d1236a4fe..715b3aaf83ac 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c | |||
| @@ -1730,7 +1730,8 @@ void qed_get_protocol_stats(struct qed_dev *cdev, | |||
| 1730 | qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats); | 1730 | qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats); |
| 1731 | break; | 1731 | break; |
| 1732 | default: | 1732 | default: |
| 1733 | DP_ERR(cdev, "Invalid protocol type = %d\n", type); | 1733 | DP_VERBOSE(cdev, QED_MSG_SP, |
| 1734 | "Invalid protocol type = %d\n", type); | ||
| 1734 | return; | 1735 | return; |
| 1735 | } | 1736 | } |
| 1736 | } | 1737 | } |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 7245b1072518..81312924df14 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | |||
| @@ -1824,22 +1824,44 @@ struct qlcnic_hardware_ops { | |||
| 1824 | u32 (*get_cap_size)(void *, int); | 1824 | u32 (*get_cap_size)(void *, int); |
| 1825 | void (*set_sys_info)(void *, int, u32); | 1825 | void (*set_sys_info)(void *, int, u32); |
| 1826 | void (*store_cap_mask)(void *, u32); | 1826 | void (*store_cap_mask)(void *, u32); |
| 1827 | bool (*encap_rx_offload) (struct qlcnic_adapter *adapter); | ||
| 1828 | bool (*encap_tx_offload) (struct qlcnic_adapter *adapter); | ||
| 1827 | }; | 1829 | }; |
| 1828 | 1830 | ||
| 1829 | extern struct qlcnic_nic_template qlcnic_vf_ops; | 1831 | extern struct qlcnic_nic_template qlcnic_vf_ops; |
| 1830 | 1832 | ||
| 1831 | static inline bool qlcnic_encap_tx_offload(struct qlcnic_adapter *adapter) | 1833 | static inline bool qlcnic_83xx_encap_tx_offload(struct qlcnic_adapter *adapter) |
| 1832 | { | 1834 | { |
| 1833 | return adapter->ahw->extra_capability[0] & | 1835 | return adapter->ahw->extra_capability[0] & |
| 1834 | QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD; | 1836 | QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD; |
| 1835 | } | 1837 | } |
| 1836 | 1838 | ||
| 1837 | static inline bool qlcnic_encap_rx_offload(struct qlcnic_adapter *adapter) | 1839 | static inline bool qlcnic_83xx_encap_rx_offload(struct qlcnic_adapter *adapter) |
| 1838 | { | 1840 | { |
| 1839 | return adapter->ahw->extra_capability[0] & | 1841 | return adapter->ahw->extra_capability[0] & |
| 1840 | QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD; | 1842 | QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD; |
| 1841 | } | 1843 | } |
| 1842 | 1844 | ||
| 1845 | static inline bool qlcnic_82xx_encap_tx_offload(struct qlcnic_adapter *adapter) | ||
| 1846 | { | ||
| 1847 | return false; | ||
| 1848 | } | ||
| 1849 | |||
| 1850 | static inline bool qlcnic_82xx_encap_rx_offload(struct qlcnic_adapter *adapter) | ||
| 1851 | { | ||
| 1852 | return false; | ||
| 1853 | } | ||
| 1854 | |||
| 1855 | static inline bool qlcnic_encap_rx_offload(struct qlcnic_adapter *adapter) | ||
| 1856 | { | ||
| 1857 | return adapter->ahw->hw_ops->encap_rx_offload(adapter); | ||
| 1858 | } | ||
| 1859 | |||
| 1860 | static inline bool qlcnic_encap_tx_offload(struct qlcnic_adapter *adapter) | ||
| 1861 | { | ||
| 1862 | return adapter->ahw->hw_ops->encap_tx_offload(adapter); | ||
| 1863 | } | ||
| 1864 | |||
| 1843 | static inline int qlcnic_start_firmware(struct qlcnic_adapter *adapter) | 1865 | static inline int qlcnic_start_firmware(struct qlcnic_adapter *adapter) |
| 1844 | { | 1866 | { |
| 1845 | return adapter->nic_ops->start_firmware(adapter); | 1867 | return adapter->nic_ops->start_firmware(adapter); |
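The qlcnic change above turns the encapsulation-offload helpers into per-ASIC callbacks in the hw_ops table, with 82xx stubs that simply report no support. A small standalone sketch of that dispatch pattern (types, names and capability bits are illustrative only):

    #include <stdbool.h>
    #include <stdio.h>

    struct adapter;

    struct hw_ops {
        bool (*encap_rx_offload)(struct adapter *a);
        bool (*encap_tx_offload)(struct adapter *a);
    };

    struct adapter {
        const struct hw_ops *ops;
        unsigned int caps;
    };

    #define CAP_ENCAP_RX 0x1
    #define CAP_ENCAP_TX 0x2

    /* 83xx-style: consult firmware capability bits */
    static bool hw83_encap_rx(struct adapter *a) { return a->caps & CAP_ENCAP_RX; }
    static bool hw83_encap_tx(struct adapter *a) { return a->caps & CAP_ENCAP_TX; }

    /* 82xx-style: the feature simply does not exist */
    static bool hw82_encap_rx(struct adapter *a) { (void)a; return false; }
    static bool hw82_encap_tx(struct adapter *a) { (void)a; return false; }

    static const struct hw_ops hw83 = { hw83_encap_rx, hw83_encap_tx };
    static const struct hw_ops hw82 = { hw82_encap_rx, hw82_encap_tx };

    /* generic wrapper, analogous to qlcnic_encap_rx_offload() */
    static bool encap_rx_offload(struct adapter *a) { return a->ops->encap_rx_offload(a); }

    int main(void)
    {
        struct adapter new_asic = { &hw83, CAP_ENCAP_RX };
        struct adapter old_asic = { &hw82, 0 };

        printf("83xx rx offload: %d\n", encap_rx_offload(&new_asic));
        printf("82xx rx offload: %d\n", encap_rx_offload(&old_asic));
        return 0;
    }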
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 4fb68797630e..f7080d0ab874 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | |||
| @@ -242,6 +242,8 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = { | |||
| 242 | .get_cap_size = qlcnic_83xx_get_cap_size, | 242 | .get_cap_size = qlcnic_83xx_get_cap_size, |
| 243 | .set_sys_info = qlcnic_83xx_set_sys_info, | 243 | .set_sys_info = qlcnic_83xx_set_sys_info, |
| 244 | .store_cap_mask = qlcnic_83xx_store_cap_mask, | 244 | .store_cap_mask = qlcnic_83xx_store_cap_mask, |
| 245 | .encap_rx_offload = qlcnic_83xx_encap_rx_offload, | ||
| 246 | .encap_tx_offload = qlcnic_83xx_encap_tx_offload, | ||
| 245 | }; | 247 | }; |
| 246 | 248 | ||
| 247 | static struct qlcnic_nic_template qlcnic_83xx_ops = { | 249 | static struct qlcnic_nic_template qlcnic_83xx_ops = { |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c index 838cc0ceafd8..7848cf04b29a 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | |||
| @@ -341,7 +341,7 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg) | |||
| 341 | } | 341 | } |
| 342 | return -EIO; | 342 | return -EIO; |
| 343 | } | 343 | } |
| 344 | usleep_range(1000, 1500); | 344 | udelay(1200); |
| 345 | } | 345 | } |
| 346 | 346 | ||
| 347 | if (id_reg) | 347 | if (id_reg) |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index b6628aaa6e4a..1b5f7d57b6f8 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | |||
| @@ -632,6 +632,8 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = { | |||
| 632 | .get_cap_size = qlcnic_82xx_get_cap_size, | 632 | .get_cap_size = qlcnic_82xx_get_cap_size, |
| 633 | .set_sys_info = qlcnic_82xx_set_sys_info, | 633 | .set_sys_info = qlcnic_82xx_set_sys_info, |
| 634 | .store_cap_mask = qlcnic_82xx_store_cap_mask, | 634 | .store_cap_mask = qlcnic_82xx_store_cap_mask, |
| 635 | .encap_rx_offload = qlcnic_82xx_encap_rx_offload, | ||
| 636 | .encap_tx_offload = qlcnic_82xx_encap_tx_offload, | ||
| 635 | }; | 637 | }; |
| 636 | 638 | ||
| 637 | static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter) | 639 | static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter) |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 2f656f395f39..c58180f40844 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | |||
| @@ -77,6 +77,8 @@ static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = { | |||
| 77 | .free_mac_list = qlcnic_sriov_vf_free_mac_list, | 77 | .free_mac_list = qlcnic_sriov_vf_free_mac_list, |
| 78 | .enable_sds_intr = qlcnic_83xx_enable_sds_intr, | 78 | .enable_sds_intr = qlcnic_83xx_enable_sds_intr, |
| 79 | .disable_sds_intr = qlcnic_83xx_disable_sds_intr, | 79 | .disable_sds_intr = qlcnic_83xx_disable_sds_intr, |
| 80 | .encap_rx_offload = qlcnic_83xx_encap_rx_offload, | ||
| 81 | .encap_tx_offload = qlcnic_83xx_encap_tx_offload, | ||
| 80 | }; | 82 | }; |
| 81 | 83 | ||
| 82 | static struct qlcnic_nic_template qlcnic_sriov_vf_ops = { | 84 | static struct qlcnic_nic_template qlcnic_sriov_vf_ops = { |
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index cc065ffbe4b5..bcd4708b3745 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c | |||
| @@ -931,7 +931,7 @@ int emac_mac_up(struct emac_adapter *adpt) | |||
| 931 | emac_mac_config(adpt); | 931 | emac_mac_config(adpt); |
| 932 | emac_mac_rx_descs_refill(adpt, &adpt->rx_q); | 932 | emac_mac_rx_descs_refill(adpt, &adpt->rx_q); |
| 933 | 933 | ||
| 934 | adpt->phydev->irq = PHY_IGNORE_INTERRUPT; | 934 | adpt->phydev->irq = PHY_POLL; |
| 935 | ret = phy_connect_direct(netdev, adpt->phydev, emac_adjust_link, | 935 | ret = phy_connect_direct(netdev, adpt->phydev, emac_adjust_link, |
| 936 | PHY_INTERFACE_MODE_SGMII); | 936 | PHY_INTERFACE_MODE_SGMII); |
| 937 | if (ret) { | 937 | if (ret) { |
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.c b/drivers/net/ethernet/qualcomm/emac/emac-phy.c index 441c19366489..18461fcb9815 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-phy.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.c | |||
| @@ -13,15 +13,11 @@ | |||
| 13 | /* Qualcomm Technologies, Inc. EMAC PHY Controller driver. | 13 | /* Qualcomm Technologies, Inc. EMAC PHY Controller driver. |
| 14 | */ | 14 | */ |
| 15 | 15 | ||
| 16 | #include <linux/module.h> | ||
| 17 | #include <linux/of.h> | ||
| 18 | #include <linux/of_net.h> | ||
| 19 | #include <linux/of_mdio.h> | 16 | #include <linux/of_mdio.h> |
| 20 | #include <linux/phy.h> | 17 | #include <linux/phy.h> |
| 21 | #include <linux/iopoll.h> | 18 | #include <linux/iopoll.h> |
| 22 | #include <linux/acpi.h> | 19 | #include <linux/acpi.h> |
| 23 | #include "emac.h" | 20 | #include "emac.h" |
| 24 | #include "emac-mac.h" | ||
| 25 | 21 | ||
| 26 | /* EMAC base register offsets */ | 22 | /* EMAC base register offsets */ |
| 27 | #define EMAC_MDIO_CTRL 0x001414 | 23 | #define EMAC_MDIO_CTRL 0x001414 |
| @@ -52,62 +48,10 @@ | |||
| 52 | 48 | ||
| 53 | #define MDIO_WAIT_TIMES 1000 | 49 | #define MDIO_WAIT_TIMES 1000 |
| 54 | 50 | ||
| 55 | #define EMAC_LINK_SPEED_DEFAULT (\ | ||
| 56 | EMAC_LINK_SPEED_10_HALF |\ | ||
| 57 | EMAC_LINK_SPEED_10_FULL |\ | ||
| 58 | EMAC_LINK_SPEED_100_HALF |\ | ||
| 59 | EMAC_LINK_SPEED_100_FULL |\ | ||
| 60 | EMAC_LINK_SPEED_1GB_FULL) | ||
| 61 | |||
| 62 | /** | ||
| 63 | * emac_phy_mdio_autopoll_disable() - disable mdio autopoll | ||
| 64 | * @adpt: the emac adapter | ||
| 65 | * | ||
| 66 | * The autopoll feature takes over the MDIO bus. In order for | ||
| 67 | * the PHY driver to be able to talk to the PHY over the MDIO | ||
| 68 | * bus, we need to temporarily disable the autopoll feature. | ||
| 69 | */ | ||
| 70 | static int emac_phy_mdio_autopoll_disable(struct emac_adapter *adpt) | ||
| 71 | { | ||
| 72 | u32 val; | ||
| 73 | |||
| 74 | /* disable autopoll */ | ||
| 75 | emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, MDIO_AP_EN, 0); | ||
| 76 | |||
| 77 | /* wait for any mdio polling to complete */ | ||
| 78 | if (!readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, val, | ||
| 79 | !(val & MDIO_BUSY), 100, MDIO_WAIT_TIMES * 100)) | ||
| 80 | return 0; | ||
| 81 | |||
| 82 | /* failed to disable; ensure it is enabled before returning */ | ||
| 83 | emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, 0, MDIO_AP_EN); | ||
| 84 | |||
| 85 | return -EBUSY; | ||
| 86 | } | ||
| 87 | |||
| 88 | /** | ||
| 89 | * emac_phy_mdio_autopoll_disable() - disable mdio autopoll | ||
| 90 | * @adpt: the emac adapter | ||
| 91 | * | ||
| 92 | * The EMAC has the ability to poll the external PHY on the MDIO | ||
| 93 | * bus for link state changes. This eliminates the need for the | ||
| 94 | * driver to poll the phy. If if the link state does change, | ||
| 95 | * the EMAC issues an interrupt on behalf of the PHY. | ||
| 96 | */ | ||
| 97 | static void emac_phy_mdio_autopoll_enable(struct emac_adapter *adpt) | ||
| 98 | { | ||
| 99 | emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, 0, MDIO_AP_EN); | ||
| 100 | } | ||
| 101 | |||
| 102 | static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum) | 51 | static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum) |
| 103 | { | 52 | { |
| 104 | struct emac_adapter *adpt = bus->priv; | 53 | struct emac_adapter *adpt = bus->priv; |
| 105 | u32 reg; | 54 | u32 reg; |
| 106 | int ret; | ||
| 107 | |||
| 108 | ret = emac_phy_mdio_autopoll_disable(adpt); | ||
| 109 | if (ret) | ||
| 110 | return ret; | ||
| 111 | 55 | ||
| 112 | emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK, | 56 | emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK, |
| 113 | (addr << PHY_ADDR_SHFT)); | 57 | (addr << PHY_ADDR_SHFT)); |
| @@ -122,24 +66,15 @@ static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum) | |||
| 122 | if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg, | 66 | if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg, |
| 123 | !(reg & (MDIO_START | MDIO_BUSY)), | 67 | !(reg & (MDIO_START | MDIO_BUSY)), |
| 124 | 100, MDIO_WAIT_TIMES * 100)) | 68 | 100, MDIO_WAIT_TIMES * 100)) |
| 125 | ret = -EIO; | 69 | return -EIO; |
| 126 | else | ||
| 127 | ret = (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK; | ||
| 128 | 70 | ||
| 129 | emac_phy_mdio_autopoll_enable(adpt); | 71 | return (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK; |
| 130 | |||
| 131 | return ret; | ||
| 132 | } | 72 | } |
| 133 | 73 | ||
| 134 | static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val) | 74 | static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val) |
| 135 | { | 75 | { |
| 136 | struct emac_adapter *adpt = bus->priv; | 76 | struct emac_adapter *adpt = bus->priv; |
| 137 | u32 reg; | 77 | u32 reg; |
| 138 | int ret; | ||
| 139 | |||
| 140 | ret = emac_phy_mdio_autopoll_disable(adpt); | ||
| 141 | if (ret) | ||
| 142 | return ret; | ||
| 143 | 78 | ||
| 144 | emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK, | 79 | emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK, |
| 145 | (addr << PHY_ADDR_SHFT)); | 80 | (addr << PHY_ADDR_SHFT)); |
| @@ -155,11 +90,9 @@ static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val) | |||
| 155 | if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg, | 90 | if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg, |
| 156 | !(reg & (MDIO_START | MDIO_BUSY)), 100, | 91 | !(reg & (MDIO_START | MDIO_BUSY)), 100, |
| 157 | MDIO_WAIT_TIMES * 100)) | 92 | MDIO_WAIT_TIMES * 100)) |
| 158 | ret = -EIO; | 93 | return -EIO; |
| 159 | 94 | ||
| 160 | emac_phy_mdio_autopoll_enable(adpt); | 95 | return 0; |
| 161 | |||
| 162 | return ret; | ||
| 163 | } | 96 | } |
| 164 | 97 | ||
| 165 | /* Configure the MDIO bus and connect the external PHY */ | 98 | /* Configure the MDIO bus and connect the external PHY */ |
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 28a8cdc36485..98a326faea29 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c | |||
| @@ -50,19 +50,7 @@ | |||
| 50 | #define DMAR_DLY_CNT_DEF 15 | 50 | #define DMAR_DLY_CNT_DEF 15 |
| 51 | #define DMAW_DLY_CNT_DEF 4 | 51 | #define DMAW_DLY_CNT_DEF 4 |
| 52 | 52 | ||
| 53 | #define IMR_NORMAL_MASK (\ | 53 | #define IMR_NORMAL_MASK (ISR_ERROR | ISR_OVER | ISR_TX_PKT) |
| 54 | ISR_ERROR |\ | ||
| 55 | ISR_GPHY_LINK |\ | ||
| 56 | ISR_TX_PKT |\ | ||
| 57 | GPHY_WAKEUP_INT) | ||
| 58 | |||
| 59 | #define IMR_EXTENDED_MASK (\ | ||
| 60 | SW_MAN_INT |\ | ||
| 61 | ISR_OVER |\ | ||
| 62 | ISR_ERROR |\ | ||
| 63 | ISR_GPHY_LINK |\ | ||
| 64 | ISR_TX_PKT |\ | ||
| 65 | GPHY_WAKEUP_INT) | ||
| 66 | 54 | ||
| 67 | #define ISR_TX_PKT (\ | 55 | #define ISR_TX_PKT (\ |
| 68 | TX_PKT_INT |\ | 56 | TX_PKT_INT |\ |
| @@ -70,10 +58,6 @@ | |||
| 70 | TX_PKT_INT2 |\ | 58 | TX_PKT_INT2 |\ |
| 71 | TX_PKT_INT3) | 59 | TX_PKT_INT3) |
| 72 | 60 | ||
| 73 | #define ISR_GPHY_LINK (\ | ||
| 74 | GPHY_LINK_UP_INT |\ | ||
| 75 | GPHY_LINK_DOWN_INT) | ||
| 76 | |||
| 77 | #define ISR_OVER (\ | 61 | #define ISR_OVER (\ |
| 78 | RFD0_UR_INT |\ | 62 | RFD0_UR_INT |\ |
| 79 | RFD1_UR_INT |\ | 63 | RFD1_UR_INT |\ |
| @@ -187,10 +171,6 @@ irqreturn_t emac_isr(int _irq, void *data) | |||
| 187 | if (status & ISR_OVER) | 171 | if (status & ISR_OVER) |
| 188 | net_warn_ratelimited("warning: TX/RX overflow\n"); | 172 | net_warn_ratelimited("warning: TX/RX overflow\n"); |
| 189 | 173 | ||
| 190 | /* link event */ | ||
| 191 | if (status & ISR_GPHY_LINK) | ||
| 192 | phy_mac_interrupt(adpt->phydev, !!(status & GPHY_LINK_UP_INT)); | ||
| 193 | |||
| 194 | exit: | 174 | exit: |
| 195 | /* enable the interrupt */ | 175 | /* enable the interrupt */ |
| 196 | writel(irq->mask, adpt->base + EMAC_INT_MASK); | 176 | writel(irq->mask, adpt->base + EMAC_INT_MASK); |
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 3cd7989c007d..784782da3a85 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c | |||
| @@ -230,18 +230,6 @@ static void ravb_ring_free(struct net_device *ndev, int q) | |||
| 230 | int ring_size; | 230 | int ring_size; |
| 231 | int i; | 231 | int i; |
| 232 | 232 | ||
| 233 | /* Free RX skb ringbuffer */ | ||
| 234 | if (priv->rx_skb[q]) { | ||
| 235 | for (i = 0; i < priv->num_rx_ring[q]; i++) | ||
| 236 | dev_kfree_skb(priv->rx_skb[q][i]); | ||
| 237 | } | ||
| 238 | kfree(priv->rx_skb[q]); | ||
| 239 | priv->rx_skb[q] = NULL; | ||
| 240 | |||
| 241 | /* Free aligned TX buffers */ | ||
| 242 | kfree(priv->tx_align[q]); | ||
| 243 | priv->tx_align[q] = NULL; | ||
| 244 | |||
| 245 | if (priv->rx_ring[q]) { | 233 | if (priv->rx_ring[q]) { |
| 246 | for (i = 0; i < priv->num_rx_ring[q]; i++) { | 234 | for (i = 0; i < priv->num_rx_ring[q]; i++) { |
| 247 | struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i]; | 235 | struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i]; |
| @@ -270,6 +258,18 @@ static void ravb_ring_free(struct net_device *ndev, int q) | |||
| 270 | priv->tx_ring[q] = NULL; | 258 | priv->tx_ring[q] = NULL; |
| 271 | } | 259 | } |
| 272 | 260 | ||
| 261 | /* Free RX skb ringbuffer */ | ||
| 262 | if (priv->rx_skb[q]) { | ||
| 263 | for (i = 0; i < priv->num_rx_ring[q]; i++) | ||
| 264 | dev_kfree_skb(priv->rx_skb[q][i]); | ||
| 265 | } | ||
| 266 | kfree(priv->rx_skb[q]); | ||
| 267 | priv->rx_skb[q] = NULL; | ||
| 268 | |||
| 269 | /* Free aligned TX buffers */ | ||
| 270 | kfree(priv->tx_align[q]); | ||
| 271 | priv->tx_align[q] = NULL; | ||
| 272 | |||
| 273 | /* Free TX skb ringbuffer. | 273 | /* Free TX skb ringbuffer. |
| 274 | * SKBs are freed by ravb_tx_free() call above. | 274 | * SKBs are freed by ravb_tx_free() call above. |
| 275 | */ | 275 | */ |
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c index 2ae852454780..a9ce82d3e9cf 100644 --- a/drivers/net/ethernet/rocker/rocker_ofdpa.c +++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c | |||
| @@ -1505,8 +1505,8 @@ static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port, | |||
| 1505 | *index = entry->index; | 1505 | *index = entry->index; |
| 1506 | resolved = false; | 1506 | resolved = false; |
| 1507 | } else if (removing) { | 1507 | } else if (removing) { |
| 1508 | ofdpa_neigh_del(trans, found); | ||
| 1509 | *index = found->index; | 1508 | *index = found->index; |
| 1509 | ofdpa_neigh_del(trans, found); | ||
| 1510 | } else if (updating) { | 1510 | } else if (updating) { |
| 1511 | ofdpa_neigh_update(found, trans, NULL, false); | 1511 | ofdpa_neigh_update(found, trans, NULL, false); |
| 1512 | resolved = !is_zero_ether_addr(found->eth_dst); | 1512 | resolved = !is_zero_ether_addr(found->eth_dst); |
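The rocker change above reads found->index before ofdpa_neigh_del() can free the entry, a straightforward use-after-free ordering fix. A minimal sketch of the rule (illustrative structure, not the rocker code):

    #include <stdio.h>
    #include <stdlib.h>

    struct entry { unsigned int index; };

    static void entry_del(struct entry *e) { free(e); }

    int main(void)
    {
        struct entry *found = malloc(sizeof(*found));
        unsigned int index;

        found->index = 42;

        /* save what we still need from the entry, then release it */
        index = found->index;
        entry_del(found);
        /* reading found->index here would touch freed memory */

        printf("index = %u\n", index);
        return 0;
    }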
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 78efb2822b86..78f9e43420e0 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c | |||
| @@ -4172,7 +4172,7 @@ found: | |||
| 4172 | * recipients | 4172 | * recipients |
| 4173 | */ | 4173 | */ |
| 4174 | if (is_mc_recip) { | 4174 | if (is_mc_recip) { |
| 4175 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); | 4175 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); |
| 4176 | unsigned int depth, i; | 4176 | unsigned int depth, i; |
| 4177 | 4177 | ||
| 4178 | memset(inbuf, 0, sizeof(inbuf)); | 4178 | memset(inbuf, 0, sizeof(inbuf)); |
| @@ -4320,7 +4320,7 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx, | |||
| 4320 | efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); | 4320 | efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); |
| 4321 | } else { | 4321 | } else { |
| 4322 | efx_mcdi_display_error(efx, MC_CMD_FILTER_OP, | 4322 | efx_mcdi_display_error(efx, MC_CMD_FILTER_OP, |
| 4323 | MC_CMD_FILTER_OP_IN_LEN, | 4323 | MC_CMD_FILTER_OP_EXT_IN_LEN, |
| 4324 | NULL, 0, rc); | 4324 | NULL, 0, rc); |
| 4325 | } | 4325 | } |
| 4326 | } | 4326 | } |
| @@ -4453,7 +4453,7 @@ static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx, | |||
| 4453 | struct efx_filter_spec *spec) | 4453 | struct efx_filter_spec *spec) |
| 4454 | { | 4454 | { |
| 4455 | struct efx_ef10_filter_table *table = efx->filter_state; | 4455 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 4456 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); | 4456 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); |
| 4457 | struct efx_filter_spec *saved_spec; | 4457 | struct efx_filter_spec *saved_spec; |
| 4458 | unsigned int hash, i, depth = 1; | 4458 | unsigned int hash, i, depth = 1; |
| 4459 | bool replacing = false; | 4459 | bool replacing = false; |
| @@ -4940,7 +4940,7 @@ not_restored: | |||
| 4940 | static void efx_ef10_filter_table_remove(struct efx_nic *efx) | 4940 | static void efx_ef10_filter_table_remove(struct efx_nic *efx) |
| 4941 | { | 4941 | { |
| 4942 | struct efx_ef10_filter_table *table = efx->filter_state; | 4942 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 4943 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); | 4943 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); |
| 4944 | struct efx_filter_spec *spec; | 4944 | struct efx_filter_spec *spec; |
| 4945 | unsigned int filter_idx; | 4945 | unsigned int filter_idx; |
| 4946 | int rc; | 4946 | int rc; |
| @@ -5105,6 +5105,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, | |||
| 5105 | 5105 | ||
| 5106 | /* Insert/renew filters */ | 5106 | /* Insert/renew filters */ |
| 5107 | for (i = 0; i < addr_count; i++) { | 5107 | for (i = 0; i < addr_count; i++) { |
| 5108 | EFX_WARN_ON_PARANOID(ids[i] != EFX_EF10_FILTER_ID_INVALID); | ||
| 5108 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); | 5109 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); |
| 5109 | efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr); | 5110 | efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr); |
| 5110 | rc = efx_ef10_filter_insert(efx, &spec, true); | 5111 | rc = efx_ef10_filter_insert(efx, &spec, true); |
| @@ -5122,11 +5123,11 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, | |||
| 5122 | } | 5123 | } |
| 5123 | return rc; | 5124 | return rc; |
| 5124 | } else { | 5125 | } else { |
| 5125 | /* mark as not inserted, and carry on */ | 5126 | /* keep invalid ID, and carry on */ |
| 5126 | rc = EFX_EF10_FILTER_ID_INVALID; | ||
| 5127 | } | 5127 | } |
| 5128 | } else { | ||
| 5129 | ids[i] = efx_ef10_filter_get_unsafe_id(rc); | ||
| 5128 | } | 5130 | } |
| 5129 | ids[i] = efx_ef10_filter_get_unsafe_id(rc); | ||
| 5130 | } | 5131 | } |
| 5131 | 5132 | ||
| 5132 | if (multicast && rollback) { | 5133 | if (multicast && rollback) { |
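The addr-list hunk above leaves an entry's filter ID at EFX_EF10_FILTER_ID_INVALID unless the insert actually succeeded, with a paranoia warning that the slot really was invalid beforehand. A small standalone sketch of that bookkeeping (constants and names are illustrative):

    #include <stdio.h>

    #define FILTER_ID_INVALID 0xffffu
    #define N 4

    /* pretend inserts: even slots succeed and return a table index, odd ones fail */
    static int filter_insert(int i) { return (i % 2 == 0) ? 100 + i : -1; }

    int main(void)
    {
        unsigned short ids[N];
        int i;

        for (i = 0; i < N; i++)
            ids[i] = FILTER_ID_INVALID;           /* start out invalid */

        for (i = 0; i < N; i++) {
            int rc = filter_insert(i);
            if (rc < 0)
                continue;                         /* keep the invalid ID */
            ids[i] = (unsigned short)rc;          /* record only real IDs */
        }

        for (i = 0; i < N; i++)
            printf("ids[%d] = %#x\n", i, ids[i]);
        return 0;
    }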
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c index b7e4345c990d..019cef1d3cf7 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.c +++ b/drivers/net/ethernet/sfc/ef10_sriov.c | |||
| @@ -661,8 +661,6 @@ restore_filters: | |||
| 661 | up_write(&vf->efx->filter_sem); | 661 | up_write(&vf->efx->filter_sem); |
| 662 | mutex_unlock(&vf->efx->mac_lock); | 662 | mutex_unlock(&vf->efx->mac_lock); |
| 663 | 663 | ||
| 664 | up_write(&vf->efx->filter_sem); | ||
| 665 | |||
| 666 | rc2 = efx_net_open(vf->efx->net_dev); | 664 | rc2 = efx_net_open(vf->efx->net_dev); |
| 667 | if (rc2) | 665 | if (rc2) |
| 668 | goto reset_nic; | 666 | goto reset_nic; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c index 489ef146201e..6a9c954492f2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c +++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c | |||
| @@ -37,6 +37,7 @@ | |||
| 37 | #define TSE_PCS_CONTROL_AN_EN_MASK BIT(12) | 37 | #define TSE_PCS_CONTROL_AN_EN_MASK BIT(12) |
| 38 | #define TSE_PCS_CONTROL_REG 0x00 | 38 | #define TSE_PCS_CONTROL_REG 0x00 |
| 39 | #define TSE_PCS_CONTROL_RESTART_AN_MASK BIT(9) | 39 | #define TSE_PCS_CONTROL_RESTART_AN_MASK BIT(9) |
| 40 | #define TSE_PCS_CTRL_AUTONEG_SGMII 0x1140 | ||
| 40 | #define TSE_PCS_IF_MODE_REG 0x28 | 41 | #define TSE_PCS_IF_MODE_REG 0x28 |
| 41 | #define TSE_PCS_LINK_TIMER_0_REG 0x24 | 42 | #define TSE_PCS_LINK_TIMER_0_REG 0x24 |
| 42 | #define TSE_PCS_LINK_TIMER_1_REG 0x26 | 43 | #define TSE_PCS_LINK_TIMER_1_REG 0x26 |
| @@ -65,6 +66,7 @@ | |||
| 65 | #define TSE_PCS_SW_RESET_TIMEOUT 100 | 66 | #define TSE_PCS_SW_RESET_TIMEOUT 100 |
| 66 | #define TSE_PCS_USE_SGMII_AN_MASK BIT(1) | 67 | #define TSE_PCS_USE_SGMII_AN_MASK BIT(1) |
| 67 | #define TSE_PCS_USE_SGMII_ENA BIT(0) | 68 | #define TSE_PCS_USE_SGMII_ENA BIT(0) |
| 69 | #define TSE_PCS_IF_USE_SGMII 0x03 | ||
| 68 | 70 | ||
| 69 | #define SGMII_ADAPTER_CTRL_REG 0x00 | 71 | #define SGMII_ADAPTER_CTRL_REG 0x00 |
| 70 | #define SGMII_ADAPTER_DISABLE 0x0001 | 72 | #define SGMII_ADAPTER_DISABLE 0x0001 |
| @@ -101,7 +103,9 @@ int tse_pcs_init(void __iomem *base, struct tse_pcs *pcs) | |||
| 101 | { | 103 | { |
| 102 | int ret = 0; | 104 | int ret = 0; |
| 103 | 105 | ||
| 104 | writew(TSE_PCS_USE_SGMII_ENA, base + TSE_PCS_IF_MODE_REG); | 106 | writew(TSE_PCS_IF_USE_SGMII, base + TSE_PCS_IF_MODE_REG); |
| 107 | |||
| 108 | writew(TSE_PCS_CTRL_AUTONEG_SGMII, base + TSE_PCS_CONTROL_REG); | ||
| 105 | 109 | ||
| 106 | writew(TSE_PCS_SGMII_LINK_TIMER_0, base + TSE_PCS_LINK_TIMER_0_REG); | 110 | writew(TSE_PCS_SGMII_LINK_TIMER_0, base + TSE_PCS_LINK_TIMER_0_REG); |
| 107 | writew(TSE_PCS_SGMII_LINK_TIMER_1, base + TSE_PCS_LINK_TIMER_1_REG); | 111 | writew(TSE_PCS_SGMII_LINK_TIMER_1, base + TSE_PCS_LINK_TIMER_1_REG); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index aa6476439aee..e0ef02f9503b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | |||
| @@ -214,13 +214,13 @@ static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p) | |||
| 214 | { | 214 | { |
| 215 | /* Context type from W/B descriptor must be zero */ | 215 | /* Context type from W/B descriptor must be zero */ |
| 216 | if (le32_to_cpu(p->des3) & TDES3_CONTEXT_TYPE) | 216 | if (le32_to_cpu(p->des3) & TDES3_CONTEXT_TYPE) |
| 217 | return -EINVAL; | 217 | return 0; |
| 218 | 218 | ||
| 219 | /* Tx Timestamp Status is 1 so des0 and des1'll have valid values */ | 219 | /* Tx Timestamp Status is 1 so des0 and des1'll have valid values */ |
| 220 | if (le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS) | 220 | if (le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS) |
| 221 | return 0; | 221 | return 1; |
| 222 | 222 | ||
| 223 | return 1; | 223 | return 0; |
| 224 | } | 224 | } |
| 225 | 225 | ||
| 226 | static inline u64 dwmac4_get_timestamp(void *desc, u32 ats) | 226 | static inline u64 dwmac4_get_timestamp(void *desc, u32 ats) |
| @@ -282,7 +282,10 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats) | |||
| 282 | } | 282 | } |
| 283 | } | 283 | } |
| 284 | exit: | 284 | exit: |
| 285 | return ret; | 285 | if (likely(ret == 0)) |
| 286 | return 1; | ||
| 287 | |||
| 288 | return 0; | ||
| 286 | } | 289 | } |
| 287 | 290 | ||
| 288 | static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic, | 291 | static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic, |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index a74c481401c4..6e4cbc6ce0ef 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
| @@ -434,14 +434,14 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, | |||
| 434 | return; | 434 | return; |
| 435 | 435 | ||
| 436 | /* check tx tstamp status */ | 436 | /* check tx tstamp status */ |
| 437 | if (!priv->hw->desc->get_tx_timestamp_status(p)) { | 437 | if (priv->hw->desc->get_tx_timestamp_status(p)) { |
| 438 | /* get the valid tstamp */ | 438 | /* get the valid tstamp */ |
| 439 | ns = priv->hw->desc->get_timestamp(p, priv->adv_ts); | 439 | ns = priv->hw->desc->get_timestamp(p, priv->adv_ts); |
| 440 | 440 | ||
| 441 | memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); | 441 | memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); |
| 442 | shhwtstamp.hwtstamp = ns_to_ktime(ns); | 442 | shhwtstamp.hwtstamp = ns_to_ktime(ns); |
| 443 | 443 | ||
| 444 | netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns); | 444 | netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns); |
| 445 | /* pass tstamp to stack */ | 445 | /* pass tstamp to stack */ |
| 446 | skb_tstamp_tx(skb, &shhwtstamp); | 446 | skb_tstamp_tx(skb, &shhwtstamp); |
| 447 | } | 447 | } |
| @@ -468,19 +468,19 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, | |||
| 468 | return; | 468 | return; |
| 469 | 469 | ||
| 470 | /* Check if timestamp is available */ | 470 | /* Check if timestamp is available */ |
| 471 | if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) { | 471 | if (priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) { |
| 472 | /* For GMAC4, the valid timestamp is from CTX next desc. */ | 472 | /* For GMAC4, the valid timestamp is from CTX next desc. */ |
| 473 | if (priv->plat->has_gmac4) | 473 | if (priv->plat->has_gmac4) |
| 474 | ns = priv->hw->desc->get_timestamp(np, priv->adv_ts); | 474 | ns = priv->hw->desc->get_timestamp(np, priv->adv_ts); |
| 475 | else | 475 | else |
| 476 | ns = priv->hw->desc->get_timestamp(p, priv->adv_ts); | 476 | ns = priv->hw->desc->get_timestamp(p, priv->adv_ts); |
| 477 | 477 | ||
| 478 | netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns); | 478 | netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); |
| 479 | shhwtstamp = skb_hwtstamps(skb); | 479 | shhwtstamp = skb_hwtstamps(skb); |
| 480 | memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); | 480 | memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); |
| 481 | shhwtstamp->hwtstamp = ns_to_ktime(ns); | 481 | shhwtstamp->hwtstamp = ns_to_ktime(ns); |
| 482 | } else { | 482 | } else { |
| 483 | netdev_err(priv->dev, "cannot get RX hw timestamp\n"); | 483 | netdev_dbg(priv->dev, "cannot get RX hw timestamp\n"); |
| 484 | } | 484 | } |
| 485 | } | 485 | } |
| 486 | 486 | ||
| @@ -546,7 +546,10 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
| 546 | /* PTP v1, UDP, any kind of event packet */ | 546 | /* PTP v1, UDP, any kind of event packet */ |
| 547 | config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; | 547 | config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; |
| 548 | /* take time stamp for all event messages */ | 548 | /* take time stamp for all event messages */ |
| 549 | snap_type_sel = PTP_TCR_SNAPTYPSEL_1; | 549 | if (priv->plat->has_gmac4) |
| 550 | snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1; | ||
| 551 | else | ||
| 552 | snap_type_sel = PTP_TCR_SNAPTYPSEL_1; | ||
| 550 | 553 | ||
| 551 | ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; | 554 | ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; |
| 552 | ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; | 555 | ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; |
| @@ -578,7 +581,10 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
| 578 | config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; | 581 | config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; |
| 579 | ptp_v2 = PTP_TCR_TSVER2ENA; | 582 | ptp_v2 = PTP_TCR_TSVER2ENA; |
| 580 | /* take time stamp for all event messages */ | 583 | /* take time stamp for all event messages */ |
| 581 | snap_type_sel = PTP_TCR_SNAPTYPSEL_1; | 584 | if (priv->plat->has_gmac4) |
| 585 | snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1; | ||
| 586 | else | ||
| 587 | snap_type_sel = PTP_TCR_SNAPTYPSEL_1; | ||
| 582 | 588 | ||
| 583 | ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; | 589 | ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; |
| 584 | ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; | 590 | ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; |
| @@ -612,7 +618,10 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
| 612 | config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; | 618 | config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; |
| 613 | ptp_v2 = PTP_TCR_TSVER2ENA; | 619 | ptp_v2 = PTP_TCR_TSVER2ENA; |
| 614 | /* take time stamp for all event messages */ | 620 | /* take time stamp for all event messages */ |
| 615 | snap_type_sel = PTP_TCR_SNAPTYPSEL_1; | 621 | if (priv->plat->has_gmac4) |
| 622 | snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1; | ||
| 623 | else | ||
| 624 | snap_type_sel = PTP_TCR_SNAPTYPSEL_1; | ||
| 616 | 625 | ||
| 617 | ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; | 626 | ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; |
| 618 | ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; | 627 | ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; |
| @@ -1208,7 +1217,7 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) | |||
| 1208 | u32 rx_count = priv->plat->rx_queues_to_use; | 1217 | u32 rx_count = priv->plat->rx_queues_to_use; |
| 1209 | unsigned int bfsize = 0; | 1218 | unsigned int bfsize = 0; |
| 1210 | int ret = -ENOMEM; | 1219 | int ret = -ENOMEM; |
| 1211 | u32 queue; | 1220 | int queue; |
| 1212 | int i; | 1221 | int i; |
| 1213 | 1222 | ||
| 1214 | if (priv->hw->mode->set_16kib_bfsize) | 1223 | if (priv->hw->mode->set_16kib_bfsize) |
| @@ -2724,7 +2733,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des, | |||
| 2724 | 2733 | ||
| 2725 | priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size, | 2734 | priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size, |
| 2726 | 0, 1, | 2735 | 0, 1, |
| 2727 | (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE), | 2736 | (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE), |
| 2728 | 0, 0); | 2737 | 0, 0); |
| 2729 | 2738 | ||
| 2730 | tmp_len -= TSO_MAX_BUFF_SIZE; | 2739 | tmp_len -= TSO_MAX_BUFF_SIZE; |
| @@ -2822,7 +2831,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 2822 | 2831 | ||
| 2823 | tx_q->tx_skbuff_dma[first_entry].buf = des; | 2832 | tx_q->tx_skbuff_dma[first_entry].buf = des; |
| 2824 | tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); | 2833 | tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); |
| 2825 | tx_q->tx_skbuff[first_entry] = skb; | ||
| 2826 | 2834 | ||
| 2827 | first->des0 = cpu_to_le32(des); | 2835 | first->des0 = cpu_to_le32(des); |
| 2828 | 2836 | ||
| @@ -2856,6 +2864,14 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 2856 | 2864 | ||
| 2857 | tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true; | 2865 | tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true; |
| 2858 | 2866 | ||
| 2867 | /* Only the last descriptor gets to point to the skb. */ | ||
| 2868 | tx_q->tx_skbuff[tx_q->cur_tx] = skb; | ||
| 2869 | |||
| 2870 | /* We've used all descriptors we need for this skb, however, | ||
| 2871 | * advance cur_tx so that it references a fresh descriptor. | ||
| 2872 | * ndo_start_xmit will fill this descriptor the next time it's | ||
| 2873 | * called and stmmac_tx_clean may clean up to this descriptor. | ||
| 2874 | */ | ||
| 2859 | tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); | 2875 | tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); |
| 2860 | 2876 | ||
| 2861 | if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { | 2877 | if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { |
| @@ -2947,7 +2963,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 2947 | int i, csum_insertion = 0, is_jumbo = 0; | 2963 | int i, csum_insertion = 0, is_jumbo = 0; |
| 2948 | u32 queue = skb_get_queue_mapping(skb); | 2964 | u32 queue = skb_get_queue_mapping(skb); |
| 2949 | int nfrags = skb_shinfo(skb)->nr_frags; | 2965 | int nfrags = skb_shinfo(skb)->nr_frags; |
| 2950 | unsigned int entry, first_entry; | 2966 | int entry; |
| 2967 | unsigned int first_entry; | ||
| 2951 | struct dma_desc *desc, *first; | 2968 | struct dma_desc *desc, *first; |
| 2952 | struct stmmac_tx_queue *tx_q; | 2969 | struct stmmac_tx_queue *tx_q; |
| 2953 | unsigned int enh_desc; | 2970 | unsigned int enh_desc; |
| @@ -2988,8 +3005,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 2988 | 3005 | ||
| 2989 | first = desc; | 3006 | first = desc; |
| 2990 | 3007 | ||
| 2991 | tx_q->tx_skbuff[first_entry] = skb; | ||
| 2992 | |||
| 2993 | enh_desc = priv->plat->enh_desc; | 3008 | enh_desc = priv->plat->enh_desc; |
| 2994 | /* To program the descriptors according to the size of the frame */ | 3009 | /* To program the descriptors according to the size of the frame */ |
| 2995 | if (enh_desc) | 3010 | if (enh_desc) |
| @@ -3037,8 +3052,15 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 3037 | skb->len); | 3052 | skb->len); |
| 3038 | } | 3053 | } |
| 3039 | 3054 | ||
| 3040 | entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); | 3055 | /* Only the last descriptor gets to point to the skb. */ |
| 3056 | tx_q->tx_skbuff[entry] = skb; | ||
| 3041 | 3057 | ||
| 3058 | /* We've used all descriptors we need for this skb, however, | ||
| 3059 | * advance cur_tx so that it references a fresh descriptor. | ||
| 3060 | * ndo_start_xmit will fill this descriptor the next time it's | ||
| 3061 | * called and stmmac_tx_clean may clean up to this descriptor. | ||
| 3062 | */ | ||
| 3063 | entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); | ||
| 3042 | tx_q->cur_tx = entry; | 3064 | tx_q->cur_tx = entry; |
| 3043 | 3065 | ||
| 3044 | if (netif_msg_pktdata(priv)) { | 3066 | if (netif_msg_pktdata(priv)) { |
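Both TX paths above now record the skb only in the packet's last descriptor and advance cur_tx to a fresh slot afterwards, so stmmac_tx_clean() cannot release the skb before every descriptor of the packet has completed. A toy ring model of that ownership rule (standalone C, not the driver code):

    #include <stdio.h>
    #include <stdlib.h>

    #define RING_SIZE 8                      /* must be a power of two */
    #define NEXT(i)   (((i) + 1) & (RING_SIZE - 1))

    struct desc { void *skb; };

    static struct desc ring[RING_SIZE];
    static unsigned int cur, dirty;          /* producer / consumer indices */

    static void xmit(void *skb, int nsegs)
    {
        for (int s = 0; s < nsegs; s++) {
            /* only the LAST descriptor of the packet owns the skb pointer */
            ring[cur].skb = (s == nsegs - 1) ? skb : NULL;
            cur = NEXT(cur);                 /* leave cur on a fresh slot */
        }
    }

    static void clean(int completed)
    {
        while (completed--) {
            if (ring[dirty].skb) {
                printf("freeing skb at slot %u\n", dirty);
                free(ring[dirty].skb);
            }
            ring[dirty].skb = NULL;
            dirty = NEXT(dirty);
        }
    }

    int main(void)
    {
        xmit(malloc(1), 3);   /* a packet spanning three descriptors */
        clean(2);             /* only two descriptors done: nothing is freed */
        clean(1);             /* last descriptor done: the skb is freed once */
        return 0;
    }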
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h index 48fb72fc423c..f4b31d69f60e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h | |||
| @@ -59,7 +59,8 @@ | |||
| 59 | /* Enable Snapshot for Messages Relevant to Master */ | 59 | /* Enable Snapshot for Messages Relevant to Master */ |
| 60 | #define PTP_TCR_TSMSTRENA BIT(15) | 60 | #define PTP_TCR_TSMSTRENA BIT(15) |
| 61 | /* Select PTP packets for Taking Snapshots */ | 61 | /* Select PTP packets for Taking Snapshots */ |
| 62 | #define PTP_TCR_SNAPTYPSEL_1 GENMASK(17, 16) | 62 | #define PTP_TCR_SNAPTYPSEL_1 BIT(16) |
| 63 | #define PTP_GMAC4_TCR_SNAPTYPSEL_1 GENMASK(17, 16) | ||
| 63 | /* Enable MAC address for PTP Frame Filtering */ | 64 | /* Enable MAC address for PTP Frame Filtering */ |
| 64 | #define PTP_TCR_TSENMACADDR BIT(18) | 65 | #define PTP_TCR_TSENMACADDR BIT(18) |
| 65 | 66 | ||
diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c index 1562ab4151e1..56ba411421f0 100644 --- a/drivers/net/ethernet/ti/cpsw-common.c +++ b/drivers/net/ethernet/ti/cpsw-common.c | |||
| @@ -90,7 +90,7 @@ int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr) | |||
| 90 | if (of_device_is_compatible(dev->of_node, "ti,dm816-emac")) | 90 | if (of_device_is_compatible(dev->of_node, "ti,dm816-emac")) |
| 91 | return cpsw_am33xx_cm_get_macid(dev, 0x30, slave, mac_addr); | 91 | return cpsw_am33xx_cm_get_macid(dev, 0x30, slave, mac_addr); |
| 92 | 92 | ||
| 93 | if (of_machine_is_compatible("ti,am4372")) | 93 | if (of_machine_is_compatible("ti,am43")) |
| 94 | return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr); | 94 | return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr); |
| 95 | 95 | ||
| 96 | if (of_machine_is_compatible("ti,dra7")) | 96 | if (of_machine_is_compatible("ti,dra7")) |
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 959fd12d2e67..199459bd6961 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c | |||
| @@ -1007,7 +1007,7 @@ static void geneve_setup(struct net_device *dev) | |||
| 1007 | 1007 | ||
| 1008 | dev->netdev_ops = &geneve_netdev_ops; | 1008 | dev->netdev_ops = &geneve_netdev_ops; |
| 1009 | dev->ethtool_ops = &geneve_ethtool_ops; | 1009 | dev->ethtool_ops = &geneve_ethtool_ops; |
| 1010 | dev->destructor = free_netdev; | 1010 | dev->needs_free_netdev = true; |
| 1011 | 1011 | ||
| 1012 | SET_NETDEV_DEVTYPE(dev, &geneve_type); | 1012 | SET_NETDEV_DEVTYPE(dev, &geneve_type); |
| 1013 | 1013 | ||
| @@ -1133,7 +1133,7 @@ static int geneve_configure(struct net *net, struct net_device *dev, | |||
| 1133 | 1133 | ||
| 1134 | /* make enough headroom for basic scenario */ | 1134 | /* make enough headroom for basic scenario */ |
| 1135 | encap_len = GENEVE_BASE_HLEN + ETH_HLEN; | 1135 | encap_len = GENEVE_BASE_HLEN + ETH_HLEN; |
| 1136 | if (ip_tunnel_info_af(info) == AF_INET) { | 1136 | if (!metadata && ip_tunnel_info_af(info) == AF_INET) { |
| 1137 | encap_len += sizeof(struct iphdr); | 1137 | encap_len += sizeof(struct iphdr); |
| 1138 | dev->max_mtu -= sizeof(struct iphdr); | 1138 | dev->max_mtu -= sizeof(struct iphdr); |
| 1139 | } else { | 1139 | } else { |
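
The geneve change only applies the smaller IPv4 headroom when the device is not in metadata (collect_md) mode; with metadata the address family is not fixed, so the larger IPv6 reservation is kept. A rough sketch of that headroom decision, with nominal header sizes filled in as assumptions:

#include <stdio.h>
#include <stdbool.h>

#define ETH_HLEN         14
#define IPV4_HLEN        20
#define IPV6_HLEN        40
#define GENEVE_BASE_HLEN 16   /* assumed: UDP header plus base Geneve header */

static int geneve_headroom(bool metadata, bool ipv4)
{
	int encap_len = GENEVE_BASE_HLEN + ETH_HLEN;

	/* Only a fixed-family, non-metadata tunnel can use the smaller
	 * IPv4 reservation; otherwise budget for IPv6. */
	if (!metadata && ipv4)
		encap_len += IPV4_HLEN;
	else
		encap_len += IPV6_HLEN;

	return encap_len;
}

int main(void)
{
	printf("non-metadata IPv4 headroom: %d\n", geneve_headroom(false, true));
	printf("metadata tunnel headroom:   %d\n", geneve_headroom(true, true));
	return 0;
}
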
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 7b652bb7ebe4..ca110cd2a4e4 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c | |||
| @@ -611,7 +611,7 @@ static const struct net_device_ops gtp_netdev_ops = { | |||
| 611 | static void gtp_link_setup(struct net_device *dev) | 611 | static void gtp_link_setup(struct net_device *dev) |
| 612 | { | 612 | { |
| 613 | dev->netdev_ops = &gtp_netdev_ops; | 613 | dev->netdev_ops = &gtp_netdev_ops; |
| 614 | dev->destructor = free_netdev; | 614 | dev->needs_free_netdev = true; |
| 615 | 615 | ||
| 616 | dev->hard_header_len = 0; | 616 | dev->hard_header_len = 0; |
| 617 | dev->addr_len = 0; | 617 | dev->addr_len = 0; |
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index 922bf440e9f1..021a8ec411ab 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c | |||
| @@ -311,7 +311,7 @@ static void sp_setup(struct net_device *dev) | |||
| 311 | { | 311 | { |
| 312 | /* Finish setting up the DEVICE info. */ | 312 | /* Finish setting up the DEVICE info. */ |
| 313 | dev->netdev_ops = &sp_netdev_ops; | 313 | dev->netdev_ops = &sp_netdev_ops; |
| 314 | dev->destructor = free_netdev; | 314 | dev->needs_free_netdev = true; |
| 315 | dev->mtu = SIXP_MTU; | 315 | dev->mtu = SIXP_MTU; |
| 316 | dev->hard_header_len = AX25_MAX_HEADER_LEN; | 316 | dev->hard_header_len = AX25_MAX_HEADER_LEN; |
| 317 | dev->header_ops = &ax25_header_ops; | 317 | dev->header_ops = &ax25_header_ops; |
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index f62e7f325cf9..78a6414c5fd9 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c | |||
| @@ -476,7 +476,7 @@ static const struct net_device_ops bpq_netdev_ops = { | |||
| 476 | static void bpq_setup(struct net_device *dev) | 476 | static void bpq_setup(struct net_device *dev) |
| 477 | { | 477 | { |
| 478 | dev->netdev_ops = &bpq_netdev_ops; | 478 | dev->netdev_ops = &bpq_netdev_ops; |
| 479 | dev->destructor = free_netdev; | 479 | dev->needs_free_netdev = true; |
| 480 | 480 | ||
| 481 | memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); | 481 | memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); |
| 482 | memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN); | 482 | memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN); |
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c index 8c3633c1d078..97e3bc60c3e7 100644 --- a/drivers/net/hamradio/hdlcdrv.c +++ b/drivers/net/hamradio/hdlcdrv.c | |||
| @@ -576,6 +576,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
| 576 | case HDLCDRVCTL_CALIBRATE: | 576 | case HDLCDRVCTL_CALIBRATE: |
| 577 | if(!capable(CAP_SYS_RAWIO)) | 577 | if(!capable(CAP_SYS_RAWIO)) |
| 578 | return -EPERM; | 578 | return -EPERM; |
| 579 | if (s->par.bitrate <= 0) | ||
| 580 | return -EINVAL; | ||
| 579 | if (bi.data.calibrate > INT_MAX / s->par.bitrate) | 581 | if (bi.data.calibrate > INT_MAX / s->par.bitrate) |
| 580 | return -EINVAL; | 582 | return -EINVAL; |
| 581 | s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16; | 583 | s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16; |
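
The hdlcdrv hunk adds a bitrate sanity check ahead of the existing overflow test, since that test divides INT_MAX by the configured bitrate and a zero or negative value would otherwise fault or let bad input through. A small stand-alone illustration of the same guard ordering:

#include <limits.h>
#include <stdio.h>

/* Returns calibrate * bitrate / 16, or -1 on invalid or overflowing input. */
static long calibrate_ticks(int calibrate, int bitrate)
{
	if (bitrate <= 0)                  /* reject before dividing by it */
		return -1;
	if (calibrate > INT_MAX / bitrate) /* the multiply would overflow */
		return -1;
	return (long)calibrate * bitrate / 16;
}

int main(void)
{
	printf("%ld\n", calibrate_ticks(10, 9600));   /* 6000 */
	printf("%ld\n", calibrate_ticks(10, 0));      /* -1: bad bitrate */
	printf("%ld\n", calibrate_ticks(INT_MAX, 2)); /* -1: would overflow */
	return 0;
}
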
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 262b2ea576a3..6066f1bcaf2d 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h | |||
| @@ -171,6 +171,8 @@ struct rndis_device { | |||
| 171 | spinlock_t request_lock; | 171 | spinlock_t request_lock; |
| 172 | struct list_head req_list; | 172 | struct list_head req_list; |
| 173 | 173 | ||
| 174 | struct work_struct mcast_work; | ||
| 175 | |||
| 174 | u8 hw_mac_adr[ETH_ALEN]; | 176 | u8 hw_mac_adr[ETH_ALEN]; |
| 175 | u8 rss_key[NETVSC_HASH_KEYLEN]; | 177 | u8 rss_key[NETVSC_HASH_KEYLEN]; |
| 176 | u16 ind_table[ITAB_NUM]; | 178 | u16 ind_table[ITAB_NUM]; |
| @@ -201,6 +203,7 @@ int rndis_filter_open(struct netvsc_device *nvdev); | |||
| 201 | int rndis_filter_close(struct netvsc_device *nvdev); | 203 | int rndis_filter_close(struct netvsc_device *nvdev); |
| 202 | int rndis_filter_device_add(struct hv_device *dev, | 204 | int rndis_filter_device_add(struct hv_device *dev, |
| 203 | struct netvsc_device_info *info); | 205 | struct netvsc_device_info *info); |
| 206 | void rndis_filter_update(struct netvsc_device *nvdev); | ||
| 204 | void rndis_filter_device_remove(struct hv_device *dev, | 207 | void rndis_filter_device_remove(struct hv_device *dev, |
| 205 | struct netvsc_device *nvdev); | 208 | struct netvsc_device *nvdev); |
| 206 | int rndis_filter_set_rss_param(struct rndis_device *rdev, | 209 | int rndis_filter_set_rss_param(struct rndis_device *rdev, |
| @@ -211,7 +214,6 @@ int rndis_filter_receive(struct net_device *ndev, | |||
| 211 | struct vmbus_channel *channel, | 214 | struct vmbus_channel *channel, |
| 212 | void *data, u32 buflen); | 215 | void *data, u32 buflen); |
| 213 | 216 | ||
| 214 | int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter); | ||
| 215 | int rndis_filter_set_device_mac(struct net_device *ndev, char *mac); | 217 | int rndis_filter_set_device_mac(struct net_device *ndev, char *mac); |
| 216 | 218 | ||
| 217 | void netvsc_switch_datapath(struct net_device *nv_dev, bool vf); | 219 | void netvsc_switch_datapath(struct net_device *nv_dev, bool vf); |
| @@ -696,7 +698,6 @@ struct net_device_context { | |||
| 696 | /* list protection */ | 698 | /* list protection */ |
| 697 | spinlock_t lock; | 699 | spinlock_t lock; |
| 698 | 700 | ||
| 699 | struct work_struct work; | ||
| 700 | u32 msg_enable; /* debug level */ | 701 | u32 msg_enable; /* debug level */ |
| 701 | 702 | ||
| 702 | u32 tx_checksum_mask; | 703 | u32 tx_checksum_mask; |
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 4421a6d00375..643c539a08ba 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
| @@ -56,37 +56,12 @@ static int debug = -1; | |||
| 56 | module_param(debug, int, S_IRUGO); | 56 | module_param(debug, int, S_IRUGO); |
| 57 | MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); | 57 | MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); |
| 58 | 58 | ||
| 59 | static void do_set_multicast(struct work_struct *w) | ||
| 60 | { | ||
| 61 | struct net_device_context *ndevctx = | ||
| 62 | container_of(w, struct net_device_context, work); | ||
| 63 | struct hv_device *device_obj = ndevctx->device_ctx; | ||
| 64 | struct net_device *ndev = hv_get_drvdata(device_obj); | ||
| 65 | struct netvsc_device *nvdev = rcu_dereference(ndevctx->nvdev); | ||
| 66 | struct rndis_device *rdev; | ||
| 67 | |||
| 68 | if (!nvdev) | ||
| 69 | return; | ||
| 70 | |||
| 71 | rdev = nvdev->extension; | ||
| 72 | if (rdev == NULL) | ||
| 73 | return; | ||
| 74 | |||
| 75 | if (ndev->flags & IFF_PROMISC) | ||
| 76 | rndis_filter_set_packet_filter(rdev, | ||
| 77 | NDIS_PACKET_TYPE_PROMISCUOUS); | ||
| 78 | else | ||
| 79 | rndis_filter_set_packet_filter(rdev, | ||
| 80 | NDIS_PACKET_TYPE_BROADCAST | | ||
| 81 | NDIS_PACKET_TYPE_ALL_MULTICAST | | ||
| 82 | NDIS_PACKET_TYPE_DIRECTED); | ||
| 83 | } | ||
| 84 | |||
| 85 | static void netvsc_set_multicast_list(struct net_device *net) | 59 | static void netvsc_set_multicast_list(struct net_device *net) |
| 86 | { | 60 | { |
| 87 | struct net_device_context *net_device_ctx = netdev_priv(net); | 61 | struct net_device_context *net_device_ctx = netdev_priv(net); |
| 62 | struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); | ||
| 88 | 63 | ||
| 89 | schedule_work(&net_device_ctx->work); | 64 | rndis_filter_update(nvdev); |
| 90 | } | 65 | } |
| 91 | 66 | ||
| 92 | static int netvsc_open(struct net_device *net) | 67 | static int netvsc_open(struct net_device *net) |
| @@ -123,8 +98,6 @@ static int netvsc_close(struct net_device *net) | |||
| 123 | 98 | ||
| 124 | netif_tx_disable(net); | 99 | netif_tx_disable(net); |
| 125 | 100 | ||
| 126 | /* Make sure netvsc_set_multicast_list doesn't re-enable filter! */ | ||
| 127 | cancel_work_sync(&net_device_ctx->work); | ||
| 128 | ret = rndis_filter_close(nvdev); | 101 | ret = rndis_filter_close(nvdev); |
| 129 | if (ret != 0) { | 102 | if (ret != 0) { |
| 130 | netdev_err(net, "unable to close device (ret %d).\n", ret); | 103 | netdev_err(net, "unable to close device (ret %d).\n", ret); |
| @@ -803,7 +776,7 @@ static int netvsc_set_channels(struct net_device *net, | |||
| 803 | channels->rx_count || channels->tx_count || channels->other_count) | 776 | channels->rx_count || channels->tx_count || channels->other_count) |
| 804 | return -EINVAL; | 777 | return -EINVAL; |
| 805 | 778 | ||
| 806 | if (count > net->num_tx_queues || count > net->num_rx_queues) | 779 | if (count > net->num_tx_queues || count > VRSS_CHANNEL_MAX) |
| 807 | return -EINVAL; | 780 | return -EINVAL; |
| 808 | 781 | ||
| 809 | if (!nvdev || nvdev->destroy) | 782 | if (!nvdev || nvdev->destroy) |
| @@ -1028,7 +1001,7 @@ static const struct { | |||
| 1028 | static int netvsc_get_sset_count(struct net_device *dev, int string_set) | 1001 | static int netvsc_get_sset_count(struct net_device *dev, int string_set) |
| 1029 | { | 1002 | { |
| 1030 | struct net_device_context *ndc = netdev_priv(dev); | 1003 | struct net_device_context *ndc = netdev_priv(dev); |
| 1031 | struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev); | 1004 | struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); |
| 1032 | 1005 | ||
| 1033 | if (!nvdev) | 1006 | if (!nvdev) |
| 1034 | return -ENODEV; | 1007 | return -ENODEV; |
| @@ -1158,11 +1131,22 @@ netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, | |||
| 1158 | } | 1131 | } |
| 1159 | 1132 | ||
| 1160 | #ifdef CONFIG_NET_POLL_CONTROLLER | 1133 | #ifdef CONFIG_NET_POLL_CONTROLLER |
| 1161 | static void netvsc_poll_controller(struct net_device *net) | 1134 | static void netvsc_poll_controller(struct net_device *dev) |
| 1162 | { | 1135 | { |
| 1163 | /* As netvsc_start_xmit() works synchronous we don't have to | 1136 | struct net_device_context *ndc = netdev_priv(dev); |
| 1164 | * trigger anything here. | 1137 | struct netvsc_device *ndev; |
| 1165 | */ | 1138 | int i; |
| 1139 | |||
| 1140 | rcu_read_lock(); | ||
| 1141 | ndev = rcu_dereference(ndc->nvdev); | ||
| 1142 | if (ndev) { | ||
| 1143 | for (i = 0; i < ndev->num_chn; i++) { | ||
| 1144 | struct netvsc_channel *nvchan = &ndev->chan_table[i]; | ||
| 1145 | |||
| 1146 | napi_schedule(&nvchan->napi); | ||
| 1147 | } | ||
| 1148 | } | ||
| 1149 | rcu_read_unlock(); | ||
| 1166 | } | 1150 | } |
| 1167 | #endif | 1151 | #endif |
| 1168 | 1152 | ||
| @@ -1219,7 +1203,7 @@ static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir, | |||
| 1219 | rndis_dev = ndev->extension; | 1203 | rndis_dev = ndev->extension; |
| 1220 | if (indir) { | 1204 | if (indir) { |
| 1221 | for (i = 0; i < ITAB_NUM; i++) | 1205 | for (i = 0; i < ITAB_NUM; i++) |
| 1222 | if (indir[i] >= dev->num_rx_queues) | 1206 | if (indir[i] >= VRSS_CHANNEL_MAX) |
| 1223 | return -EINVAL; | 1207 | return -EINVAL; |
| 1224 | 1208 | ||
| 1225 | for (i = 0; i < ITAB_NUM; i++) | 1209 | for (i = 0; i < ITAB_NUM; i++) |
| @@ -1552,7 +1536,6 @@ static int netvsc_probe(struct hv_device *dev, | |||
| 1552 | hv_set_drvdata(dev, net); | 1536 | hv_set_drvdata(dev, net); |
| 1553 | 1537 | ||
| 1554 | INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); | 1538 | INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); |
| 1555 | INIT_WORK(&net_device_ctx->work, do_set_multicast); | ||
| 1556 | 1539 | ||
| 1557 | spin_lock_init(&net_device_ctx->lock); | 1540 | spin_lock_init(&net_device_ctx->lock); |
| 1558 | INIT_LIST_HEAD(&net_device_ctx->reconfig_events); | 1541 | INIT_LIST_HEAD(&net_device_ctx->reconfig_events); |
| @@ -1622,7 +1605,6 @@ static int netvsc_remove(struct hv_device *dev) | |||
| 1622 | netif_device_detach(net); | 1605 | netif_device_detach(net); |
| 1623 | 1606 | ||
| 1624 | cancel_delayed_work_sync(&ndev_ctx->dwork); | 1607 | cancel_delayed_work_sync(&ndev_ctx->dwork); |
| 1625 | cancel_work_sync(&ndev_ctx->work); | ||
| 1626 | 1608 | ||
| 1627 | /* | 1609 | /* |
| 1628 | * Call to the vsc driver to let it know that the device is being | 1610 | * Call to the vsc driver to let it know that the device is being |
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index f9d5b0b8209a..cb79cd081f42 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c | |||
| @@ -31,6 +31,7 @@ | |||
| 31 | 31 | ||
| 32 | #include "hyperv_net.h" | 32 | #include "hyperv_net.h" |
| 33 | 33 | ||
| 34 | static void rndis_set_multicast(struct work_struct *w); | ||
| 34 | 35 | ||
| 35 | #define RNDIS_EXT_LEN PAGE_SIZE | 36 | #define RNDIS_EXT_LEN PAGE_SIZE |
| 36 | struct rndis_request { | 37 | struct rndis_request { |
| @@ -76,6 +77,7 @@ static struct rndis_device *get_rndis_device(void) | |||
| 76 | spin_lock_init(&device->request_lock); | 77 | spin_lock_init(&device->request_lock); |
| 77 | 78 | ||
| 78 | INIT_LIST_HEAD(&device->req_list); | 79 | INIT_LIST_HEAD(&device->req_list); |
| 80 | INIT_WORK(&device->mcast_work, rndis_set_multicast); | ||
| 79 | 81 | ||
| 80 | device->state = RNDIS_DEV_UNINITIALIZED; | 82 | device->state = RNDIS_DEV_UNINITIALIZED; |
| 81 | 83 | ||
| @@ -815,7 +817,8 @@ static int rndis_filter_query_link_speed(struct rndis_device *dev) | |||
| 815 | return ret; | 817 | return ret; |
| 816 | } | 818 | } |
| 817 | 819 | ||
| 818 | int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter) | 820 | static int rndis_filter_set_packet_filter(struct rndis_device *dev, |
| 821 | u32 new_filter) | ||
| 819 | { | 822 | { |
| 820 | struct rndis_request *request; | 823 | struct rndis_request *request; |
| 821 | struct rndis_set_request *set; | 824 | struct rndis_set_request *set; |
| @@ -846,6 +849,28 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter) | |||
| 846 | return ret; | 849 | return ret; |
| 847 | } | 850 | } |
| 848 | 851 | ||
| 852 | static void rndis_set_multicast(struct work_struct *w) | ||
| 853 | { | ||
| 854 | struct rndis_device *rdev | ||
| 855 | = container_of(w, struct rndis_device, mcast_work); | ||
| 856 | |||
| 857 | if (rdev->ndev->flags & IFF_PROMISC) | ||
| 858 | rndis_filter_set_packet_filter(rdev, | ||
| 859 | NDIS_PACKET_TYPE_PROMISCUOUS); | ||
| 860 | else | ||
| 861 | rndis_filter_set_packet_filter(rdev, | ||
| 862 | NDIS_PACKET_TYPE_BROADCAST | | ||
| 863 | NDIS_PACKET_TYPE_ALL_MULTICAST | | ||
| 864 | NDIS_PACKET_TYPE_DIRECTED); | ||
| 865 | } | ||
| 866 | |||
| 867 | void rndis_filter_update(struct netvsc_device *nvdev) | ||
| 868 | { | ||
| 869 | struct rndis_device *rdev = nvdev->extension; | ||
| 870 | |||
| 871 | schedule_work(&rdev->mcast_work); | ||
| 872 | } | ||
| 873 | |||
| 849 | static int rndis_filter_init_device(struct rndis_device *dev) | 874 | static int rndis_filter_init_device(struct rndis_device *dev) |
| 850 | { | 875 | { |
| 851 | struct rndis_request *request; | 876 | struct rndis_request *request; |
| @@ -973,6 +998,9 @@ static int rndis_filter_close_device(struct rndis_device *dev) | |||
| 973 | if (dev->state != RNDIS_DEV_DATAINITIALIZED) | 998 | if (dev->state != RNDIS_DEV_DATAINITIALIZED) |
| 974 | return 0; | 999 | return 0; |
| 975 | 1000 | ||
| 1001 | /* Make sure rndis_set_multicast doesn't re-enable filter! */ | ||
| 1002 | cancel_work_sync(&dev->mcast_work); | ||
| 1003 | |||
| 976 | ret = rndis_filter_set_packet_filter(dev, 0); | 1004 | ret = rndis_filter_set_packet_filter(dev, 0); |
| 977 | if (ret == -ENODEV) | 1005 | if (ret == -ENODEV) |
| 978 | ret = 0; | 1006 | ret = 0; |
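
The netvsc and rndis_filter hunks move the deferred multicast update into the rndis_device itself, so the work item is initialised next to the RNDIS state and cancelled in rndis_filter_close_device() before the packet filter is cleared. The filter choice itself is a plain flag test; a tiny sketch of that mapping follows, with placeholder NDIS_PACKET_TYPE_* values rather than the real constants:

#include <stdio.h>

#define IFF_PROMISC 0x100  /* interface flag value used by the kernel */

/* Placeholder NDIS filter bits, for illustration only. */
#define NDIS_PACKET_TYPE_DIRECTED      0x0001
#define NDIS_PACKET_TYPE_ALL_MULTICAST 0x0004
#define NDIS_PACKET_TYPE_BROADCAST     0x0008
#define NDIS_PACKET_TYPE_PROMISCUOUS   0x0020

static unsigned int rndis_filter_for_flags(unsigned int dev_flags)
{
	if (dev_flags & IFF_PROMISC)
		return NDIS_PACKET_TYPE_PROMISCUOUS;

	return NDIS_PACKET_TYPE_BROADCAST |
	       NDIS_PACKET_TYPE_ALL_MULTICAST |
	       NDIS_PACKET_TYPE_DIRECTED;
}

int main(void)
{
	printf("normal:  0x%x\n", rndis_filter_for_flags(0));
	printf("promisc: 0x%x\n", rndis_filter_for_flags(IFF_PROMISC));
	return 0;
}
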
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index 312fce7302d3..144ea5ae8ab4 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c | |||
| @@ -207,7 +207,6 @@ static void ifb_dev_free(struct net_device *dev) | |||
| 207 | __skb_queue_purge(&txp->tq); | 207 | __skb_queue_purge(&txp->tq); |
| 208 | } | 208 | } |
| 209 | kfree(dp->tx_private); | 209 | kfree(dp->tx_private); |
| 210 | free_netdev(dev); | ||
| 211 | } | 210 | } |
| 212 | 211 | ||
| 213 | static void ifb_setup(struct net_device *dev) | 212 | static void ifb_setup(struct net_device *dev) |
| @@ -230,7 +229,8 @@ static void ifb_setup(struct net_device *dev) | |||
| 230 | dev->priv_flags &= ~IFF_TX_SKB_SHARING; | 229 | dev->priv_flags &= ~IFF_TX_SKB_SHARING; |
| 231 | netif_keep_dst(dev); | 230 | netif_keep_dst(dev); |
| 232 | eth_hw_addr_random(dev); | 231 | eth_hw_addr_random(dev); |
| 233 | dev->destructor = ifb_dev_free; | 232 | dev->needs_free_netdev = true; |
| 233 | dev->priv_destructor = ifb_dev_free; | ||
| 234 | } | 234 | } |
| 235 | 235 | ||
| 236 | static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev) | 236 | static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev) |
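
Several hunks in this series follow the same conversion seen here for ifb: drop the free_netdev() call from the driver destructor, set needs_free_netdev so the core releases the device, and keep only driver-private cleanup in priv_destructor. A minimal sketch of that split, using a made-up device structure instead of struct net_device:

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct fake_netdev {
	bool needs_free;                             /* core frees the object */
	void (*priv_destructor)(struct fake_netdev *);
	void *priv;                                  /* driver-private state */
};

static void ifb_like_destructor(struct fake_netdev *dev)
{
	/* Only release driver-private resources here; no free(dev). */
	free(dev->priv);
	printf("priv freed\n");
}

static void core_unregister(struct fake_netdev *dev)
{
	if (dev->priv_destructor)
		dev->priv_destructor(dev);
	if (dev->needs_free) {                       /* the core, not the driver, frees it */
		free(dev);
		printf("netdev freed by core\n");
	}
}

int main(void)
{
	struct fake_netdev *dev = calloc(1, sizeof(*dev));

	dev->priv = malloc(64);
	dev->needs_free = true;
	dev->priv_destructor = ifb_like_destructor;
	core_unregister(dev);
	return 0;
}
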
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 618ed88fad0f..7c7680c8f0e3 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c | |||
| @@ -632,7 +632,7 @@ void ipvlan_link_setup(struct net_device *dev) | |||
| 632 | dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); | 632 | dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); |
| 633 | dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE; | 633 | dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE; |
| 634 | dev->netdev_ops = &ipvlan_netdev_ops; | 634 | dev->netdev_ops = &ipvlan_netdev_ops; |
| 635 | dev->destructor = free_netdev; | 635 | dev->needs_free_netdev = true; |
| 636 | dev->header_ops = &ipvlan_header_ops; | 636 | dev->header_ops = &ipvlan_header_ops; |
| 637 | dev->ethtool_ops = &ipvlan_ethtool_ops; | 637 | dev->ethtool_ops = &ipvlan_ethtool_ops; |
| 638 | } | 638 | } |
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 224f65cb576b..30612497643c 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c | |||
| @@ -159,7 +159,6 @@ static void loopback_dev_free(struct net_device *dev) | |||
| 159 | { | 159 | { |
| 160 | dev_net(dev)->loopback_dev = NULL; | 160 | dev_net(dev)->loopback_dev = NULL; |
| 161 | free_percpu(dev->lstats); | 161 | free_percpu(dev->lstats); |
| 162 | free_netdev(dev); | ||
| 163 | } | 162 | } |
| 164 | 163 | ||
| 165 | static const struct net_device_ops loopback_ops = { | 164 | static const struct net_device_ops loopback_ops = { |
| @@ -196,7 +195,8 @@ static void loopback_setup(struct net_device *dev) | |||
| 196 | dev->ethtool_ops = &loopback_ethtool_ops; | 195 | dev->ethtool_ops = &loopback_ethtool_ops; |
| 197 | dev->header_ops = &eth_header_ops; | 196 | dev->header_ops = &eth_header_ops; |
| 198 | dev->netdev_ops = &loopback_ops; | 197 | dev->netdev_ops = &loopback_ops; |
| 199 | dev->destructor = loopback_dev_free; | 198 | dev->needs_free_netdev = true; |
| 199 | dev->priv_destructor = loopback_dev_free; | ||
| 200 | } | 200 | } |
| 201 | 201 | ||
| 202 | /* Setup and register the loopback device. */ | 202 | /* Setup and register the loopback device. */ |
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index cdc347be68f2..79411675f0e6 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c | |||
| @@ -2996,7 +2996,6 @@ static void macsec_free_netdev(struct net_device *dev) | |||
| 2996 | free_percpu(macsec->secy.tx_sc.stats); | 2996 | free_percpu(macsec->secy.tx_sc.stats); |
| 2997 | 2997 | ||
| 2998 | dev_put(real_dev); | 2998 | dev_put(real_dev); |
| 2999 | free_netdev(dev); | ||
| 3000 | } | 2999 | } |
| 3001 | 3000 | ||
| 3002 | static void macsec_setup(struct net_device *dev) | 3001 | static void macsec_setup(struct net_device *dev) |
| @@ -3006,7 +3005,8 @@ static void macsec_setup(struct net_device *dev) | |||
| 3006 | dev->max_mtu = ETH_MAX_MTU; | 3005 | dev->max_mtu = ETH_MAX_MTU; |
| 3007 | dev->priv_flags |= IFF_NO_QUEUE; | 3006 | dev->priv_flags |= IFF_NO_QUEUE; |
| 3008 | dev->netdev_ops = &macsec_netdev_ops; | 3007 | dev->netdev_ops = &macsec_netdev_ops; |
| 3009 | dev->destructor = macsec_free_netdev; | 3008 | dev->needs_free_netdev = true; |
| 3009 | dev->priv_destructor = macsec_free_netdev; | ||
| 3010 | SET_NETDEV_DEVTYPE(dev, &macsec_type); | 3010 | SET_NETDEV_DEVTYPE(dev, &macsec_type); |
| 3011 | 3011 | ||
| 3012 | eth_zero_addr(dev->broadcast); | 3012 | eth_zero_addr(dev->broadcast); |
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 346ad2ff3998..72b801803aa4 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
| @@ -39,16 +39,20 @@ | |||
| 39 | #define MACVLAN_HASH_SIZE (1<<MACVLAN_HASH_BITS) | 39 | #define MACVLAN_HASH_SIZE (1<<MACVLAN_HASH_BITS) |
| 40 | #define MACVLAN_BC_QUEUE_LEN 1000 | 40 | #define MACVLAN_BC_QUEUE_LEN 1000 |
| 41 | 41 | ||
| 42 | #define MACVLAN_F_PASSTHRU 1 | ||
| 43 | #define MACVLAN_F_ADDRCHANGE 2 | ||
| 44 | |||
| 42 | struct macvlan_port { | 45 | struct macvlan_port { |
| 43 | struct net_device *dev; | 46 | struct net_device *dev; |
| 44 | struct hlist_head vlan_hash[MACVLAN_HASH_SIZE]; | 47 | struct hlist_head vlan_hash[MACVLAN_HASH_SIZE]; |
| 45 | struct list_head vlans; | 48 | struct list_head vlans; |
| 46 | struct sk_buff_head bc_queue; | 49 | struct sk_buff_head bc_queue; |
| 47 | struct work_struct bc_work; | 50 | struct work_struct bc_work; |
| 48 | bool passthru; | 51 | u32 flags; |
| 49 | int count; | 52 | int count; |
| 50 | struct hlist_head vlan_source_hash[MACVLAN_HASH_SIZE]; | 53 | struct hlist_head vlan_source_hash[MACVLAN_HASH_SIZE]; |
| 51 | DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ); | 54 | DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ); |
| 55 | unsigned char perm_addr[ETH_ALEN]; | ||
| 52 | }; | 56 | }; |
| 53 | 57 | ||
| 54 | struct macvlan_source_entry { | 58 | struct macvlan_source_entry { |
| @@ -66,6 +70,31 @@ struct macvlan_skb_cb { | |||
| 66 | 70 | ||
| 67 | static void macvlan_port_destroy(struct net_device *dev); | 71 | static void macvlan_port_destroy(struct net_device *dev); |
| 68 | 72 | ||
| 73 | static inline bool macvlan_passthru(const struct macvlan_port *port) | ||
| 74 | { | ||
| 75 | return port->flags & MACVLAN_F_PASSTHRU; | ||
| 76 | } | ||
| 77 | |||
| 78 | static inline void macvlan_set_passthru(struct macvlan_port *port) | ||
| 79 | { | ||
| 80 | port->flags |= MACVLAN_F_PASSTHRU; | ||
| 81 | } | ||
| 82 | |||
| 83 | static inline bool macvlan_addr_change(const struct macvlan_port *port) | ||
| 84 | { | ||
| 85 | return port->flags & MACVLAN_F_ADDRCHANGE; | ||
| 86 | } | ||
| 87 | |||
| 88 | static inline void macvlan_set_addr_change(struct macvlan_port *port) | ||
| 89 | { | ||
| 90 | port->flags |= MACVLAN_F_ADDRCHANGE; | ||
| 91 | } | ||
| 92 | |||
| 93 | static inline void macvlan_clear_addr_change(struct macvlan_port *port) | ||
| 94 | { | ||
| 95 | port->flags &= ~MACVLAN_F_ADDRCHANGE; | ||
| 96 | } | ||
| 97 | |||
| 69 | /* Hash Ethernet address */ | 98 | /* Hash Ethernet address */ |
| 70 | static u32 macvlan_eth_hash(const unsigned char *addr) | 99 | static u32 macvlan_eth_hash(const unsigned char *addr) |
| 71 | { | 100 | { |
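
The macvlan hunk above replaces the lone passthru bool with a flags word so a second state bit (ADDRCHANGE) can share the same storage, accessed through small bit helpers. A stand-alone sketch of the same flag packing:

#include <stdio.h>
#include <stdbool.h>

#define MACVLAN_F_PASSTHRU   1
#define MACVLAN_F_ADDRCHANGE 2

struct port { unsigned int flags; };

static bool port_passthru(const struct port *p)    { return p->flags & MACVLAN_F_PASSTHRU; }
static void port_set_passthru(struct port *p)      { p->flags |= MACVLAN_F_PASSTHRU; }
static bool port_addr_change(const struct port *p) { return p->flags & MACVLAN_F_ADDRCHANGE; }
static void port_set_addr_change(struct port *p)   { p->flags |= MACVLAN_F_ADDRCHANGE; }
static void port_clear_addr_change(struct port *p) { p->flags &= ~MACVLAN_F_ADDRCHANGE; }

int main(void)
{
	struct port p = { 0 };

	port_set_passthru(&p);
	port_set_addr_change(&p);
	printf("passthru=%d addr_change=%d\n", port_passthru(&p), port_addr_change(&p));
	port_clear_addr_change(&p);
	printf("passthru=%d addr_change=%d\n", port_passthru(&p), port_addr_change(&p));
	return 0;
}
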
| @@ -181,11 +210,12 @@ static void macvlan_hash_change_addr(struct macvlan_dev *vlan, | |||
| 181 | static bool macvlan_addr_busy(const struct macvlan_port *port, | 210 | static bool macvlan_addr_busy(const struct macvlan_port *port, |
| 182 | const unsigned char *addr) | 211 | const unsigned char *addr) |
| 183 | { | 212 | { |
| 184 | /* Test to see if the specified multicast address is | 213 | /* Test to see if the specified address is |
| 185 | * currently in use by the underlying device or | 214 | * currently in use by the underlying device or |
| 186 | * another macvlan. | 215 | * another macvlan. |
| 187 | */ | 216 | */ |
| 188 | if (ether_addr_equal_64bits(port->dev->dev_addr, addr)) | 217 | if (!macvlan_passthru(port) && !macvlan_addr_change(port) && |
| 218 | ether_addr_equal_64bits(port->dev->dev_addr, addr)) | ||
| 189 | return true; | 219 | return true; |
| 190 | 220 | ||
| 191 | if (macvlan_hash_lookup(port, addr)) | 221 | if (macvlan_hash_lookup(port, addr)) |
| @@ -445,7 +475,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) | |||
| 445 | } | 475 | } |
| 446 | 476 | ||
| 447 | macvlan_forward_source(skb, port, eth->h_source); | 477 | macvlan_forward_source(skb, port, eth->h_source); |
| 448 | if (port->passthru) | 478 | if (macvlan_passthru(port)) |
| 449 | vlan = list_first_or_null_rcu(&port->vlans, | 479 | vlan = list_first_or_null_rcu(&port->vlans, |
| 450 | struct macvlan_dev, list); | 480 | struct macvlan_dev, list); |
| 451 | else | 481 | else |
| @@ -574,7 +604,7 @@ static int macvlan_open(struct net_device *dev) | |||
| 574 | struct net_device *lowerdev = vlan->lowerdev; | 604 | struct net_device *lowerdev = vlan->lowerdev; |
| 575 | int err; | 605 | int err; |
| 576 | 606 | ||
| 577 | if (vlan->port->passthru) { | 607 | if (macvlan_passthru(vlan->port)) { |
| 578 | if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC)) { | 608 | if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC)) { |
| 579 | err = dev_set_promiscuity(lowerdev, 1); | 609 | err = dev_set_promiscuity(lowerdev, 1); |
| 580 | if (err < 0) | 610 | if (err < 0) |
| @@ -649,7 +679,7 @@ static int macvlan_stop(struct net_device *dev) | |||
| 649 | dev_uc_unsync(lowerdev, dev); | 679 | dev_uc_unsync(lowerdev, dev); |
| 650 | dev_mc_unsync(lowerdev, dev); | 680 | dev_mc_unsync(lowerdev, dev); |
| 651 | 681 | ||
| 652 | if (vlan->port->passthru) { | 682 | if (macvlan_passthru(vlan->port)) { |
| 653 | if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC)) | 683 | if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC)) |
| 654 | dev_set_promiscuity(lowerdev, -1); | 684 | dev_set_promiscuity(lowerdev, -1); |
| 655 | goto hash_del; | 685 | goto hash_del; |
| @@ -672,6 +702,7 @@ static int macvlan_sync_address(struct net_device *dev, unsigned char *addr) | |||
| 672 | { | 702 | { |
| 673 | struct macvlan_dev *vlan = netdev_priv(dev); | 703 | struct macvlan_dev *vlan = netdev_priv(dev); |
| 674 | struct net_device *lowerdev = vlan->lowerdev; | 704 | struct net_device *lowerdev = vlan->lowerdev; |
| 705 | struct macvlan_port *port = vlan->port; | ||
| 675 | int err; | 706 | int err; |
| 676 | 707 | ||
| 677 | if (!(dev->flags & IFF_UP)) { | 708 | if (!(dev->flags & IFF_UP)) { |
| @@ -682,7 +713,7 @@ static int macvlan_sync_address(struct net_device *dev, unsigned char *addr) | |||
| 682 | if (macvlan_addr_busy(vlan->port, addr)) | 713 | if (macvlan_addr_busy(vlan->port, addr)) |
| 683 | return -EBUSY; | 714 | return -EBUSY; |
| 684 | 715 | ||
| 685 | if (!vlan->port->passthru) { | 716 | if (!macvlan_passthru(port)) { |
| 686 | err = dev_uc_add(lowerdev, addr); | 717 | err = dev_uc_add(lowerdev, addr); |
| 687 | if (err) | 718 | if (err) |
| 688 | return err; | 719 | return err; |
| @@ -692,6 +723,15 @@ static int macvlan_sync_address(struct net_device *dev, unsigned char *addr) | |||
| 692 | 723 | ||
| 693 | macvlan_hash_change_addr(vlan, addr); | 724 | macvlan_hash_change_addr(vlan, addr); |
| 694 | } | 725 | } |
| 726 | if (macvlan_passthru(port) && !macvlan_addr_change(port)) { | ||
| 727 | /* Since addr_change isn't set, we are here due to lower | ||
| 728 | * device change. Save the lower-dev address so we can | ||
| 729 | * restore it later. | ||
| 730 | */ | ||
| 731 | ether_addr_copy(vlan->port->perm_addr, | ||
| 732 | lowerdev->dev_addr); | ||
| 733 | } | ||
| 734 | macvlan_clear_addr_change(port); | ||
| 695 | return 0; | 735 | return 0; |
| 696 | } | 736 | } |
| 697 | 737 | ||
| @@ -703,7 +743,12 @@ static int macvlan_set_mac_address(struct net_device *dev, void *p) | |||
| 703 | if (!is_valid_ether_addr(addr->sa_data)) | 743 | if (!is_valid_ether_addr(addr->sa_data)) |
| 704 | return -EADDRNOTAVAIL; | 744 | return -EADDRNOTAVAIL; |
| 705 | 745 | ||
| 746 | /* If the addresses are the same, this is a no-op */ | ||
| 747 | if (ether_addr_equal(dev->dev_addr, addr->sa_data)) | ||
| 748 | return 0; | ||
| 749 | |||
| 706 | if (vlan->mode == MACVLAN_MODE_PASSTHRU) { | 750 | if (vlan->mode == MACVLAN_MODE_PASSTHRU) { |
| 751 | macvlan_set_addr_change(vlan->port); | ||
| 707 | dev_set_mac_address(vlan->lowerdev, addr); | 752 | dev_set_mac_address(vlan->lowerdev, addr); |
| 708 | return 0; | 753 | return 0; |
| 709 | } | 754 | } |
| @@ -928,7 +973,7 @@ static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], | |||
| 928 | /* Support unicast filter only on passthru devices. | 973 | /* Support unicast filter only on passthru devices. |
| 929 | * Multicast filter should be allowed on all devices. | 974 | * Multicast filter should be allowed on all devices. |
| 930 | */ | 975 | */ |
| 931 | if (!vlan->port->passthru && is_unicast_ether_addr(addr)) | 976 | if (!macvlan_passthru(vlan->port) && is_unicast_ether_addr(addr)) |
| 932 | return -EOPNOTSUPP; | 977 | return -EOPNOTSUPP; |
| 933 | 978 | ||
| 934 | if (flags & NLM_F_REPLACE) | 979 | if (flags & NLM_F_REPLACE) |
| @@ -952,7 +997,7 @@ static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], | |||
| 952 | /* Support unicast filter only on passthru devices. | 997 | /* Support unicast filter only on passthru devices. |
| 953 | * Multicast filter should be allowed on all devices. | 998 | * Multicast filter should be allowed on all devices. |
| 954 | */ | 999 | */ |
| 955 | if (!vlan->port->passthru && is_unicast_ether_addr(addr)) | 1000 | if (!macvlan_passthru(vlan->port) && is_unicast_ether_addr(addr)) |
| 956 | return -EOPNOTSUPP; | 1001 | return -EOPNOTSUPP; |
| 957 | 1002 | ||
| 958 | if (is_unicast_ether_addr(addr)) | 1003 | if (is_unicast_ether_addr(addr)) |
| @@ -1092,7 +1137,7 @@ void macvlan_common_setup(struct net_device *dev) | |||
| 1092 | netif_keep_dst(dev); | 1137 | netif_keep_dst(dev); |
| 1093 | dev->priv_flags |= IFF_UNICAST_FLT; | 1138 | dev->priv_flags |= IFF_UNICAST_FLT; |
| 1094 | dev->netdev_ops = &macvlan_netdev_ops; | 1139 | dev->netdev_ops = &macvlan_netdev_ops; |
| 1095 | dev->destructor = free_netdev; | 1140 | dev->needs_free_netdev = true; |
| 1096 | dev->header_ops = &macvlan_hard_header_ops; | 1141 | dev->header_ops = &macvlan_hard_header_ops; |
| 1097 | dev->ethtool_ops = &macvlan_ethtool_ops; | 1142 | dev->ethtool_ops = &macvlan_ethtool_ops; |
| 1098 | } | 1143 | } |
| @@ -1120,8 +1165,8 @@ static int macvlan_port_create(struct net_device *dev) | |||
| 1120 | if (port == NULL) | 1165 | if (port == NULL) |
| 1121 | return -ENOMEM; | 1166 | return -ENOMEM; |
| 1122 | 1167 | ||
| 1123 | port->passthru = false; | ||
| 1124 | port->dev = dev; | 1168 | port->dev = dev; |
| 1169 | ether_addr_copy(port->perm_addr, dev->dev_addr); | ||
| 1125 | INIT_LIST_HEAD(&port->vlans); | 1170 | INIT_LIST_HEAD(&port->vlans); |
| 1126 | for (i = 0; i < MACVLAN_HASH_SIZE; i++) | 1171 | for (i = 0; i < MACVLAN_HASH_SIZE; i++) |
| 1127 | INIT_HLIST_HEAD(&port->vlan_hash[i]); | 1172 | INIT_HLIST_HEAD(&port->vlan_hash[i]); |
| @@ -1161,6 +1206,18 @@ static void macvlan_port_destroy(struct net_device *dev) | |||
| 1161 | kfree_skb(skb); | 1206 | kfree_skb(skb); |
| 1162 | } | 1207 | } |
| 1163 | 1208 | ||
| 1209 | /* If the lower device address has been changed by passthru | ||
| 1210 | * macvlan, put it back. | ||
| 1211 | */ | ||
| 1212 | if (macvlan_passthru(port) && | ||
| 1213 | !ether_addr_equal(port->dev->dev_addr, port->perm_addr)) { | ||
| 1214 | struct sockaddr sa; | ||
| 1215 | |||
| 1216 | sa.sa_family = port->dev->type; | ||
| 1217 | memcpy(&sa.sa_data, port->perm_addr, port->dev->addr_len); | ||
| 1218 | dev_set_mac_address(port->dev, &sa); | ||
| 1219 | } | ||
| 1220 | |||
| 1164 | kfree(port); | 1221 | kfree(port); |
| 1165 | } | 1222 | } |
| 1166 | 1223 | ||
| @@ -1326,7 +1383,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, | |||
| 1326 | port = macvlan_port_get_rtnl(lowerdev); | 1383 | port = macvlan_port_get_rtnl(lowerdev); |
| 1327 | 1384 | ||
| 1328 | /* Only 1 macvlan device can be created in passthru mode */ | 1385 | /* Only 1 macvlan device can be created in passthru mode */ |
| 1329 | if (port->passthru) { | 1386 | if (macvlan_passthru(port)) { |
| 1330 | /* The macvlan port must be not created this time, | 1387 | /* The macvlan port must be not created this time, |
| 1331 | * still goto destroy_macvlan_port for readability. | 1388 | * still goto destroy_macvlan_port for readability. |
| 1332 | */ | 1389 | */ |
| @@ -1352,7 +1409,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, | |||
| 1352 | err = -EINVAL; | 1409 | err = -EINVAL; |
| 1353 | goto destroy_macvlan_port; | 1410 | goto destroy_macvlan_port; |
| 1354 | } | 1411 | } |
| 1355 | port->passthru = true; | 1412 | macvlan_set_passthru(port); |
| 1356 | eth_hw_addr_inherit(dev, lowerdev); | 1413 | eth_hw_addr_inherit(dev, lowerdev); |
| 1357 | } | 1414 | } |
| 1358 | 1415 | ||
| @@ -1434,7 +1491,7 @@ static int macvlan_changelink(struct net_device *dev, | |||
| 1434 | if (data && data[IFLA_MACVLAN_FLAGS]) { | 1491 | if (data && data[IFLA_MACVLAN_FLAGS]) { |
| 1435 | __u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]); | 1492 | __u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]); |
| 1436 | bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC; | 1493 | bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC; |
| 1437 | if (vlan->port->passthru && promisc) { | 1494 | if (macvlan_passthru(vlan->port) && promisc) { |
| 1438 | int err; | 1495 | int err; |
| 1439 | 1496 | ||
| 1440 | if (flags & MACVLAN_FLAG_NOPROMISC) | 1497 | if (flags & MACVLAN_FLAG_NOPROMISC) |
| @@ -1597,7 +1654,7 @@ static int macvlan_device_event(struct notifier_block *unused, | |||
| 1597 | } | 1654 | } |
| 1598 | break; | 1655 | break; |
| 1599 | case NETDEV_CHANGEADDR: | 1656 | case NETDEV_CHANGEADDR: |
| 1600 | if (!port->passthru) | 1657 | if (!macvlan_passthru(port)) |
| 1601 | return NOTIFY_DONE; | 1658 | return NOTIFY_DONE; |
| 1602 | 1659 | ||
| 1603 | vlan = list_first_entry_or_null(&port->vlans, | 1660 | vlan = list_first_entry_or_null(&port->vlans, |
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index 06ee6395117f..0e27920c2b6b 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c | |||
| @@ -358,7 +358,7 @@ static ssize_t enabled_store(struct config_item *item, | |||
| 358 | if (err) | 358 | if (err) |
| 359 | goto out_unlock; | 359 | goto out_unlock; |
| 360 | 360 | ||
| 361 | pr_info("netconsole: network logging started\n"); | 361 | pr_info("network logging started\n"); |
| 362 | } else { /* false */ | 362 | } else { /* false */ |
| 363 | /* We need to disable the netconsole before cleaning it up | 363 | /* We need to disable the netconsole before cleaning it up |
| 364 | * otherwise we might end up in write_msg() with | 364 | * otherwise we might end up in write_msg() with |
diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c index b91603835d26..c4b3362da4a2 100644 --- a/drivers/net/nlmon.c +++ b/drivers/net/nlmon.c | |||
| @@ -113,7 +113,7 @@ static void nlmon_setup(struct net_device *dev) | |||
| 113 | 113 | ||
| 114 | dev->netdev_ops = &nlmon_ops; | 114 | dev->netdev_ops = &nlmon_ops; |
| 115 | dev->ethtool_ops = &nlmon_ethtool_ops; | 115 | dev->ethtool_ops = &nlmon_ethtool_ops; |
| 116 | dev->destructor = free_netdev; | 116 | dev->needs_free_netdev = true; |
| 117 | 117 | ||
| 118 | dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | | 118 | dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | |
| 119 | NETIF_F_HIGHDMA | NETIF_F_LLTX; | 119 | NETIF_F_HIGHDMA | NETIF_F_LLTX; |
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index c360dd6ead22..3ab6c58d4be6 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig | |||
| @@ -127,6 +127,7 @@ config MDIO_THUNDER | |||
| 127 | tristate "ThunderX SOCs MDIO buses" | 127 | tristate "ThunderX SOCs MDIO buses" |
| 128 | depends on 64BIT | 128 | depends on 64BIT |
| 129 | depends on PCI | 129 | depends on PCI |
| 130 | depends on !(MDIO_DEVICE=y && PHYLIB=m) | ||
| 130 | select MDIO_CAVIUM | 131 | select MDIO_CAVIUM |
| 131 | help | 132 | help |
| 132 | This driver supports the MDIO interfaces found on Cavium | 133 | This driver supports the MDIO interfaces found on Cavium |
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index ed0d10f54f26..c3065236ffcc 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c | |||
| @@ -908,7 +908,7 @@ static void decode_txts(struct dp83640_private *dp83640, | |||
| 908 | if (overflow) { | 908 | if (overflow) { |
| 909 | pr_debug("tx timestamp queue overflow, count %d\n", overflow); | 909 | pr_debug("tx timestamp queue overflow, count %d\n", overflow); |
| 910 | while (skb) { | 910 | while (skb) { |
| 911 | skb_complete_tx_timestamp(skb, NULL); | 911 | kfree_skb(skb); |
| 912 | skb = skb_dequeue(&dp83640->tx_queue); | 912 | skb = skb_dequeue(&dp83640->tx_queue); |
| 913 | } | 913 | } |
| 914 | return; | 914 | return; |
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 9097e42bec2e..57297ba23987 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c | |||
| @@ -1127,8 +1127,6 @@ static int marvell_read_status_page(struct phy_device *phydev, int page) | |||
| 1127 | if (adv < 0) | 1127 | if (adv < 0) |
| 1128 | return adv; | 1128 | return adv; |
| 1129 | 1129 | ||
| 1130 | lpa &= adv; | ||
| 1131 | |||
| 1132 | if (status & MII_M1011_PHY_STATUS_FULLDUPLEX) | 1130 | if (status & MII_M1011_PHY_STATUS_FULLDUPLEX) |
| 1133 | phydev->duplex = DUPLEX_FULL; | 1131 | phydev->duplex = DUPLEX_FULL; |
| 1134 | else | 1132 | else |
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 8e73f5f36e71..f99c21f78b63 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c | |||
| @@ -658,6 +658,18 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv) | |||
| 658 | return 0; | 658 | return 0; |
| 659 | } | 659 | } |
| 660 | 660 | ||
| 661 | static int mdio_uevent(struct device *dev, struct kobj_uevent_env *env) | ||
| 662 | { | ||
| 663 | int rc; | ||
| 664 | |||
| 665 | /* Some devices have extra OF data and an OF-style MODALIAS */ | ||
| 666 | rc = of_device_uevent_modalias(dev, env); | ||
| 667 | if (rc != -ENODEV) | ||
| 668 | return rc; | ||
| 669 | |||
| 670 | return 0; | ||
| 671 | } | ||
| 672 | |||
| 661 | #ifdef CONFIG_PM | 673 | #ifdef CONFIG_PM |
| 662 | static int mdio_bus_suspend(struct device *dev) | 674 | static int mdio_bus_suspend(struct device *dev) |
| 663 | { | 675 | { |
| @@ -708,6 +720,7 @@ static const struct dev_pm_ops mdio_bus_pm_ops = { | |||
| 708 | struct bus_type mdio_bus_type = { | 720 | struct bus_type mdio_bus_type = { |
| 709 | .name = "mdio_bus", | 721 | .name = "mdio_bus", |
| 710 | .match = mdio_bus_match, | 722 | .match = mdio_bus_match, |
| 723 | .uevent = mdio_uevent, | ||
| 711 | .pm = MDIO_BUS_PM_OPS, | 724 | .pm = MDIO_BUS_PM_OPS, |
| 712 | }; | 725 | }; |
| 713 | EXPORT_SYMBOL(mdio_bus_type); | 726 | EXPORT_SYMBOL(mdio_bus_type); |
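
The new mdio_uevent() treats -ENODEV from of_device_uevent_modalias() as "no OF data to report" rather than as a failure, while any other return code is passed through. A small illustration of that optional-provider pattern; try_of_modalias() is a stand-in, not a real kernel helper:

#include <errno.h>
#include <stdio.h>

/* Stand-in for of_device_uevent_modalias(): pretend only some devices
 * carry OF data. */
static int try_of_modalias(int has_of_data)
{
	if (!has_of_data)
		return -ENODEV;        /* nothing to contribute */
	printf("MODALIAS=of:example\n");
	return 0;
}

static int mdio_uevent_like(int has_of_data)
{
	int rc = try_of_modalias(has_of_data);

	if (rc != -ENODEV)             /* real success or real error: pass it on */
		return rc;

	return 0;                      /* missing OF data is not an error */
}

int main(void)
{
	printf("with OF data:    %d\n", mdio_uevent_like(1));
	printf("without OF data: %d\n", mdio_uevent_like(0));
	return 0;
}
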
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 6a5fd18f062c..8b2038844ba9 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c | |||
| @@ -268,23 +268,12 @@ out: | |||
| 268 | return ret; | 268 | return ret; |
| 269 | } | 269 | } |
| 270 | 270 | ||
| 271 | static int kszphy_config_init(struct phy_device *phydev) | 271 | /* Some config bits need to be set again on resume, handle them here. */ |
| 272 | static int kszphy_config_reset(struct phy_device *phydev) | ||
| 272 | { | 273 | { |
| 273 | struct kszphy_priv *priv = phydev->priv; | 274 | struct kszphy_priv *priv = phydev->priv; |
| 274 | const struct kszphy_type *type; | ||
| 275 | int ret; | 275 | int ret; |
| 276 | 276 | ||
| 277 | if (!priv) | ||
| 278 | return 0; | ||
| 279 | |||
| 280 | type = priv->type; | ||
| 281 | |||
| 282 | if (type->has_broadcast_disable) | ||
| 283 | kszphy_broadcast_disable(phydev); | ||
| 284 | |||
| 285 | if (type->has_nand_tree_disable) | ||
| 286 | kszphy_nand_tree_disable(phydev); | ||
| 287 | |||
| 288 | if (priv->rmii_ref_clk_sel) { | 277 | if (priv->rmii_ref_clk_sel) { |
| 289 | ret = kszphy_rmii_clk_sel(phydev, priv->rmii_ref_clk_sel_val); | 278 | ret = kszphy_rmii_clk_sel(phydev, priv->rmii_ref_clk_sel_val); |
| 290 | if (ret) { | 279 | if (ret) { |
| @@ -295,11 +284,30 @@ static int kszphy_config_init(struct phy_device *phydev) | |||
| 295 | } | 284 | } |
| 296 | 285 | ||
| 297 | if (priv->led_mode >= 0) | 286 | if (priv->led_mode >= 0) |
| 298 | kszphy_setup_led(phydev, type->led_mode_reg, priv->led_mode); | 287 | kszphy_setup_led(phydev, priv->type->led_mode_reg, priv->led_mode); |
| 299 | 288 | ||
| 300 | return 0; | 289 | return 0; |
| 301 | } | 290 | } |
| 302 | 291 | ||
| 292 | static int kszphy_config_init(struct phy_device *phydev) | ||
| 293 | { | ||
| 294 | struct kszphy_priv *priv = phydev->priv; | ||
| 295 | const struct kszphy_type *type; | ||
| 296 | |||
| 297 | if (!priv) | ||
| 298 | return 0; | ||
| 299 | |||
| 300 | type = priv->type; | ||
| 301 | |||
| 302 | if (type->has_broadcast_disable) | ||
| 303 | kszphy_broadcast_disable(phydev); | ||
| 304 | |||
| 305 | if (type->has_nand_tree_disable) | ||
| 306 | kszphy_nand_tree_disable(phydev); | ||
| 307 | |||
| 308 | return kszphy_config_reset(phydev); | ||
| 309 | } | ||
| 310 | |||
| 303 | static int ksz8041_config_init(struct phy_device *phydev) | 311 | static int ksz8041_config_init(struct phy_device *phydev) |
| 304 | { | 312 | { |
| 305 | struct device_node *of_node = phydev->mdio.dev.of_node; | 313 | struct device_node *of_node = phydev->mdio.dev.of_node; |
| @@ -611,6 +619,8 @@ static int ksz9031_read_status(struct phy_device *phydev) | |||
| 611 | if ((regval & 0xFF) == 0xFF) { | 619 | if ((regval & 0xFF) == 0xFF) { |
| 612 | phy_init_hw(phydev); | 620 | phy_init_hw(phydev); |
| 613 | phydev->link = 0; | 621 | phydev->link = 0; |
| 622 | if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev)) | ||
| 623 | phydev->drv->config_intr(phydev); | ||
| 614 | } | 624 | } |
| 615 | 625 | ||
| 616 | return 0; | 626 | return 0; |
| @@ -700,8 +710,14 @@ static int kszphy_suspend(struct phy_device *phydev) | |||
| 700 | 710 | ||
| 701 | static int kszphy_resume(struct phy_device *phydev) | 711 | static int kszphy_resume(struct phy_device *phydev) |
| 702 | { | 712 | { |
| 713 | int ret; | ||
| 714 | |||
| 703 | genphy_resume(phydev); | 715 | genphy_resume(phydev); |
| 704 | 716 | ||
| 717 | ret = kszphy_config_reset(phydev); | ||
| 718 | if (ret) | ||
| 719 | return ret; | ||
| 720 | |||
| 705 | /* Enable PHY Interrupts */ | 721 | /* Enable PHY Interrupts */ |
| 706 | if (phy_interrupt_is_valid(phydev)) { | 722 | if (phy_interrupt_is_valid(phydev)) { |
| 707 | phydev->interrupts = PHY_INTERRUPT_ENABLED; | 723 | phydev->interrupts = PHY_INTERRUPT_ENABLED; |
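
The micrel hunks split kszphy_config_init() so the settings the PHY loses across suspend (RMII reference clock select, LED mode) live in kszphy_config_reset(), which kszphy_resume() now re-runs after genphy_resume(). A generic sketch of that one-time versus re-applied split, with the register writes stubbed out:

#include <stdio.h>

struct phy { int led_mode; int rmii_clk_sel; };

/* Settings the hardware forgets across resume: re-apply them here. */
static int phy_config_reset(struct phy *p)
{
	if (p->rmii_clk_sel)
		printf("re-select RMII reference clock\n");
	if (p->led_mode >= 0)
		printf("re-program LED mode %d\n", p->led_mode);
	return 0;
}

/* One-time initialisation, then the volatile part. */
static int phy_config_init(struct phy *p)
{
	printf("disable broadcast / NAND-tree once\n");
	return phy_config_reset(p);
}

static int phy_resume(struct phy *p)
{
	printf("genphy-style resume\n");
	return phy_config_reset(p);    /* restore what suspend wiped */
}

int main(void)
{
	struct phy p = { .led_mode = 1, .rmii_clk_sel = 1 };

	phy_config_init(&p);
	phy_resume(&p);
	return 0;
}
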
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 82ab8fb82587..eebb0e1c70ff 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
| @@ -54,6 +54,8 @@ static const char *phy_speed_to_str(int speed) | |||
| 54 | return "5Gbps"; | 54 | return "5Gbps"; |
| 55 | case SPEED_10000: | 55 | case SPEED_10000: |
| 56 | return "10Gbps"; | 56 | return "10Gbps"; |
| 57 | case SPEED_14000: | ||
| 58 | return "14Gbps"; | ||
| 57 | case SPEED_20000: | 59 | case SPEED_20000: |
| 58 | return "20Gbps"; | 60 | return "20Gbps"; |
| 59 | case SPEED_25000: | 61 | case SPEED_25000: |
| @@ -241,7 +243,7 @@ static const struct phy_setting settings[] = { | |||
| 241 | * phy_lookup_setting - lookup a PHY setting | 243 | * phy_lookup_setting - lookup a PHY setting |
| 242 | * @speed: speed to match | 244 | * @speed: speed to match |
| 243 | * @duplex: duplex to match | 245 | * @duplex: duplex to match |
| 244 | * @feature: allowed link modes | 246 | * @features: allowed link modes |
| 245 | * @exact: an exact match is required | 247 | * @exact: an exact match is required |
| 246 | * | 248 | * |
| 247 | * Search the settings array for a setting that matches the speed and | 249 | * Search the settings array for a setting that matches the speed and |
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c index 1da31dc47f86..74b907206aa7 100644 --- a/drivers/net/slip/slip.c +++ b/drivers/net/slip/slip.c | |||
| @@ -629,7 +629,7 @@ static void sl_uninit(struct net_device *dev) | |||
| 629 | static void sl_free_netdev(struct net_device *dev) | 629 | static void sl_free_netdev(struct net_device *dev) |
| 630 | { | 630 | { |
| 631 | int i = dev->base_addr; | 631 | int i = dev->base_addr; |
| 632 | free_netdev(dev); | 632 | |
| 633 | slip_devs[i] = NULL; | 633 | slip_devs[i] = NULL; |
| 634 | } | 634 | } |
| 635 | 635 | ||
| @@ -651,7 +651,8 @@ static const struct net_device_ops sl_netdev_ops = { | |||
| 651 | static void sl_setup(struct net_device *dev) | 651 | static void sl_setup(struct net_device *dev) |
| 652 | { | 652 | { |
| 653 | dev->netdev_ops = &sl_netdev_ops; | 653 | dev->netdev_ops = &sl_netdev_ops; |
| 654 | dev->destructor = sl_free_netdev; | 654 | dev->needs_free_netdev = true; |
| 655 | dev->priv_destructor = sl_free_netdev; | ||
| 655 | 656 | ||
| 656 | dev->hard_header_len = 0; | 657 | dev->hard_header_len = 0; |
| 657 | dev->addr_len = 0; | 658 | dev->addr_len = 0; |
| @@ -1369,8 +1370,6 @@ static void __exit slip_exit(void) | |||
| 1369 | if (sl->tty) { | 1370 | if (sl->tty) { |
| 1370 | printk(KERN_ERR "%s: tty discipline still running\n", | 1371 | printk(KERN_ERR "%s: tty discipline still running\n", |
| 1371 | dev->name); | 1372 | dev->name); |
| 1372 | /* Intentionally leak the control block. */ | ||
| 1373 | dev->destructor = NULL; | ||
| 1374 | } | 1373 | } |
| 1375 | 1374 | ||
| 1376 | unregister_netdev(dev); | 1375 | unregister_netdev(dev); |
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 6c5d5ef46f75..fba8c136aa7c 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c | |||
| @@ -1643,7 +1643,6 @@ static void team_destructor(struct net_device *dev) | |||
| 1643 | struct team *team = netdev_priv(dev); | 1643 | struct team *team = netdev_priv(dev); |
| 1644 | 1644 | ||
| 1645 | free_percpu(team->pcpu_stats); | 1645 | free_percpu(team->pcpu_stats); |
| 1646 | free_netdev(dev); | ||
| 1647 | } | 1646 | } |
| 1648 | 1647 | ||
| 1649 | static int team_open(struct net_device *dev) | 1648 | static int team_open(struct net_device *dev) |
| @@ -2079,7 +2078,8 @@ static void team_setup(struct net_device *dev) | |||
| 2079 | 2078 | ||
| 2080 | dev->netdev_ops = &team_netdev_ops; | 2079 | dev->netdev_ops = &team_netdev_ops; |
| 2081 | dev->ethtool_ops = &team_ethtool_ops; | 2080 | dev->ethtool_ops = &team_ethtool_ops; |
| 2082 | dev->destructor = team_destructor; | 2081 | dev->needs_free_netdev = true; |
| 2082 | dev->priv_destructor = team_destructor; | ||
| 2083 | dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); | 2083 | dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); |
| 2084 | dev->priv_flags |= IFF_NO_QUEUE; | 2084 | dev->priv_flags |= IFF_NO_QUEUE; |
| 2085 | dev->priv_flags |= IFF_TEAM; | 2085 | dev->priv_flags |= IFF_TEAM; |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index bbd707b9ef7a..9ee7d4275640 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
| @@ -1560,7 +1560,6 @@ static void tun_free_netdev(struct net_device *dev) | |||
| 1560 | free_percpu(tun->pcpu_stats); | 1560 | free_percpu(tun->pcpu_stats); |
| 1561 | tun_flow_uninit(tun); | 1561 | tun_flow_uninit(tun); |
| 1562 | security_tun_dev_free_security(tun->security); | 1562 | security_tun_dev_free_security(tun->security); |
| 1563 | free_netdev(dev); | ||
| 1564 | } | 1563 | } |
| 1565 | 1564 | ||
| 1566 | static void tun_setup(struct net_device *dev) | 1565 | static void tun_setup(struct net_device *dev) |
| @@ -1571,7 +1570,8 @@ static void tun_setup(struct net_device *dev) | |||
| 1571 | tun->group = INVALID_GID; | 1570 | tun->group = INVALID_GID; |
| 1572 | 1571 | ||
| 1573 | dev->ethtool_ops = &tun_ethtool_ops; | 1572 | dev->ethtool_ops = &tun_ethtool_ops; |
| 1574 | dev->destructor = tun_free_netdev; | 1573 | dev->needs_free_netdev = true; |
| 1574 | dev->priv_destructor = tun_free_netdev; | ||
| 1575 | /* We prefer our own queue length */ | 1575 | /* We prefer our own queue length */ |
| 1576 | dev->tx_queue_len = TUN_READQ_SIZE; | 1576 | dev->tx_queue_len = TUN_READQ_SIZE; |
| 1577 | } | 1577 | } |
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index 51cf60092a18..4037ab27734a 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c | |||
| @@ -1722,6 +1722,18 @@ static const struct driver_info lenovo_info = { | |||
| 1722 | .tx_fixup = ax88179_tx_fixup, | 1722 | .tx_fixup = ax88179_tx_fixup, |
| 1723 | }; | 1723 | }; |
| 1724 | 1724 | ||
| 1725 | static const struct driver_info belkin_info = { | ||
| 1726 | .description = "Belkin USB Ethernet Adapter", | ||
| 1727 | .bind = ax88179_bind, | ||
| 1728 | .unbind = ax88179_unbind, | ||
| 1729 | .status = ax88179_status, | ||
| 1730 | .link_reset = ax88179_link_reset, | ||
| 1731 | .reset = ax88179_reset, | ||
| 1732 | .flags = FLAG_ETHER | FLAG_FRAMING_AX, | ||
| 1733 | .rx_fixup = ax88179_rx_fixup, | ||
| 1734 | .tx_fixup = ax88179_tx_fixup, | ||
| 1735 | }; | ||
| 1736 | |||
| 1725 | static const struct usb_device_id products[] = { | 1737 | static const struct usb_device_id products[] = { |
| 1726 | { | 1738 | { |
| 1727 | /* ASIX AX88179 10/100/1000 */ | 1739 | /* ASIX AX88179 10/100/1000 */ |
| @@ -1751,6 +1763,10 @@ static const struct usb_device_id products[] = { | |||
| 1751 | /* Lenovo OneLinkDock Gigabit LAN */ | 1763 | /* Lenovo OneLinkDock Gigabit LAN */ |
| 1752 | USB_DEVICE(0x17ef, 0x304b), | 1764 | USB_DEVICE(0x17ef, 0x304b), |
| 1753 | .driver_info = (unsigned long)&lenovo_info, | 1765 | .driver_info = (unsigned long)&lenovo_info, |
| 1766 | }, { | ||
| 1767 | /* Belkin B2B128 USB 3.0 Hub + Gigabit Ethernet Adapter */ | ||
| 1768 | USB_DEVICE(0x050d, 0x0128), | ||
| 1769 | .driver_info = (unsigned long)&belkin_info, | ||
| 1754 | }, | 1770 | }, |
| 1755 | { }, | 1771 | { }, |
| 1756 | }; | 1772 | }; |
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c index eb52de8205f0..c7a350bbaaa7 100644 --- a/drivers/net/usb/cdc-phonet.c +++ b/drivers/net/usb/cdc-phonet.c | |||
| @@ -298,7 +298,7 @@ static void usbpn_setup(struct net_device *dev) | |||
| 298 | dev->addr_len = 1; | 298 | dev->addr_len = 1; |
| 299 | dev->tx_queue_len = 3; | 299 | dev->tx_queue_len = 3; |
| 300 | 300 | ||
| 301 | dev->destructor = free_netdev; | 301 | dev->needs_free_netdev = true; |
| 302 | } | 302 | } |
| 303 | 303 | ||
| 304 | /* | 304 | /* |
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 8f923a147fa9..32a22f4e8356 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
| @@ -123,7 +123,7 @@ static void qmimux_setup(struct net_device *dev) | |||
| 123 | dev->addr_len = 0; | 123 | dev->addr_len = 0; |
| 124 | dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; | 124 | dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; |
| 125 | dev->netdev_ops = &qmimux_netdev_ops; | 125 | dev->netdev_ops = &qmimux_netdev_ops; |
| 126 | dev->destructor = free_netdev; | 126 | dev->needs_free_netdev = true; |
| 127 | } | 127 | } |
| 128 | 128 | ||
| 129 | static struct net_device *qmimux_find_dev(struct usbnet *dev, u8 mux_id) | 129 | static struct net_device *qmimux_find_dev(struct usbnet *dev, u8 mux_id) |
| @@ -1192,6 +1192,8 @@ static const struct usb_device_id products[] = { | |||
| 1192 | {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */ | 1192 | {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */ |
| 1193 | {QMI_FIXED_INTF(0x1199, 0x9057, 8)}, | 1193 | {QMI_FIXED_INTF(0x1199, 0x9057, 8)}, |
| 1194 | {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */ | 1194 | {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */ |
| 1195 | {QMI_FIXED_INTF(0x1199, 0x9063, 8)}, /* Sierra Wireless EM7305 */ | ||
| 1196 | {QMI_FIXED_INTF(0x1199, 0x9063, 10)}, /* Sierra Wireless EM7305 */ | ||
| 1195 | {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */ | 1197 | {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */ |
| 1196 | {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */ | 1198 | {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */ |
| 1197 | {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */ | 1199 | {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */ |
| @@ -1206,6 +1208,8 @@ static const struct usb_device_id products[] = { | |||
| 1206 | {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ | 1208 | {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ |
| 1207 | {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ | 1209 | {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ |
| 1208 | {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */ | 1210 | {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */ |
| 1211 | {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */ | ||
| 1212 | {QMI_FIXED_INTF(0x1c9e, 0x9803, 4)}, /* Telewell TW-3G HSPA+ */ | ||
| 1209 | {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */ | 1213 | {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */ |
| 1210 | {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */ | 1214 | {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */ |
| 1211 | {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */ | 1215 | {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */ |
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index ddc62cb69be8..1a419a45e2a2 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
| @@ -4368,6 +4368,8 @@ static u8 rtl_get_version(struct usb_interface *intf) | |||
| 4368 | break; | 4368 | break; |
| 4369 | } | 4369 | } |
| 4370 | 4370 | ||
| 4371 | dev_dbg(&intf->dev, "Detected version 0x%04x\n", version); | ||
| 4372 | |||
| 4371 | return version; | 4373 | return version; |
| 4372 | } | 4374 | } |
| 4373 | 4375 | ||
diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 38f0f03a29c8..364fa9d11d1a 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c | |||
| @@ -222,7 +222,6 @@ static int veth_dev_init(struct net_device *dev) | |||
| 222 | static void veth_dev_free(struct net_device *dev) | 222 | static void veth_dev_free(struct net_device *dev) |
| 223 | { | 223 | { |
| 224 | free_percpu(dev->vstats); | 224 | free_percpu(dev->vstats); |
| 225 | free_netdev(dev); | ||
| 226 | } | 225 | } |
| 227 | 226 | ||
| 228 | #ifdef CONFIG_NET_POLL_CONTROLLER | 227 | #ifdef CONFIG_NET_POLL_CONTROLLER |
| @@ -317,7 +316,8 @@ static void veth_setup(struct net_device *dev) | |||
| 317 | NETIF_F_HW_VLAN_STAG_TX | | 316 | NETIF_F_HW_VLAN_STAG_TX | |
| 318 | NETIF_F_HW_VLAN_CTAG_RX | | 317 | NETIF_F_HW_VLAN_CTAG_RX | |
| 319 | NETIF_F_HW_VLAN_STAG_RX); | 318 | NETIF_F_HW_VLAN_STAG_RX); |
| 320 | dev->destructor = veth_dev_free; | 319 | dev->needs_free_netdev = true; |
| 320 | dev->priv_destructor = veth_dev_free; | ||
| 321 | dev->max_mtu = ETH_MAX_MTU; | 321 | dev->max_mtu = ETH_MAX_MTU; |
| 322 | 322 | ||
| 323 | dev->hw_features = VETH_FEATURES; | 323 | dev->hw_features = VETH_FEATURES; |
| @@ -383,7 +383,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, | |||
| 383 | tbp = tb; | 383 | tbp = tb; |
| 384 | } | 384 | } |
| 385 | 385 | ||
| 386 | if (tbp[IFLA_IFNAME]) { | 386 | if (ifmp && tbp[IFLA_IFNAME]) { |
| 387 | nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ); | 387 | nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ); |
| 388 | name_assign_type = NET_NAME_USER; | 388 | name_assign_type = NET_NAME_USER; |
| 389 | } else { | 389 | } else { |
| @@ -402,7 +402,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, | |||
| 402 | return PTR_ERR(peer); | 402 | return PTR_ERR(peer); |
| 403 | } | 403 | } |
| 404 | 404 | ||
| 405 | if (tbp[IFLA_ADDRESS] == NULL) | 405 | if (!ifmp || !tbp[IFLA_ADDRESS]) |
| 406 | eth_hw_addr_random(peer); | 406 | eth_hw_addr_random(peer); |
| 407 | 407 | ||
| 408 | if (ifmp && (dev->ifindex != 0)) | 408 | if (ifmp && (dev->ifindex != 0)) |
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 3e9246cc49c3..143d8a95a60d 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
| @@ -869,7 +869,7 @@ static unsigned int get_mergeable_buf_len(struct receive_queue *rq, | |||
| 869 | unsigned int len; | 869 | unsigned int len; |
| 870 | 870 | ||
| 871 | len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len), | 871 | len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len), |
| 872 | rq->min_buf_len - hdr_len, PAGE_SIZE - hdr_len); | 872 | rq->min_buf_len, PAGE_SIZE - hdr_len); |
| 873 | return ALIGN(len, L1_CACHE_BYTES); | 873 | return ALIGN(len, L1_CACHE_BYTES); |
| 874 | } | 874 | } |
| 875 | 875 | ||
| @@ -1797,6 +1797,7 @@ static void virtnet_freeze_down(struct virtio_device *vdev) | |||
| 1797 | flush_work(&vi->config_work); | 1797 | flush_work(&vi->config_work); |
| 1798 | 1798 | ||
| 1799 | netif_device_detach(vi->dev); | 1799 | netif_device_detach(vi->dev); |
| 1800 | netif_tx_disable(vi->dev); | ||
| 1800 | cancel_delayed_work_sync(&vi->refill); | 1801 | cancel_delayed_work_sync(&vi->refill); |
| 1801 | 1802 | ||
| 1802 | if (netif_running(vi->dev)) { | 1803 | if (netif_running(vi->dev)) { |
| @@ -2144,7 +2145,8 @@ static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqu | |||
| 2144 | unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len; | 2145 | unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len; |
| 2145 | unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size); | 2146 | unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size); |
| 2146 | 2147 | ||
| 2147 | return max(min_buf_len, hdr_len); | 2148 | return max(max(min_buf_len, hdr_len) - hdr_len, |
| 2149 | (unsigned int)GOOD_PACKET_LEN); | ||
| 2148 | } | 2150 | } |
| 2149 | 2151 | ||
| 2150 | static int virtnet_find_vqs(struct virtnet_info *vi) | 2152 | static int virtnet_find_vqs(struct virtnet_info *vi) |
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index db882493875c..022c0b5f9844 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c | |||
| @@ -36,12 +36,14 @@ | |||
| 36 | #include <net/addrconf.h> | 36 | #include <net/addrconf.h> |
| 37 | #include <net/l3mdev.h> | 37 | #include <net/l3mdev.h> |
| 38 | #include <net/fib_rules.h> | 38 | #include <net/fib_rules.h> |
| 39 | #include <net/netns/generic.h> | ||
| 39 | 40 | ||
| 40 | #define DRV_NAME "vrf" | 41 | #define DRV_NAME "vrf" |
| 41 | #define DRV_VERSION "1.0" | 42 | #define DRV_VERSION "1.0" |
| 42 | 43 | ||
| 43 | #define FIB_RULE_PREF 1000 /* default preference for FIB rules */ | 44 | #define FIB_RULE_PREF 1000 /* default preference for FIB rules */ |
| 44 | static bool add_fib_rules = true; | 45 | |
| 46 | static unsigned int vrf_net_id; | ||
| 45 | 47 | ||
| 46 | struct net_vrf { | 48 | struct net_vrf { |
| 47 | struct rtable __rcu *rth; | 49 | struct rtable __rcu *rth; |
| @@ -1348,7 +1350,7 @@ static void vrf_setup(struct net_device *dev) | |||
| 1348 | dev->netdev_ops = &vrf_netdev_ops; | 1350 | dev->netdev_ops = &vrf_netdev_ops; |
| 1349 | dev->l3mdev_ops = &vrf_l3mdev_ops; | 1351 | dev->l3mdev_ops = &vrf_l3mdev_ops; |
| 1350 | dev->ethtool_ops = &vrf_ethtool_ops; | 1352 | dev->ethtool_ops = &vrf_ethtool_ops; |
| 1351 | dev->destructor = free_netdev; | 1353 | dev->needs_free_netdev = true; |
| 1352 | 1354 | ||
| 1353 | /* Fill in device structure with ethernet-generic values. */ | 1355 | /* Fill in device structure with ethernet-generic values. */ |
| 1354 | eth_hw_addr_random(dev); | 1356 | eth_hw_addr_random(dev); |
| @@ -1394,6 +1396,8 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev, | |||
| 1394 | struct nlattr *tb[], struct nlattr *data[]) | 1396 | struct nlattr *tb[], struct nlattr *data[]) |
| 1395 | { | 1397 | { |
| 1396 | struct net_vrf *vrf = netdev_priv(dev); | 1398 | struct net_vrf *vrf = netdev_priv(dev); |
| 1399 | bool *add_fib_rules; | ||
| 1400 | struct net *net; | ||
| 1397 | int err; | 1401 | int err; |
| 1398 | 1402 | ||
| 1399 | if (!data || !data[IFLA_VRF_TABLE]) | 1403 | if (!data || !data[IFLA_VRF_TABLE]) |
| @@ -1409,13 +1413,15 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev, | |||
| 1409 | if (err) | 1413 | if (err) |
| 1410 | goto out; | 1414 | goto out; |
| 1411 | 1415 | ||
| 1412 | if (add_fib_rules) { | 1416 | net = dev_net(dev); |
| 1417 | add_fib_rules = net_generic(net, vrf_net_id); | ||
| 1418 | if (*add_fib_rules) { | ||
| 1413 | err = vrf_add_fib_rules(dev); | 1419 | err = vrf_add_fib_rules(dev); |
| 1414 | if (err) { | 1420 | if (err) { |
| 1415 | unregister_netdevice(dev); | 1421 | unregister_netdevice(dev); |
| 1416 | goto out; | 1422 | goto out; |
| 1417 | } | 1423 | } |
| 1418 | add_fib_rules = false; | 1424 | *add_fib_rules = false; |
| 1419 | } | 1425 | } |
| 1420 | 1426 | ||
| 1421 | out: | 1427 | out: |
| @@ -1498,16 +1504,38 @@ static struct notifier_block vrf_notifier_block __read_mostly = { | |||
| 1498 | .notifier_call = vrf_device_event, | 1504 | .notifier_call = vrf_device_event, |
| 1499 | }; | 1505 | }; |
| 1500 | 1506 | ||
| 1507 | /* Initialize per network namespace state */ | ||
| 1508 | static int __net_init vrf_netns_init(struct net *net) | ||
| 1509 | { | ||
| 1510 | bool *add_fib_rules = net_generic(net, vrf_net_id); | ||
| 1511 | |||
| 1512 | *add_fib_rules = true; | ||
| 1513 | |||
| 1514 | return 0; | ||
| 1515 | } | ||
| 1516 | |||
| 1517 | static struct pernet_operations vrf_net_ops __net_initdata = { | ||
| 1518 | .init = vrf_netns_init, | ||
| 1519 | .id = &vrf_net_id, | ||
| 1520 | .size = sizeof(bool), | ||
| 1521 | }; | ||
| 1522 | |||
| 1501 | static int __init vrf_init_module(void) | 1523 | static int __init vrf_init_module(void) |
| 1502 | { | 1524 | { |
| 1503 | int rc; | 1525 | int rc; |
| 1504 | 1526 | ||
| 1505 | register_netdevice_notifier(&vrf_notifier_block); | 1527 | register_netdevice_notifier(&vrf_notifier_block); |
| 1506 | 1528 | ||
| 1507 | rc = rtnl_link_register(&vrf_link_ops); | 1529 | rc = register_pernet_subsys(&vrf_net_ops); |
| 1508 | if (rc < 0) | 1530 | if (rc < 0) |
| 1509 | goto error; | 1531 | goto error; |
| 1510 | 1532 | ||
| 1533 | rc = rtnl_link_register(&vrf_link_ops); | ||
| 1534 | if (rc < 0) { | ||
| 1535 | unregister_pernet_subsys(&vrf_net_ops); | ||
| 1536 | goto error; | ||
| 1537 | } | ||
| 1538 | |||
| 1511 | return 0; | 1539 | return 0; |
| 1512 | 1540 | ||
| 1513 | error: | 1541 | error: |
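
The vrf change above turns a module-global add_fib_rules flag into per-network-namespace state. The generic shape of that pattern, sketched with hypothetical example_* names: registering pernet_operations with an .id/.size pair makes the core allocate one slot per namespace, and net_generic() returns that slot wherever the flag is needed.

    #include <net/net_namespace.h>
    #include <net/netns/generic.h>

    static unsigned int example_net_id;

    struct example_net {
            bool add_fib_rules;     /* one flag per network namespace */
    };

    static int __net_init example_netns_init(struct net *net)
    {
            struct example_net *en = net_generic(net, example_net_id);

            en->add_fib_rules = true;
            return 0;
    }

    static struct pernet_operations example_net_ops = {
            .init = example_netns_init,
            .id   = &example_net_id,
            .size = sizeof(struct example_net),
    };

    /* Pair register_pernet_subsys(&example_net_ops) in module init with
     * unregister_pernet_subsys(&example_net_ops) in module exit, as the
     * vrf init path above now does before/after rtnl_link_register(). */
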
diff --git a/drivers/net/vsockmon.c b/drivers/net/vsockmon.c index 7f0136f2dd9d..c28bdce14fd5 100644 --- a/drivers/net/vsockmon.c +++ b/drivers/net/vsockmon.c | |||
| @@ -135,7 +135,7 @@ static void vsockmon_setup(struct net_device *dev) | |||
| 135 | 135 | ||
| 136 | dev->netdev_ops = &vsockmon_ops; | 136 | dev->netdev_ops = &vsockmon_ops; |
| 137 | dev->ethtool_ops = &vsockmon_ethtool_ops; | 137 | dev->ethtool_ops = &vsockmon_ethtool_ops; |
| 138 | dev->destructor = free_netdev; | 138 | dev->needs_free_netdev = true; |
| 139 | 139 | ||
| 140 | dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | | 140 | dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | |
| 141 | NETIF_F_HIGHDMA | NETIF_F_LLTX; | 141 | NETIF_F_HIGHDMA | NETIF_F_LLTX; |
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 328b4712683c..5fa798a5c9a6 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
| @@ -59,6 +59,8 @@ static const u8 all_zeros_mac[ETH_ALEN + 2]; | |||
| 59 | 59 | ||
| 60 | static int vxlan_sock_add(struct vxlan_dev *vxlan); | 60 | static int vxlan_sock_add(struct vxlan_dev *vxlan); |
| 61 | 61 | ||
| 62 | static void vxlan_vs_del_dev(struct vxlan_dev *vxlan); | ||
| 63 | |||
| 62 | /* per-network namespace private data for this module */ | 64 | /* per-network namespace private data for this module */ |
| 63 | struct vxlan_net { | 65 | struct vxlan_net { |
| 64 | struct list_head vxlan_list; | 66 | struct list_head vxlan_list; |
| @@ -740,6 +742,22 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f) | |||
| 740 | call_rcu(&f->rcu, vxlan_fdb_free); | 742 | call_rcu(&f->rcu, vxlan_fdb_free); |
| 741 | } | 743 | } |
| 742 | 744 | ||
| 745 | static void vxlan_dst_free(struct rcu_head *head) | ||
| 746 | { | ||
| 747 | struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu); | ||
| 748 | |||
| 749 | dst_cache_destroy(&rd->dst_cache); | ||
| 750 | kfree(rd); | ||
| 751 | } | ||
| 752 | |||
| 753 | static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f, | ||
| 754 | struct vxlan_rdst *rd) | ||
| 755 | { | ||
| 756 | list_del_rcu(&rd->list); | ||
| 757 | vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH); | ||
| 758 | call_rcu(&rd->rcu, vxlan_dst_free); | ||
| 759 | } | ||
| 760 | |||
| 743 | static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, | 761 | static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, |
| 744 | union vxlan_addr *ip, __be16 *port, __be32 *src_vni, | 762 | union vxlan_addr *ip, __be16 *port, __be32 *src_vni, |
| 745 | __be32 *vni, u32 *ifindex) | 763 | __be32 *vni, u32 *ifindex) |
| @@ -864,9 +882,7 @@ static int __vxlan_fdb_delete(struct vxlan_dev *vxlan, | |||
| 864 | * otherwise destroy the fdb entry | 882 | * otherwise destroy the fdb entry |
| 865 | */ | 883 | */ |
| 866 | if (rd && !list_is_singular(&f->remotes)) { | 884 | if (rd && !list_is_singular(&f->remotes)) { |
| 867 | list_del_rcu(&rd->list); | 885 | vxlan_fdb_dst_destroy(vxlan, f, rd); |
| 868 | vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH); | ||
| 869 | kfree_rcu(rd, rcu); | ||
| 870 | goto out; | 886 | goto out; |
| 871 | } | 887 | } |
| 872 | 888 | ||
| @@ -1067,6 +1083,8 @@ static void vxlan_sock_release(struct vxlan_dev *vxlan) | |||
| 1067 | rcu_assign_pointer(vxlan->vn4_sock, NULL); | 1083 | rcu_assign_pointer(vxlan->vn4_sock, NULL); |
| 1068 | synchronize_net(); | 1084 | synchronize_net(); |
| 1069 | 1085 | ||
| 1086 | vxlan_vs_del_dev(vxlan); | ||
| 1087 | |||
| 1070 | if (__vxlan_sock_release_prep(sock4)) { | 1088 | if (__vxlan_sock_release_prep(sock4)) { |
| 1071 | udp_tunnel_sock_release(sock4->sock); | 1089 | udp_tunnel_sock_release(sock4->sock); |
| 1072 | kfree(sock4); | 1090 | kfree(sock4); |
| @@ -2342,6 +2360,15 @@ static void vxlan_cleanup(unsigned long arg) | |||
| 2342 | mod_timer(&vxlan->age_timer, next_timer); | 2360 | mod_timer(&vxlan->age_timer, next_timer); |
| 2343 | } | 2361 | } |
| 2344 | 2362 | ||
| 2363 | static void vxlan_vs_del_dev(struct vxlan_dev *vxlan) | ||
| 2364 | { | ||
| 2365 | struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); | ||
| 2366 | |||
| 2367 | spin_lock(&vn->sock_lock); | ||
| 2368 | hlist_del_init_rcu(&vxlan->hlist); | ||
| 2369 | spin_unlock(&vn->sock_lock); | ||
| 2370 | } | ||
| 2371 | |||
| 2345 | static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan) | 2372 | static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan) |
| 2346 | { | 2373 | { |
| 2347 | struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); | 2374 | struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); |
| @@ -2584,7 +2611,7 @@ static void vxlan_setup(struct net_device *dev) | |||
| 2584 | eth_hw_addr_random(dev); | 2611 | eth_hw_addr_random(dev); |
| 2585 | ether_setup(dev); | 2612 | ether_setup(dev); |
| 2586 | 2613 | ||
| 2587 | dev->destructor = free_netdev; | 2614 | dev->needs_free_netdev = true; |
| 2588 | SET_NETDEV_DEVTYPE(dev, &vxlan_type); | 2615 | SET_NETDEV_DEVTYPE(dev, &vxlan_type); |
| 2589 | 2616 | ||
| 2590 | dev->features |= NETIF_F_LLTX; | 2617 | dev->features |= NETIF_F_LLTX; |
| @@ -3286,15 +3313,9 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], | |||
| 3286 | static void vxlan_dellink(struct net_device *dev, struct list_head *head) | 3313 | static void vxlan_dellink(struct net_device *dev, struct list_head *head) |
| 3287 | { | 3314 | { |
| 3288 | struct vxlan_dev *vxlan = netdev_priv(dev); | 3315 | struct vxlan_dev *vxlan = netdev_priv(dev); |
| 3289 | struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); | ||
| 3290 | 3316 | ||
| 3291 | vxlan_flush(vxlan, true); | 3317 | vxlan_flush(vxlan, true); |
| 3292 | 3318 | ||
| 3293 | spin_lock(&vn->sock_lock); | ||
| 3294 | if (!hlist_unhashed(&vxlan->hlist)) | ||
| 3295 | hlist_del_rcu(&vxlan->hlist); | ||
| 3296 | spin_unlock(&vn->sock_lock); | ||
| 3297 | |||
| 3298 | gro_cells_destroy(&vxlan->gro_cells); | 3319 | gro_cells_destroy(&vxlan->gro_cells); |
| 3299 | list_del(&vxlan->next); | 3320 | list_del(&vxlan->next); |
| 3300 | unregister_netdevice_queue(dev, head); | 3321 | unregister_netdevice_queue(dev, head); |
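
The new vxlan_dst_free()/vxlan_fdb_dst_destroy() helpers above exist because kfree_rcu() can only free the object, while the remote destination now also owns a dst cache that needs explicit teardown after the grace period. The underlying idiom, sketched with a hypothetical struct remote:

    #include <linux/kernel.h>
    #include <linux/rculist.h>
    #include <linux/slab.h>

    struct remote {
            struct list_head list;
            struct rcu_head rcu;
            /* plus resources that need explicit teardown before kfree() */
    };

    static void remote_free_rcu(struct rcu_head *head)
    {
            struct remote *r = container_of(head, struct remote, rcu);

            /* release embedded resources here, then the object itself */
            kfree(r);
    }

    static void remote_destroy(struct remote *r)
    {
            list_del_rcu(&r->list);                 /* unlink under the writer's lock */
            call_rcu(&r->rcu, remote_free_rcu);     /* free after a grace period */
    }
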
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c index 65ee2a6f248c..a0d76f70c428 100644 --- a/drivers/net/wan/dlci.c +++ b/drivers/net/wan/dlci.c | |||
| @@ -475,7 +475,7 @@ static void dlci_setup(struct net_device *dev) | |||
| 475 | dev->flags = 0; | 475 | dev->flags = 0; |
| 476 | dev->header_ops = &dlci_header_ops; | 476 | dev->header_ops = &dlci_header_ops; |
| 477 | dev->netdev_ops = &dlci_netdev_ops; | 477 | dev->netdev_ops = &dlci_netdev_ops; |
| 478 | dev->destructor = free_netdev; | 478 | dev->needs_free_netdev = true; |
| 479 | 479 | ||
| 480 | dlp->receive = dlci_receive; | 480 | dlp->receive = dlci_receive; |
| 481 | 481 | ||
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index eb915281197e..78596e42a3f3 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c | |||
| @@ -1106,7 +1106,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type) | |||
| 1106 | return -EIO; | 1106 | return -EIO; |
| 1107 | } | 1107 | } |
| 1108 | 1108 | ||
| 1109 | dev->destructor = free_netdev; | 1109 | dev->needs_free_netdev = true; |
| 1110 | *get_dev_p(pvc, type) = dev; | 1110 | *get_dev_p(pvc, type) = dev; |
| 1111 | if (!used) { | 1111 | if (!used) { |
| 1112 | state(hdlc)->dce_changed = 1; | 1112 | state(hdlc)->dce_changed = 1; |
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index 9df9ed62beff..63f749078a1f 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c | |||
| @@ -306,7 +306,7 @@ static const struct net_device_ops lapbeth_netdev_ops = { | |||
| 306 | static void lapbeth_setup(struct net_device *dev) | 306 | static void lapbeth_setup(struct net_device *dev) |
| 307 | { | 307 | { |
| 308 | dev->netdev_ops = &lapbeth_netdev_ops; | 308 | dev->netdev_ops = &lapbeth_netdev_ops; |
| 309 | dev->destructor = free_netdev; | 309 | dev->needs_free_netdev = true; |
| 310 | dev->type = ARPHRD_X25; | 310 | dev->type = ARPHRD_X25; |
| 311 | dev->hard_header_len = 3; | 311 | dev->hard_header_len = 3; |
| 312 | dev->mtu = 1000; | 312 | dev->mtu = 1000; |
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c index 91ee542de3d7..b90c77ef792e 100644 --- a/drivers/net/wireless/ath/ath6kl/main.c +++ b/drivers/net/wireless/ath/ath6kl/main.c | |||
| @@ -1287,7 +1287,7 @@ void init_netdev(struct net_device *dev) | |||
| 1287 | struct ath6kl *ar = ath6kl_priv(dev); | 1287 | struct ath6kl *ar = ath6kl_priv(dev); |
| 1288 | 1288 | ||
| 1289 | dev->netdev_ops = &ath6kl_netdev_ops; | 1289 | dev->netdev_ops = &ath6kl_netdev_ops; |
| 1290 | dev->destructor = free_netdev; | 1290 | dev->needs_free_netdev = true; |
| 1291 | dev->watchdog_timeo = ATH6KL_TX_TIMEOUT; | 1291 | dev->watchdog_timeo = ATH6KL_TX_TIMEOUT; |
| 1292 | 1292 | ||
| 1293 | dev->needed_headroom = ETH_HLEN; | 1293 | dev->needed_headroom = ETH_HLEN; |
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index d5e993dc9b23..517a315e259b 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c | |||
| @@ -1271,6 +1271,8 @@ static int wcn36xx_remove(struct platform_device *pdev) | |||
| 1271 | qcom_smem_state_put(wcn->tx_enable_state); | 1271 | qcom_smem_state_put(wcn->tx_enable_state); |
| 1272 | qcom_smem_state_put(wcn->tx_rings_empty_state); | 1272 | qcom_smem_state_put(wcn->tx_rings_empty_state); |
| 1273 | 1273 | ||
| 1274 | rpmsg_destroy_ept(wcn->smd_channel); | ||
| 1275 | |||
| 1274 | iounmap(wcn->dxe_base); | 1276 | iounmap(wcn->dxe_base); |
| 1275 | iounmap(wcn->ccu_base); | 1277 | iounmap(wcn->ccu_base); |
| 1276 | 1278 | ||
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index cd1d6730eab7..617199c0e5a0 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | |||
| @@ -5225,7 +5225,6 @@ void brcmf_cfg80211_free_netdev(struct net_device *ndev) | |||
| 5225 | 5225 | ||
| 5226 | if (vif) | 5226 | if (vif) |
| 5227 | brcmf_free_vif(vif); | 5227 | brcmf_free_vif(vif); |
| 5228 | free_netdev(ndev); | ||
| 5229 | } | 5228 | } |
| 5230 | 5229 | ||
| 5231 | static bool brcmf_is_linkup(const struct brcmf_event_msg *e) | 5230 | static bool brcmf_is_linkup(const struct brcmf_event_msg *e) |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index a3d82368f1a9..511d190c6cca 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c | |||
| @@ -624,7 +624,8 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bsscfgidx, s32 ifidx, | |||
| 624 | if (!ndev) | 624 | if (!ndev) |
| 625 | return ERR_PTR(-ENOMEM); | 625 | return ERR_PTR(-ENOMEM); |
| 626 | 626 | ||
| 627 | ndev->destructor = brcmf_cfg80211_free_netdev; | 627 | ndev->needs_free_netdev = true; |
| 628 | ndev->priv_destructor = brcmf_cfg80211_free_netdev; | ||
| 628 | ifp = netdev_priv(ndev); | 629 | ifp = netdev_priv(ndev); |
| 629 | ifp->ndev = ndev; | 630 | ifp->ndev = ndev; |
| 630 | /* store mapping ifidx to bsscfgidx */ | 631 | /* store mapping ifidx to bsscfgidx */ |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c index c7c1e9906500..d231042f19d6 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c | |||
| @@ -442,7 +442,7 @@ struct brcmf_fw { | |||
| 442 | const char *nvram_name; | 442 | const char *nvram_name; |
| 443 | u16 domain_nr; | 443 | u16 domain_nr; |
| 444 | u16 bus_nr; | 444 | u16 bus_nr; |
| 445 | void (*done)(struct device *dev, const struct firmware *fw, | 445 | void (*done)(struct device *dev, int err, const struct firmware *fw, |
| 446 | void *nvram_image, u32 nvram_len); | 446 | void *nvram_image, u32 nvram_len); |
| 447 | }; | 447 | }; |
| 448 | 448 | ||
| @@ -477,52 +477,51 @@ static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx) | |||
| 477 | if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL)) | 477 | if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL)) |
| 478 | goto fail; | 478 | goto fail; |
| 479 | 479 | ||
| 480 | fwctx->done(fwctx->dev, fwctx->code, nvram, nvram_length); | 480 | fwctx->done(fwctx->dev, 0, fwctx->code, nvram, nvram_length); |
| 481 | kfree(fwctx); | 481 | kfree(fwctx); |
| 482 | return; | 482 | return; |
| 483 | 483 | ||
| 484 | fail: | 484 | fail: |
| 485 | brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev)); | 485 | brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev)); |
| 486 | release_firmware(fwctx->code); | 486 | release_firmware(fwctx->code); |
| 487 | device_release_driver(fwctx->dev); | 487 | fwctx->done(fwctx->dev, -ENOENT, NULL, NULL, 0); |
| 488 | kfree(fwctx); | 488 | kfree(fwctx); |
| 489 | } | 489 | } |
| 490 | 490 | ||
| 491 | static void brcmf_fw_request_code_done(const struct firmware *fw, void *ctx) | 491 | static void brcmf_fw_request_code_done(const struct firmware *fw, void *ctx) |
| 492 | { | 492 | { |
| 493 | struct brcmf_fw *fwctx = ctx; | 493 | struct brcmf_fw *fwctx = ctx; |
| 494 | int ret; | 494 | int ret = 0; |
| 495 | 495 | ||
| 496 | brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev)); | 496 | brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev)); |
| 497 | if (!fw) | 497 | if (!fw) { |
| 498 | ret = -ENOENT; | ||
| 498 | goto fail; | 499 | goto fail; |
| 499 | |||
| 500 | /* only requested code so done here */ | ||
| 501 | if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM)) { | ||
| 502 | fwctx->done(fwctx->dev, fw, NULL, 0); | ||
| 503 | kfree(fwctx); | ||
| 504 | return; | ||
| 505 | } | 500 | } |
| 501 | /* only requested code so done here */ | ||
| 502 | if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM)) | ||
| 503 | goto done; | ||
| 504 | |||
| 506 | fwctx->code = fw; | 505 | fwctx->code = fw; |
| 507 | ret = request_firmware_nowait(THIS_MODULE, true, fwctx->nvram_name, | 506 | ret = request_firmware_nowait(THIS_MODULE, true, fwctx->nvram_name, |
| 508 | fwctx->dev, GFP_KERNEL, fwctx, | 507 | fwctx->dev, GFP_KERNEL, fwctx, |
| 509 | brcmf_fw_request_nvram_done); | 508 | brcmf_fw_request_nvram_done); |
| 510 | 509 | ||
| 511 | if (!ret) | 510 | /* pass NULL to nvram callback for bcm47xx fallback */ |
| 512 | return; | 511 | if (ret) |
| 513 | 512 | brcmf_fw_request_nvram_done(NULL, fwctx); | |
| 514 | brcmf_fw_request_nvram_done(NULL, fwctx); | ||
| 515 | return; | 513 | return; |
| 516 | 514 | ||
| 517 | fail: | 515 | fail: |
| 518 | brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev)); | 516 | brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev)); |
| 519 | device_release_driver(fwctx->dev); | 517 | done: |
| 518 | fwctx->done(fwctx->dev, ret, fw, NULL, 0); | ||
| 520 | kfree(fwctx); | 519 | kfree(fwctx); |
| 521 | } | 520 | } |
| 522 | 521 | ||
| 523 | int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags, | 522 | int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags, |
| 524 | const char *code, const char *nvram, | 523 | const char *code, const char *nvram, |
| 525 | void (*fw_cb)(struct device *dev, | 524 | void (*fw_cb)(struct device *dev, int err, |
| 526 | const struct firmware *fw, | 525 | const struct firmware *fw, |
| 527 | void *nvram_image, u32 nvram_len), | 526 | void *nvram_image, u32 nvram_len), |
| 528 | u16 domain_nr, u16 bus_nr) | 527 | u16 domain_nr, u16 bus_nr) |
| @@ -555,7 +554,7 @@ int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags, | |||
| 555 | 554 | ||
| 556 | int brcmf_fw_get_firmwares(struct device *dev, u16 flags, | 555 | int brcmf_fw_get_firmwares(struct device *dev, u16 flags, |
| 557 | const char *code, const char *nvram, | 556 | const char *code, const char *nvram, |
| 558 | void (*fw_cb)(struct device *dev, | 557 | void (*fw_cb)(struct device *dev, int err, |
| 559 | const struct firmware *fw, | 558 | const struct firmware *fw, |
| 560 | void *nvram_image, u32 nvram_len)) | 559 | void *nvram_image, u32 nvram_len)) |
| 561 | { | 560 | { |
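
The firmware.c rework above threads an error code into the driver's done() callback instead of silently unbinding the device. It builds on the stock asynchronous loader interface, where request_firmware_nowait() signals failure by handing the completion handler a NULL firmware pointer; a sketch of that pattern with hypothetical example_* names:

    #include <linux/device.h>
    #include <linux/errno.h>
    #include <linux/firmware.h>
    #include <linux/module.h>
    #include <linux/slab.h>

    struct example_fw_ctx {
            struct device *dev;
            void (*done)(struct device *dev, int err, const struct firmware *fw);
    };

    static void example_fw_done(const struct firmware *fw, void *context)
    {
            struct example_fw_ctx *ctx = context;

            /* a NULL firmware means the request failed; forward a real error */
            ctx->done(ctx->dev, fw ? 0 : -ENOENT, fw);
            kfree(ctx);
    }

    static int example_fw_request(struct example_fw_ctx *ctx, const char *name)
    {
            return request_firmware_nowait(THIS_MODULE, true, name, ctx->dev,
                                           GFP_KERNEL, ctx, example_fw_done);
    }
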
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h index d3c9f0d52ae3..8fa4b7e1ab3d 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h | |||
| @@ -73,13 +73,13 @@ void brcmf_fw_nvram_free(void *nvram); | |||
| 73 | */ | 73 | */ |
| 74 | int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags, | 74 | int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags, |
| 75 | const char *code, const char *nvram, | 75 | const char *code, const char *nvram, |
| 76 | void (*fw_cb)(struct device *dev, | 76 | void (*fw_cb)(struct device *dev, int err, |
| 77 | const struct firmware *fw, | 77 | const struct firmware *fw, |
| 78 | void *nvram_image, u32 nvram_len), | 78 | void *nvram_image, u32 nvram_len), |
| 79 | u16 domain_nr, u16 bus_nr); | 79 | u16 domain_nr, u16 bus_nr); |
| 80 | int brcmf_fw_get_firmwares(struct device *dev, u16 flags, | 80 | int brcmf_fw_get_firmwares(struct device *dev, u16 flags, |
| 81 | const char *code, const char *nvram, | 81 | const char *code, const char *nvram, |
| 82 | void (*fw_cb)(struct device *dev, | 82 | void (*fw_cb)(struct device *dev, int err, |
| 83 | const struct firmware *fw, | 83 | const struct firmware *fw, |
| 84 | void *nvram_image, u32 nvram_len)); | 84 | void *nvram_image, u32 nvram_len)); |
| 85 | 85 | ||
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c index 72373e59308e..f59642b2c935 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c | |||
| @@ -2145,7 +2145,7 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp) | |||
| 2145 | struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr); | 2145 | struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr); |
| 2146 | struct brcmf_fws_mac_descriptor *entry; | 2146 | struct brcmf_fws_mac_descriptor *entry; |
| 2147 | 2147 | ||
| 2148 | if (!ifp->ndev || fws->fcmode == BRCMF_FWS_FCMODE_NONE) | 2148 | if (!ifp->ndev || !brcmf_fws_queue_skbs(fws)) |
| 2149 | return; | 2149 | return; |
| 2150 | 2150 | ||
| 2151 | entry = &fws->desc.iface[ifp->ifidx]; | 2151 | entry = &fws->desc.iface[ifp->ifidx]; |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index f36b96dc6acd..f878706613e6 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c | |||
| @@ -1650,16 +1650,23 @@ static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = { | |||
| 1650 | .write32 = brcmf_pcie_buscore_write32, | 1650 | .write32 = brcmf_pcie_buscore_write32, |
| 1651 | }; | 1651 | }; |
| 1652 | 1652 | ||
| 1653 | static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw, | 1653 | static void brcmf_pcie_setup(struct device *dev, int ret, |
| 1654 | const struct firmware *fw, | ||
| 1654 | void *nvram, u32 nvram_len) | 1655 | void *nvram, u32 nvram_len) |
| 1655 | { | 1656 | { |
| 1656 | struct brcmf_bus *bus = dev_get_drvdata(dev); | 1657 | struct brcmf_bus *bus; |
| 1657 | struct brcmf_pciedev *pcie_bus_dev = bus->bus_priv.pcie; | 1658 | struct brcmf_pciedev *pcie_bus_dev; |
| 1658 | struct brcmf_pciedev_info *devinfo = pcie_bus_dev->devinfo; | 1659 | struct brcmf_pciedev_info *devinfo; |
| 1659 | struct brcmf_commonring **flowrings; | 1660 | struct brcmf_commonring **flowrings; |
| 1660 | int ret; | ||
| 1661 | u32 i; | 1661 | u32 i; |
| 1662 | 1662 | ||
| 1663 | /* check firmware loading result */ | ||
| 1664 | if (ret) | ||
| 1665 | goto fail; | ||
| 1666 | |||
| 1667 | bus = dev_get_drvdata(dev); | ||
| 1668 | pcie_bus_dev = bus->bus_priv.pcie; | ||
| 1669 | devinfo = pcie_bus_dev->devinfo; | ||
| 1663 | brcmf_pcie_attach(devinfo); | 1670 | brcmf_pcie_attach(devinfo); |
| 1664 | 1671 | ||
| 1665 | /* Some of the firmwares have the size of the memory of the device | 1672 | /* Some of the firmwares have the size of the memory of the device |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index fc64b8913aa6..5653d6dd38f6 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | |||
| @@ -3422,7 +3422,7 @@ static int brcmf_sdio_bus_preinit(struct device *dev) | |||
| 3422 | /* otherwise, set txglomalign */ | 3422 | /* otherwise, set txglomalign */ |
| 3423 | value = sdiodev->settings->bus.sdio.sd_sgentry_align; | 3423 | value = sdiodev->settings->bus.sdio.sd_sgentry_align; |
| 3424 | /* SDIO ADMA requires at least 32 bit alignment */ | 3424 | /* SDIO ADMA requires at least 32 bit alignment */ |
| 3425 | value = max_t(u32, value, 4); | 3425 | value = max_t(u32, value, ALIGNMENT); |
| 3426 | err = brcmf_iovar_data_set(dev, "bus:txglomalign", &value, | 3426 | err = brcmf_iovar_data_set(dev, "bus:txglomalign", &value, |
| 3427 | sizeof(u32)); | 3427 | sizeof(u32)); |
| 3428 | } | 3428 | } |
| @@ -3982,21 +3982,26 @@ static const struct brcmf_bus_ops brcmf_sdio_bus_ops = { | |||
| 3982 | .get_memdump = brcmf_sdio_bus_get_memdump, | 3982 | .get_memdump = brcmf_sdio_bus_get_memdump, |
| 3983 | }; | 3983 | }; |
| 3984 | 3984 | ||
| 3985 | static void brcmf_sdio_firmware_callback(struct device *dev, | 3985 | static void brcmf_sdio_firmware_callback(struct device *dev, int err, |
| 3986 | const struct firmware *code, | 3986 | const struct firmware *code, |
| 3987 | void *nvram, u32 nvram_len) | 3987 | void *nvram, u32 nvram_len) |
| 3988 | { | 3988 | { |
| 3989 | struct brcmf_bus *bus_if = dev_get_drvdata(dev); | 3989 | struct brcmf_bus *bus_if; |
| 3990 | struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; | 3990 | struct brcmf_sdio_dev *sdiodev; |
| 3991 | struct brcmf_sdio *bus = sdiodev->bus; | 3991 | struct brcmf_sdio *bus; |
| 3992 | int err = 0; | ||
| 3993 | u8 saveclk; | 3992 | u8 saveclk; |
| 3994 | 3993 | ||
| 3995 | brcmf_dbg(TRACE, "Enter: dev=%s\n", dev_name(dev)); | 3994 | brcmf_dbg(TRACE, "Enter: dev=%s, err=%d\n", dev_name(dev), err); |
| 3995 | bus_if = dev_get_drvdata(dev); | ||
| 3996 | sdiodev = bus_if->bus_priv.sdio; | ||
| 3997 | if (err) | ||
| 3998 | goto fail; | ||
| 3996 | 3999 | ||
| 3997 | if (!bus_if->drvr) | 4000 | if (!bus_if->drvr) |
| 3998 | return; | 4001 | return; |
| 3999 | 4002 | ||
| 4003 | bus = sdiodev->bus; | ||
| 4004 | |||
| 4000 | /* try to download image and nvram to the dongle */ | 4005 | /* try to download image and nvram to the dongle */ |
| 4001 | bus->alp_only = true; | 4006 | bus->alp_only = true; |
| 4002 | err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len); | 4007 | err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len); |
| @@ -4083,6 +4088,7 @@ release: | |||
| 4083 | fail: | 4088 | fail: |
| 4084 | brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err); | 4089 | brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err); |
| 4085 | device_release_driver(dev); | 4090 | device_release_driver(dev); |
| 4091 | device_release_driver(&sdiodev->func[2]->dev); | ||
| 4086 | } | 4092 | } |
| 4087 | 4093 | ||
| 4088 | struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) | 4094 | struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c index e4d545f9edee..0eea48e73331 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c | |||
| @@ -1159,17 +1159,18 @@ fail: | |||
| 1159 | return ret; | 1159 | return ret; |
| 1160 | } | 1160 | } |
| 1161 | 1161 | ||
| 1162 | static void brcmf_usb_probe_phase2(struct device *dev, | 1162 | static void brcmf_usb_probe_phase2(struct device *dev, int ret, |
| 1163 | const struct firmware *fw, | 1163 | const struct firmware *fw, |
| 1164 | void *nvram, u32 nvlen) | 1164 | void *nvram, u32 nvlen) |
| 1165 | { | 1165 | { |
| 1166 | struct brcmf_bus *bus = dev_get_drvdata(dev); | 1166 | struct brcmf_bus *bus = dev_get_drvdata(dev); |
| 1167 | struct brcmf_usbdev_info *devinfo; | 1167 | struct brcmf_usbdev_info *devinfo = bus->bus_priv.usb->devinfo; |
| 1168 | int ret; | 1168 | |
| 1169 | if (ret) | ||
| 1170 | goto error; | ||
| 1169 | 1171 | ||
| 1170 | brcmf_dbg(USB, "Start fw downloading\n"); | 1172 | brcmf_dbg(USB, "Start fw downloading\n"); |
| 1171 | 1173 | ||
| 1172 | devinfo = bus->bus_priv.usb->devinfo; | ||
| 1173 | ret = check_file(fw->data); | 1174 | ret = check_file(fw->data); |
| 1174 | if (ret < 0) { | 1175 | if (ret < 0) { |
| 1175 | brcmf_err("invalid firmware\n"); | 1176 | brcmf_err("invalid firmware\n"); |
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c index 3b3e076571d6..45e2efc70d19 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c | |||
| @@ -79,8 +79,8 @@ | |||
| 79 | /* Lowest firmware API version supported */ | 79 | /* Lowest firmware API version supported */ |
| 80 | #define IWL7260_UCODE_API_MIN 17 | 80 | #define IWL7260_UCODE_API_MIN 17 |
| 81 | #define IWL7265_UCODE_API_MIN 17 | 81 | #define IWL7265_UCODE_API_MIN 17 |
| 82 | #define IWL7265D_UCODE_API_MIN 17 | 82 | #define IWL7265D_UCODE_API_MIN 22 |
| 83 | #define IWL3168_UCODE_API_MIN 20 | 83 | #define IWL3168_UCODE_API_MIN 22 |
| 84 | 84 | ||
| 85 | /* NVM versions */ | 85 | /* NVM versions */ |
| 86 | #define IWL7260_NVM_VERSION 0x0a1d | 86 | #define IWL7260_NVM_VERSION 0x0a1d |
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c index b9718c0cf174..89137717c1fc 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c | |||
| @@ -74,8 +74,8 @@ | |||
| 74 | #define IWL8265_UCODE_API_MAX 30 | 74 | #define IWL8265_UCODE_API_MAX 30 |
| 75 | 75 | ||
| 76 | /* Lowest firmware API version supported */ | 76 | /* Lowest firmware API version supported */ |
| 77 | #define IWL8000_UCODE_API_MIN 17 | 77 | #define IWL8000_UCODE_API_MIN 22 |
| 78 | #define IWL8265_UCODE_API_MIN 20 | 78 | #define IWL8265_UCODE_API_MIN 22 |
| 79 | 79 | ||
| 80 | /* NVM versions */ | 80 | /* NVM versions */ |
| 81 | #define IWL8000_NVM_VERSION 0x0a1d | 81 | #define IWL8000_NVM_VERSION 0x0a1d |
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h index 306bc967742e..77efbb78e867 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h | |||
| @@ -370,6 +370,7 @@ | |||
| 370 | #define MON_DMARB_RD_DATA_ADDR (0xa03c5c) | 370 | #define MON_DMARB_RD_DATA_ADDR (0xa03c5c) |
| 371 | 371 | ||
| 372 | #define DBGC_IN_SAMPLE (0xa03c00) | 372 | #define DBGC_IN_SAMPLE (0xa03c00) |
| 373 | #define DBGC_OUT_CTRL (0xa03c0c) | ||
| 373 | 374 | ||
| 374 | /* enable the ID buf for read */ | 375 | /* enable the ID buf for read */ |
| 375 | #define WFPM_PS_CTL_CLR 0xA0300C | 376 | #define WFPM_PS_CTL_CLR 0xA0300C |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h index 1b7d265ffb0a..a10c6aae9ab9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h | |||
| @@ -307,6 +307,11 @@ enum { | |||
| 307 | /* Bit 1-3: LQ command color. Used to match responses to LQ commands */ | 307 | /* Bit 1-3: LQ command color. Used to match responses to LQ commands */ |
| 308 | #define LQ_FLAG_COLOR_POS 1 | 308 | #define LQ_FLAG_COLOR_POS 1 |
| 309 | #define LQ_FLAG_COLOR_MSK (7 << LQ_FLAG_COLOR_POS) | 309 | #define LQ_FLAG_COLOR_MSK (7 << LQ_FLAG_COLOR_POS) |
| 310 | #define LQ_FLAG_COLOR_GET(_f) (((_f) & LQ_FLAG_COLOR_MSK) >>\ | ||
| 311 | LQ_FLAG_COLOR_POS) | ||
| 312 | #define LQ_FLAGS_COLOR_INC(_c) ((((_c) + 1) << LQ_FLAG_COLOR_POS) &\ | ||
| 313 | LQ_FLAG_COLOR_MSK) | ||
| 314 | #define LQ_FLAG_COLOR_SET(_f, _c) ((_c) | ((_f) & ~LQ_FLAG_COLOR_MSK)) | ||
| 310 | 315 | ||
| 311 | /* Bit 4-5: Tx RTS BW Signalling | 316 | /* Bit 4-5: Tx RTS BW Signalling |
| 312 | * (0) No RTS BW signalling | 317 | * (0) No RTS BW signalling |
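
The new LQ_FLAG_COLOR_* helpers above implement a 3-bit counter kept in bits 1-3 of the LQ command flags, so a tx response can be matched against the LQ command that produced it. A small stand-alone check of how the counter advances and wraps; the macros are copied from the hunk, the main() harness is only illustrative:

    #include <stdio.h>
    #include <stdint.h>

    #define LQ_FLAG_COLOR_POS  1
    #define LQ_FLAG_COLOR_MSK  (7 << LQ_FLAG_COLOR_POS)
    #define LQ_FLAG_COLOR_GET(_f)   (((_f) & LQ_FLAG_COLOR_MSK) >> LQ_FLAG_COLOR_POS)
    #define LQ_FLAGS_COLOR_INC(_c)  ((((_c) + 1) << LQ_FLAG_COLOR_POS) & LQ_FLAG_COLOR_MSK)
    #define LQ_FLAG_COLOR_SET(_f, _c) ((_c) | ((_f) & ~LQ_FLAG_COLOR_MSK))

    int main(void)
    {
            uint16_t flags = 0;
            int i;

            for (i = 0; i < 10; i++) {
                    /* same sequence rs_build_rates_table() now performs */
                    uint32_t color = LQ_FLAGS_COLOR_INC(LQ_FLAG_COLOR_GET(flags));

                    flags = LQ_FLAG_COLOR_SET(flags, color);
                    printf("step %d: color %u\n", i, LQ_FLAG_COLOR_GET(flags));
            }
            return 0;   /* prints 1..7, then wraps back to 0, 1, ... */
    }
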
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h index 81b98915b1a4..1360ebfdc51b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h | |||
| @@ -519,8 +519,11 @@ struct agg_tx_status { | |||
| 519 | * bit-7 invalid rate indication | 519 | * bit-7 invalid rate indication |
| 520 | */ | 520 | */ |
| 521 | #define TX_RES_INIT_RATE_INDEX_MSK 0x0f | 521 | #define TX_RES_INIT_RATE_INDEX_MSK 0x0f |
| 522 | #define TX_RES_RATE_TABLE_COLOR_POS 4 | ||
| 522 | #define TX_RES_RATE_TABLE_COLOR_MSK 0x70 | 523 | #define TX_RES_RATE_TABLE_COLOR_MSK 0x70 |
| 523 | #define TX_RES_INV_RATE_INDEX_MSK 0x80 | 524 | #define TX_RES_INV_RATE_INDEX_MSK 0x80 |
| 525 | #define TX_RES_RATE_TABLE_COL_GET(_f) (((_f) & TX_RES_RATE_TABLE_COLOR_MSK) >>\ | ||
| 526 | TX_RES_RATE_TABLE_COLOR_POS) | ||
| 524 | 527 | ||
| 525 | #define IWL_MVM_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f) | 528 | #define IWL_MVM_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f) |
| 526 | #define IWL_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4) | 529 | #define IWL_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4) |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c index 7b86a4f1b574..c8712e6eea74 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c | |||
| @@ -1002,14 +1002,6 @@ int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm, | |||
| 1002 | return 0; | 1002 | return 0; |
| 1003 | } | 1003 | } |
| 1004 | 1004 | ||
| 1005 | static inline void iwl_mvm_restart_early_start(struct iwl_mvm *mvm) | ||
| 1006 | { | ||
| 1007 | if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) | ||
| 1008 | iwl_clear_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100); | ||
| 1009 | else | ||
| 1010 | iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 1); | ||
| 1011 | } | ||
| 1012 | |||
| 1013 | int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id) | 1005 | int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id) |
| 1014 | { | 1006 | { |
| 1015 | u8 *ptr; | 1007 | u8 *ptr; |
| @@ -1023,10 +1015,8 @@ int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id) | |||
| 1023 | /* EARLY START - firmware's configuration is hard coded */ | 1015 | /* EARLY START - firmware's configuration is hard coded */ |
| 1024 | if ((!mvm->fw->dbg_conf_tlv[conf_id] || | 1016 | if ((!mvm->fw->dbg_conf_tlv[conf_id] || |
| 1025 | !mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) && | 1017 | !mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) && |
| 1026 | conf_id == FW_DBG_START_FROM_ALIVE) { | 1018 | conf_id == FW_DBG_START_FROM_ALIVE) |
| 1027 | iwl_mvm_restart_early_start(mvm); | ||
| 1028 | return 0; | 1019 | return 0; |
| 1029 | } | ||
| 1030 | 1020 | ||
| 1031 | if (!mvm->fw->dbg_conf_tlv[conf_id]) | 1021 | if (!mvm->fw->dbg_conf_tlv[conf_id]) |
| 1032 | return -EINVAL; | 1022 | return -EINVAL; |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 0f1831b41915..fd2fc46e2fe5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | |||
| @@ -1040,7 +1040,7 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, | |||
| 1040 | struct iwl_mac_beacon_cmd_v6 beacon_cmd_v6; | 1040 | struct iwl_mac_beacon_cmd_v6 beacon_cmd_v6; |
| 1041 | struct iwl_mac_beacon_cmd_v7 beacon_cmd; | 1041 | struct iwl_mac_beacon_cmd_v7 beacon_cmd; |
| 1042 | } u = {}; | 1042 | } u = {}; |
| 1043 | struct iwl_mac_beacon_cmd beacon_cmd; | 1043 | struct iwl_mac_beacon_cmd beacon_cmd = {}; |
| 1044 | struct ieee80211_tx_info *info; | 1044 | struct ieee80211_tx_info *info; |
| 1045 | u32 beacon_skb_len; | 1045 | u32 beacon_skb_len; |
| 1046 | u32 rate, tx_flags; | 1046 | u32 rate, tx_flags; |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 4e74a6b90e70..52f8d7a6a7dc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | |||
| @@ -1730,8 +1730,11 @@ int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq); | |||
| 1730 | */ | 1730 | */ |
| 1731 | static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm) | 1731 | static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm) |
| 1732 | { | 1732 | { |
| 1733 | u32 cmd_queue = iwl_mvm_is_dqa_supported(mvm) ? IWL_MVM_DQA_CMD_QUEUE : | ||
| 1734 | IWL_MVM_CMD_QUEUE; | ||
| 1735 | |||
| 1733 | return ((BIT(mvm->cfg->base_params->num_of_queues) - 1) & | 1736 | return ((BIT(mvm->cfg->base_params->num_of_queues) - 1) & |
| 1734 | ~BIT(IWL_MVM_CMD_QUEUE)); | 1737 | ~BIT(cmd_queue)); |
| 1735 | } | 1738 | } |
| 1736 | 1739 | ||
| 1737 | static inline | 1740 | static inline |
| @@ -1753,6 +1756,7 @@ static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm) | |||
| 1753 | if (!iwl_mvm_has_new_tx_api(mvm)) | 1756 | if (!iwl_mvm_has_new_tx_api(mvm)) |
| 1754 | iwl_free_fw_paging(mvm); | 1757 | iwl_free_fw_paging(mvm); |
| 1755 | mvm->ucode_loaded = false; | 1758 | mvm->ucode_loaded = false; |
| 1759 | mvm->fw_dbg_conf = FW_DBG_INVALID; | ||
| 1756 | iwl_trans_stop_device(mvm->trans); | 1760 | iwl_trans_stop_device(mvm->trans); |
| 1757 | } | 1761 | } |
| 1758 | 1762 | ||
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 9ffff6ed8133..3da5ec40aaea 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c | |||
| @@ -1149,21 +1149,37 @@ static void iwl_mvm_fw_error_dump_wk(struct work_struct *work) | |||
| 1149 | 1149 | ||
| 1150 | mutex_lock(&mvm->mutex); | 1150 | mutex_lock(&mvm->mutex); |
| 1151 | 1151 | ||
| 1152 | /* stop recording */ | ||
| 1153 | if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) { | 1152 | if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) { |
| 1153 | /* stop recording */ | ||
| 1154 | iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100); | 1154 | iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100); |
| 1155 | |||
| 1156 | iwl_mvm_fw_error_dump(mvm); | ||
| 1157 | |||
| 1158 | /* start recording again if the firmware is not crashed */ | ||
| 1159 | if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) && | ||
| 1160 | mvm->fw->dbg_dest_tlv) | ||
| 1161 | iwl_clear_bits_prph(mvm->trans, | ||
| 1162 | MON_BUFF_SAMPLE_CTL, 0x100); | ||
| 1155 | } else { | 1163 | } else { |
| 1164 | u32 in_sample = iwl_read_prph(mvm->trans, DBGC_IN_SAMPLE); | ||
| 1165 | u32 out_ctrl = iwl_read_prph(mvm->trans, DBGC_OUT_CTRL); | ||
| 1166 | |||
| 1167 | /* stop recording */ | ||
| 1156 | iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0); | 1168 | iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0); |
| 1157 | /* wait before we collect the data till the DBGC stop */ | ||
| 1158 | udelay(100); | 1169 | udelay(100); |
| 1159 | } | 1170 | iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, 0); |
| 1171 | /* wait before we collect the data till the DBGC stop */ | ||
| 1172 | udelay(500); | ||
| 1160 | 1173 | ||
| 1161 | iwl_mvm_fw_error_dump(mvm); | 1174 | iwl_mvm_fw_error_dump(mvm); |
| 1162 | 1175 | ||
| 1163 | /* start recording again if the firmware is not crashed */ | 1176 | /* start recording again if the firmware is not crashed */ |
| 1164 | WARN_ON_ONCE((!test_bit(STATUS_FW_ERROR, &mvm->trans->status)) && | 1177 | if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) && |
| 1165 | mvm->fw->dbg_dest_tlv && | 1178 | mvm->fw->dbg_dest_tlv) { |
| 1166 | iwl_mvm_start_fw_dbg_conf(mvm, mvm->fw_dbg_conf)); | 1179 | iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, in_sample); |
| 1180 | iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, out_ctrl); | ||
| 1181 | } | ||
| 1182 | } | ||
| 1167 | 1183 | ||
| 1168 | mutex_unlock(&mvm->mutex); | 1184 | mutex_unlock(&mvm->mutex); |
| 1169 | 1185 | ||
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 7788eefcd2bd..aa785cf3cf68 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c | |||
| @@ -2,7 +2,7 @@ | |||
| 2 | * | 2 | * |
| 3 | * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. | 3 | * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. |
| 4 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH | 4 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
| 5 | * Copyright(c) 2016 Intel Deutschland GmbH | 5 | * Copyright(c) 2016 - 2017 Intel Deutschland GmbH |
| 6 | * | 6 | * |
| 7 | * This program is free software; you can redistribute it and/or modify it | 7 | * This program is free software; you can redistribute it and/or modify it |
| 8 | * under the terms of version 2 of the GNU General Public License as | 8 | * under the terms of version 2 of the GNU General Public License as |
| @@ -1083,34 +1083,6 @@ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta, | |||
| 1083 | rs_get_lower_rate_in_column(lq_sta, rate); | 1083 | rs_get_lower_rate_in_column(lq_sta, rate); |
| 1084 | } | 1084 | } |
| 1085 | 1085 | ||
| 1086 | /* Check if both rates are identical | ||
| 1087 | * allow_ant_mismatch enables matching a SISO rate on ANT_A or ANT_B | ||
| 1088 | * with a rate indicating STBC/BFER and ANT_AB. | ||
| 1089 | */ | ||
| 1090 | static inline bool rs_rate_equal(struct rs_rate *a, | ||
| 1091 | struct rs_rate *b, | ||
| 1092 | bool allow_ant_mismatch) | ||
| 1093 | |||
| 1094 | { | ||
| 1095 | bool ant_match = (a->ant == b->ant) && (a->stbc == b->stbc) && | ||
| 1096 | (a->bfer == b->bfer); | ||
| 1097 | |||
| 1098 | if (allow_ant_mismatch) { | ||
| 1099 | if (a->stbc || a->bfer) { | ||
| 1100 | WARN_ONCE(a->ant != ANT_AB, "stbc %d bfer %d ant %d", | ||
| 1101 | a->stbc, a->bfer, a->ant); | ||
| 1102 | ant_match |= (b->ant == ANT_A || b->ant == ANT_B); | ||
| 1103 | } else if (b->stbc || b->bfer) { | ||
| 1104 | WARN_ONCE(b->ant != ANT_AB, "stbc %d bfer %d ant %d", | ||
| 1105 | b->stbc, b->bfer, b->ant); | ||
| 1106 | ant_match |= (a->ant == ANT_A || a->ant == ANT_B); | ||
| 1107 | } | ||
| 1108 | } | ||
| 1109 | |||
| 1110 | return (a->type == b->type) && (a->bw == b->bw) && (a->sgi == b->sgi) && | ||
| 1111 | (a->ldpc == b->ldpc) && (a->index == b->index) && ant_match; | ||
| 1112 | } | ||
| 1113 | |||
| 1114 | /* Check if both rates share the same column */ | 1086 | /* Check if both rates share the same column */ |
| 1115 | static inline bool rs_rate_column_match(struct rs_rate *a, | 1087 | static inline bool rs_rate_column_match(struct rs_rate *a, |
| 1116 | struct rs_rate *b) | 1088 | struct rs_rate *b) |
| @@ -1182,12 +1154,12 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, | |||
| 1182 | u32 lq_hwrate; | 1154 | u32 lq_hwrate; |
| 1183 | struct rs_rate lq_rate, tx_resp_rate; | 1155 | struct rs_rate lq_rate, tx_resp_rate; |
| 1184 | struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl; | 1156 | struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl; |
| 1185 | u8 reduced_txp = (uintptr_t)info->status.status_driver_data[0]; | 1157 | u32 tlc_info = (uintptr_t)info->status.status_driver_data[0]; |
| 1158 | u8 reduced_txp = tlc_info & RS_DRV_DATA_TXP_MSK; | ||
| 1159 | u8 lq_color = RS_DRV_DATA_LQ_COLOR_GET(tlc_info); | ||
| 1186 | u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1]; | 1160 | u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1]; |
| 1187 | struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); | 1161 | struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); |
| 1188 | struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta; | 1162 | struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta; |
| 1189 | bool allow_ant_mismatch = fw_has_api(&mvm->fw->ucode_capa, | ||
| 1190 | IWL_UCODE_TLV_API_LQ_SS_PARAMS); | ||
| 1191 | 1163 | ||
| 1192 | /* Treat uninitialized rate scaling data same as non-existing. */ | 1164 | /* Treat uninitialized rate scaling data same as non-existing. */ |
| 1193 | if (!lq_sta) { | 1165 | if (!lq_sta) { |
| @@ -1262,10 +1234,10 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, | |||
| 1262 | rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate); | 1234 | rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate); |
| 1263 | 1235 | ||
| 1264 | /* Here we actually compare this rate to the latest LQ command */ | 1236 | /* Here we actually compare this rate to the latest LQ command */ |
| 1265 | if (!rs_rate_equal(&tx_resp_rate, &lq_rate, allow_ant_mismatch)) { | 1237 | if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) { |
| 1266 | IWL_DEBUG_RATE(mvm, | 1238 | IWL_DEBUG_RATE(mvm, |
| 1267 | "initial tx resp rate 0x%x does not match 0x%x\n", | 1239 | "tx resp color 0x%x does not match 0x%x\n", |
| 1268 | tx_resp_hwrate, lq_hwrate); | 1240 | lq_color, LQ_FLAG_COLOR_GET(table->flags)); |
| 1269 | 1241 | ||
| 1270 | /* | 1242 | /* |
| 1271 | * Since rates mis-match, the last LQ command may have failed. | 1243 | * Since rates mis-match, the last LQ command may have failed. |
| @@ -3326,6 +3298,7 @@ static void rs_build_rates_table(struct iwl_mvm *mvm, | |||
| 3326 | u8 valid_tx_ant = 0; | 3298 | u8 valid_tx_ant = 0; |
| 3327 | struct iwl_lq_cmd *lq_cmd = &lq_sta->lq; | 3299 | struct iwl_lq_cmd *lq_cmd = &lq_sta->lq; |
| 3328 | bool toggle_ant = false; | 3300 | bool toggle_ant = false; |
| 3301 | u32 color; | ||
| 3329 | 3302 | ||
| 3330 | memcpy(&rate, initial_rate, sizeof(rate)); | 3303 | memcpy(&rate, initial_rate, sizeof(rate)); |
| 3331 | 3304 | ||
| @@ -3380,6 +3353,9 @@ static void rs_build_rates_table(struct iwl_mvm *mvm, | |||
| 3380 | num_rates, num_retries, valid_tx_ant, | 3353 | num_rates, num_retries, valid_tx_ant, |
| 3381 | toggle_ant); | 3354 | toggle_ant); |
| 3382 | 3355 | ||
| 3356 | /* update the color of the LQ command (as a counter at bits 1-3) */ | ||
| 3357 | color = LQ_FLAGS_COLOR_INC(LQ_FLAG_COLOR_GET(lq_cmd->flags)); | ||
| 3358 | lq_cmd->flags = LQ_FLAG_COLOR_SET(lq_cmd->flags, color); | ||
| 3383 | } | 3359 | } |
| 3384 | 3360 | ||
| 3385 | struct rs_bfer_active_iter_data { | 3361 | struct rs_bfer_active_iter_data { |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h index ee207f2c0a90..3abde1cb0303 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h | |||
| @@ -2,6 +2,7 @@ | |||
| 2 | * | 2 | * |
| 3 | * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. | 3 | * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. |
| 4 | * Copyright(c) 2015 Intel Mobile Communications GmbH | 4 | * Copyright(c) 2015 Intel Mobile Communications GmbH |
| 5 | * Copyright(c) 2017 Intel Deutschland GmbH | ||
| 5 | * | 6 | * |
| 6 | * This program is free software; you can redistribute it and/or modify it | 7 | * This program is free software; you can redistribute it and/or modify it |
| 7 | * under the terms of version 2 of the GNU General Public License as | 8 | * under the terms of version 2 of the GNU General Public License as |
| @@ -357,6 +358,20 @@ struct iwl_lq_sta { | |||
| 357 | } pers; | 358 | } pers; |
| 358 | }; | 359 | }; |
| 359 | 360 | ||
| 361 | /* ieee80211_tx_info's status_driver_data[0] is packed with lq color and txp | ||
| 362 | * Note, it's iwlmvm <-> mac80211 interface. | ||
| 363 | * bits 0-7: reduced tx power | ||
| 364 | * bits 8-10: LQ command's color | ||
| 365 | */ | ||
| 366 | #define RS_DRV_DATA_TXP_MSK 0xff | ||
| 367 | #define RS_DRV_DATA_LQ_COLOR_POS 8 | ||
| 368 | #define RS_DRV_DATA_LQ_COLOR_MSK (7 << RS_DRV_DATA_LQ_COLOR_POS) | ||
| 369 | #define RS_DRV_DATA_LQ_COLOR_GET(_f) (((_f) & RS_DRV_DATA_LQ_COLOR_MSK) >>\ | ||
| 370 | RS_DRV_DATA_LQ_COLOR_POS) | ||
| 371 | #define RS_DRV_DATA_PACK(_c, _p) ((void *)(uintptr_t)\ | ||
| 372 | (((uintptr_t)_p) |\ | ||
| 373 | ((_c) << RS_DRV_DATA_LQ_COLOR_POS))) | ||
| 374 | |||
| 360 | /* Initialize station's rate scaling information after adding station */ | 375 | /* Initialize station's rate scaling information after adding station */ |
| 361 | void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, | 376 | void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, |
| 362 | enum nl80211_band band, bool init); | 377 | enum nl80211_band band, bool init); |
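
The comment block above documents how status_driver_data[0] now carries both the reduced tx power (bits 0-7) and the LQ color (bits 8-10) across the iwlmvm/mac80211 boundary. A stand-alone round trip through the packing macros from the hunk, with arbitrary example values; the uintptr_t variable stands in for the void * slot:

    #include <stdio.h>
    #include <stdint.h>

    #define RS_DRV_DATA_TXP_MSK        0xff
    #define RS_DRV_DATA_LQ_COLOR_POS   8
    #define RS_DRV_DATA_LQ_COLOR_MSK   (7 << RS_DRV_DATA_LQ_COLOR_POS)
    #define RS_DRV_DATA_LQ_COLOR_GET(_f) (((_f) & RS_DRV_DATA_LQ_COLOR_MSK) >> \
                                          RS_DRV_DATA_LQ_COLOR_POS)
    #define RS_DRV_DATA_PACK(_c, _p)   ((void *)(uintptr_t)\
                                        (((uintptr_t)_p) |\
                                         ((_c) << RS_DRV_DATA_LQ_COLOR_POS)))

    int main(void)
    {
            uint8_t reduced_txp = 0x2a;     /* arbitrary example values */
            uint8_t lq_color = 5;
            void *slot = RS_DRV_DATA_PACK(lq_color, reduced_txp);
            uintptr_t packed = (uintptr_t)slot;

            printf("txp=0x%02lx color=%lu\n",
                   (unsigned long)(packed & RS_DRV_DATA_TXP_MSK),
                   (unsigned long)RS_DRV_DATA_LQ_COLOR_GET(packed));
            return 0;       /* prints txp=0x2a color=5 */
    }
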
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index f5c786ddc526..614d67810d05 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c | |||
| @@ -2120,7 +2120,8 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) | |||
| 2120 | if (!iwl_mvm_is_dqa_supported(mvm)) | 2120 | if (!iwl_mvm_is_dqa_supported(mvm)) |
| 2121 | return 0; | 2121 | return 0; |
| 2122 | 2122 | ||
| 2123 | if (WARN_ON(vif->type != NL80211_IFTYPE_AP)) | 2123 | if (WARN_ON(vif->type != NL80211_IFTYPE_AP && |
| 2124 | vif->type != NL80211_IFTYPE_ADHOC)) | ||
| 2124 | return -ENOTSUPP; | 2125 | return -ENOTSUPP; |
| 2125 | 2126 | ||
| 2126 | /* | 2127 | /* |
| @@ -2155,6 +2156,16 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) | |||
| 2155 | mvmvif->cab_queue = queue; | 2156 | mvmvif->cab_queue = queue; |
| 2156 | } else if (!fw_has_api(&mvm->fw->ucode_capa, | 2157 | } else if (!fw_has_api(&mvm->fw->ucode_capa, |
| 2157 | IWL_UCODE_TLV_API_STA_TYPE)) { | 2158 | IWL_UCODE_TLV_API_STA_TYPE)) { |
| 2159 | /* | ||
| 2160 | * In IBSS, ieee80211_check_queues() sets the cab_queue to be | ||
| 2161 | * invalid, so make sure we use the queue we want. | ||
| 2162 | * Note that this is done here as we want to avoid making DQA | ||
| 2163 | * changes in mac80211 layer. | ||
| 2164 | */ | ||
| 2165 | if (vif->type == NL80211_IFTYPE_ADHOC) { | ||
| 2166 | vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; | ||
| 2167 | mvmvif->cab_queue = vif->cab_queue; | ||
| 2168 | } | ||
| 2158 | iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0, | 2169 | iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0, |
| 2159 | &cfg, timeout); | 2170 | &cfg, timeout); |
| 2160 | } | 2171 | } |
| @@ -3321,18 +3332,15 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, | |||
| 3321 | 3332 | ||
| 3322 | /* Get the station from the mvm local station table */ | 3333 | /* Get the station from the mvm local station table */ |
| 3323 | mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); | 3334 | mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); |
| 3324 | if (!mvm_sta) { | 3335 | if (mvm_sta) |
| 3325 | IWL_ERR(mvm, "Failed to find station\n"); | 3336 | sta_id = mvm_sta->sta_id; |
| 3326 | return -EINVAL; | ||
| 3327 | } | ||
| 3328 | sta_id = mvm_sta->sta_id; | ||
| 3329 | 3337 | ||
| 3330 | IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n", | 3338 | IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n", |
| 3331 | keyconf->keyidx, sta_id); | 3339 | keyconf->keyidx, sta_id); |
| 3332 | 3340 | ||
| 3333 | if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC || | 3341 | if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC || |
| 3334 | keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || | 3342 | keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || |
| 3335 | keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) | 3343 | keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)) |
| 3336 | return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true); | 3344 | return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true); |
| 3337 | 3345 | ||
| 3338 | if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) { | 3346 | if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) { |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 2716cb5483bf..ad62b67dceb2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h | |||
| @@ -313,6 +313,7 @@ enum iwl_mvm_agg_state { | |||
| 313 | * This is basically (last acked packet++). | 313 | * This is basically (last acked packet++). |
| 314 | * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the | 314 | * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the |
| 315 | * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA). | 315 | * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA). |
| 316 | * @lq_color: the color of the LQ command as it appears in tx response. | ||
| 316 | * @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed. | 317 | * @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed. |
| 317 | * @state: state of the BA agreement establishment / tear down. | 318 | * @state: state of the BA agreement establishment / tear down. |
| 318 | * @txq_id: Tx queue used by the BA session / DQA | 319 | * @txq_id: Tx queue used by the BA session / DQA |
| @@ -331,6 +332,7 @@ struct iwl_mvm_tid_data { | |||
| 331 | u16 next_reclaimed; | 332 | u16 next_reclaimed; |
| 332 | /* The rest is Tx AGG related */ | 333 | /* The rest is Tx AGG related */ |
| 333 | u32 rate_n_flags; | 334 | u32 rate_n_flags; |
| 335 | u8 lq_color; | ||
| 334 | bool amsdu_in_ampdu_allowed; | 336 | bool amsdu_in_ampdu_allowed; |
| 335 | enum iwl_mvm_agg_state state; | 337 | enum iwl_mvm_agg_state state; |
| 336 | u16 txq_id; | 338 | u16 txq_id; |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index f9cbd197246f..506d58104e1c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c | |||
| @@ -790,11 +790,13 @@ static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev, | |||
| 790 | struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata); | 790 | struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata); |
| 791 | int ret; | 791 | int ret; |
| 792 | 792 | ||
| 793 | if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) | ||
| 794 | return -EIO; | ||
| 795 | |||
| 796 | mutex_lock(&mvm->mutex); | 793 | mutex_lock(&mvm->mutex); |
| 797 | 794 | ||
| 795 | if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) { | ||
| 796 | ret = -EIO; | ||
| 797 | goto unlock; | ||
| 798 | } | ||
| 799 | |||
| 798 | if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) { | 800 | if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) { |
| 799 | ret = -EINVAL; | 801 | ret = -EINVAL; |
| 800 | goto unlock; | 802 | goto unlock; |
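
The tt.c hunk above moves the firmware-state check under mvm->mutex so the check and the work it guards can no longer race with a state change. A minimal userspace sketch of the same check-under-lock pattern, using pthreads and invented names (device_state, set_budget, fw_loaded) rather than the driver's actual types:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct device_state {
	pthread_mutex_t lock;
	bool fw_loaded;		/* may be flipped by another thread */
	int budget;
};

/* Check the precondition and act on it under the same lock, so fw_loaded
 * cannot change between the test and the update (the race the hunk closes). */
static int set_budget(struct device_state *dev, int new_budget)
{
	int ret = 0;

	pthread_mutex_lock(&dev->lock);

	if (!dev->fw_loaded) {
		ret = -EIO;
		goto unlock;
	}

	dev->budget = new_budget;

unlock:
	pthread_mutex_unlock(&dev->lock);
	return ret;
}

int main(void)
{
	struct device_state dev = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.fw_loaded = false,
	};

	printf("before load: %d\n", set_budget(&dev, 3));	/* -EIO */
	dev.fw_loaded = true;
	printf("after load:  %d\n", set_budget(&dev, 3));	/* 0 */
	return 0;
}

Returning through a single unlock label keeps the lock/unlock pairing obvious, which is also why the hunk converts the early return into a goto unlock.
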
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index bcaceb64a6e8..f21901cd4a4f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c | |||
| @@ -1323,6 +1323,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, | |||
| 1323 | struct iwl_mvm_sta *mvmsta; | 1323 | struct iwl_mvm_sta *mvmsta; |
| 1324 | struct sk_buff_head skbs; | 1324 | struct sk_buff_head skbs; |
| 1325 | u8 skb_freed = 0; | 1325 | u8 skb_freed = 0; |
| 1326 | u8 lq_color; | ||
| 1326 | u16 next_reclaimed, seq_ctl; | 1327 | u16 next_reclaimed, seq_ctl; |
| 1327 | bool is_ndp = false; | 1328 | bool is_ndp = false; |
| 1328 | 1329 | ||
| @@ -1405,8 +1406,9 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, | |||
| 1405 | info->status.tx_time = | 1406 | info->status.tx_time = |
| 1406 | le16_to_cpu(tx_resp->wireless_media_time); | 1407 | le16_to_cpu(tx_resp->wireless_media_time); |
| 1407 | BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1); | 1408 | BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1); |
| 1409 | lq_color = TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info); | ||
| 1408 | info->status.status_driver_data[0] = | 1410 | info->status.status_driver_data[0] = |
| 1409 | (void *)(uintptr_t)tx_resp->reduced_tpc; | 1411 | RS_DRV_DATA_PACK(lq_color, tx_resp->reduced_tpc); |
| 1410 | 1412 | ||
| 1411 | ieee80211_tx_status(mvm->hw, skb); | 1413 | ieee80211_tx_status(mvm->hw, skb); |
| 1412 | } | 1414 | } |
| @@ -1638,6 +1640,9 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm, | |||
| 1638 | le32_to_cpu(tx_resp->initial_rate); | 1640 | le32_to_cpu(tx_resp->initial_rate); |
| 1639 | mvmsta->tid_data[tid].tx_time = | 1641 | mvmsta->tid_data[tid].tx_time = |
| 1640 | le16_to_cpu(tx_resp->wireless_media_time); | 1642 | le16_to_cpu(tx_resp->wireless_media_time); |
| 1643 | mvmsta->tid_data[tid].lq_color = | ||
| 1644 | (tx_resp->tlc_info & TX_RES_RATE_TABLE_COLOR_MSK) >> | ||
| 1645 | TX_RES_RATE_TABLE_COLOR_POS; | ||
| 1641 | } | 1646 | } |
| 1642 | 1647 | ||
| 1643 | rcu_read_unlock(); | 1648 | rcu_read_unlock(); |
| @@ -1707,6 +1712,11 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid, | |||
| 1707 | iwl_mvm_check_ratid_empty(mvm, sta, tid); | 1712 | iwl_mvm_check_ratid_empty(mvm, sta, tid); |
| 1708 | 1713 | ||
| 1709 | freed = 0; | 1714 | freed = 0; |
| 1715 | |||
| 1716 | /* pack lq color from tid_data along the reduced txp */ | ||
| 1717 | ba_info->status.status_driver_data[0] = | ||
| 1718 | RS_DRV_DATA_PACK(tid_data->lq_color, | ||
| 1719 | ba_info->status.status_driver_data[0]); | ||
| 1710 | ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate; | 1720 | ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate; |
| 1711 | 1721 | ||
| 1712 | skb_queue_walk(&reclaimed_skbs, skb) { | 1722 | skb_queue_walk(&reclaimed_skbs, skb) { |
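
The tx.c hunks above carry the LQ table color from the TX response into status_driver_data[0], packed next to the reduced TX power via RS_DRV_DATA_PACK(). That macro and TX_RES_RATE_TABLE_COL_GET() are defined elsewhere in the driver, so their real bit layout is not visible here; the sketch below assumes an 8-bit color field in bits 15:8 purely to illustrate the pack/extract round trip through a single pointer-sized slot.

#include <stdint.h>
#include <stdio.h>

/* Illustrative layout only: color in bits 15:8, reduced TX power in bits 7:0.
 * The real iwlwifi macros may use a different layout. */
#define DRV_DATA_COLOR_POS	8
#define DRV_DATA_COLOR_MSK	(0xffu << DRV_DATA_COLOR_POS)

static uintptr_t drv_data_pack(uint8_t lq_color, uint8_t reduced_tpc)
{
	return ((uintptr_t)lq_color << DRV_DATA_COLOR_POS) | reduced_tpc;
}

static uint8_t drv_data_color(uintptr_t packed)
{
	return (packed & DRV_DATA_COLOR_MSK) >> DRV_DATA_COLOR_POS;
}

int main(void)
{
	uintptr_t packed = drv_data_pack(3, 0x1f);

	/* Both fields survive the round trip through one pointer-sized value. */
	printf("color=%u tpc=0x%02x\n", drv_data_color(packed),
	       (unsigned int)(packed & 0xff));
	return 0;
}

Both values then fit in the single pointer-sized driver-data slot used in the hunks above.
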
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 70acf850a9f1..93cbc7a69bcd 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c | |||
| @@ -2803,7 +2803,8 @@ static struct iwl_trans_dump_data | |||
| 2803 | #ifdef CONFIG_PM_SLEEP | 2803 | #ifdef CONFIG_PM_SLEEP |
| 2804 | static int iwl_trans_pcie_suspend(struct iwl_trans *trans) | 2804 | static int iwl_trans_pcie_suspend(struct iwl_trans *trans) |
| 2805 | { | 2805 | { |
| 2806 | if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3) | 2806 | if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 && |
| 2807 | (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3)) | ||
| 2807 | return iwl_pci_fw_enter_d0i3(trans); | 2808 | return iwl_pci_fw_enter_d0i3(trans); |
| 2808 | 2809 | ||
| 2809 | return 0; | 2810 | return 0; |
| @@ -2811,7 +2812,8 @@ static int iwl_trans_pcie_suspend(struct iwl_trans *trans) | |||
| 2811 | 2812 | ||
| 2812 | static void iwl_trans_pcie_resume(struct iwl_trans *trans) | 2813 | static void iwl_trans_pcie_resume(struct iwl_trans *trans) |
| 2813 | { | 2814 | { |
| 2814 | if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3) | 2815 | if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 && |
| 2816 | (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3)) | ||
| 2815 | iwl_pci_fw_exit_d0i3(trans); | 2817 | iwl_pci_fw_exit_d0i3(trans); |
| 2816 | } | 2818 | } |
| 2817 | #endif /* CONFIG_PM_SLEEP */ | 2819 | #endif /* CONFIG_PM_SLEEP */ |
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index 9fb46a6f47cf..9c9bfbbabdf1 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c | |||
| @@ -906,7 +906,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, | |||
| 906 | 906 | ||
| 907 | if (WARN_ON(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp))) { | 907 | if (WARN_ON(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp))) { |
| 908 | ret = -EINVAL; | 908 | ret = -EINVAL; |
| 909 | goto error; | 909 | goto error_free_resp; |
| 910 | } | 910 | } |
| 911 | 911 | ||
| 912 | rsp = (void *)hcmd.resp_pkt->data; | 912 | rsp = (void *)hcmd.resp_pkt->data; |
| @@ -915,13 +915,13 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, | |||
| 915 | if (qid > ARRAY_SIZE(trans_pcie->txq)) { | 915 | if (qid > ARRAY_SIZE(trans_pcie->txq)) { |
| 916 | WARN_ONCE(1, "queue index %d unsupported", qid); | 916 | WARN_ONCE(1, "queue index %d unsupported", qid); |
| 917 | ret = -EIO; | 917 | ret = -EIO; |
| 918 | goto error; | 918 | goto error_free_resp; |
| 919 | } | 919 | } |
| 920 | 920 | ||
| 921 | if (test_and_set_bit(qid, trans_pcie->queue_used)) { | 921 | if (test_and_set_bit(qid, trans_pcie->queue_used)) { |
| 922 | WARN_ONCE(1, "queue %d already used", qid); | 922 | WARN_ONCE(1, "queue %d already used", qid); |
| 923 | ret = -EIO; | 923 | ret = -EIO; |
| 924 | goto error; | 924 | goto error_free_resp; |
| 925 | } | 925 | } |
| 926 | 926 | ||
| 927 | txq->id = qid; | 927 | txq->id = qid; |
| @@ -934,8 +934,11 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, | |||
| 934 | (txq->write_ptr) | (qid << 16)); | 934 | (txq->write_ptr) | (qid << 16)); |
| 935 | IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid); | 935 | IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid); |
| 936 | 936 | ||
| 937 | iwl_free_resp(&hcmd); | ||
| 937 | return qid; | 938 | return qid; |
| 938 | 939 | ||
| 940 | error_free_resp: | ||
| 941 | iwl_free_resp(&hcmd); | ||
| 939 | error: | 942 | error: |
| 940 | iwl_pcie_gen2_txq_free_memory(trans, txq); | 943 | iwl_pcie_gen2_txq_free_memory(trans, txq); |
| 941 | return ret; | 944 | return ret; |
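
The tx-gen2.c hunk above fixes a leak of the host-command response by splitting the error path into two labels: error_free_resp releases the response before falling through to the original queue-memory cleanup, and the success path now frees the response as well. A standalone sketch of that staged-unwind idiom, with invented names and plain malloc()/free() standing in for the driver's resources:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical two-stage allocation: on failure, release only what has been
 * acquired so far by jumping to the matching label, mirroring the
 * error_free_resp/error split added above. */
static int setup_queue(int fail_early, int fail_late)
{
	void *txq_mem, *resp;
	int ret;

	txq_mem = malloc(128);		/* the queue itself */
	if (!txq_mem)
		return -1;

	if (fail_early) {		/* nothing but txq_mem to undo yet */
		ret = -1;
		goto error;
	}

	resp = malloc(64);		/* the command response */
	if (!resp) {
		ret = -1;
		goto error;
	}

	if (fail_late) {		/* resp exists now and must be freed too */
		ret = -1;
		goto error_free_resp;
	}

	free(resp);			/* success also releases the response ... */
	free(txq_mem);			/* ... and, only in this sketch, the queue */
	return 0;

error_free_resp:
	free(resp);
error:
	free(txq_mem);
	return ret;
}

int main(void)
{
	printf("%d %d %d\n", setup_queue(0, 0), setup_queue(1, 0), setup_queue(0, 1));
	return 0;
}

Each goto target releases exactly the resources acquired before the jump, so adding another allocation later only means adding one more label above the existing ones.
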
diff --git a/drivers/net/wireless/intersil/hostap/hostap_main.c b/drivers/net/wireless/intersil/hostap/hostap_main.c index 544fc09dcb62..1372b20f931e 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_main.c +++ b/drivers/net/wireless/intersil/hostap/hostap_main.c | |||
| @@ -73,7 +73,7 @@ struct net_device * hostap_add_interface(struct local_info *local, | |||
| 73 | dev->mem_end = mdev->mem_end; | 73 | dev->mem_end = mdev->mem_end; |
| 74 | 74 | ||
| 75 | hostap_setup_dev(dev, local, type); | 75 | hostap_setup_dev(dev, local, type); |
| 76 | dev->destructor = free_netdev; | 76 | dev->needs_free_netdev = true; |
| 77 | 77 | ||
| 78 | sprintf(dev->name, "%s%s", prefix, name); | 78 | sprintf(dev->name, "%s%s", prefix, name); |
| 79 | if (!rtnl_locked) | 79 | if (!rtnl_locked) |
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 002b25cff5b6..c854a557998b 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
| @@ -2861,7 +2861,7 @@ static const struct net_device_ops hwsim_netdev_ops = { | |||
| 2861 | static void hwsim_mon_setup(struct net_device *dev) | 2861 | static void hwsim_mon_setup(struct net_device *dev) |
| 2862 | { | 2862 | { |
| 2863 | dev->netdev_ops = &hwsim_netdev_ops; | 2863 | dev->netdev_ops = &hwsim_netdev_ops; |
| 2864 | dev->destructor = free_netdev; | 2864 | dev->needs_free_netdev = true; |
| 2865 | ether_setup(dev); | 2865 | ether_setup(dev); |
| 2866 | dev->priv_flags |= IFF_NO_QUEUE; | 2866 | dev->priv_flags |= IFF_NO_QUEUE; |
| 2867 | dev->type = ARPHRD_IEEE80211_RADIOTAP; | 2867 | dev->type = ARPHRD_IEEE80211_RADIOTAP; |
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index dd87b9ff64c3..39b6b5e3f6e0 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c | |||
| @@ -1280,7 +1280,7 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv, | |||
| 1280 | struct net_device *dev) | 1280 | struct net_device *dev) |
| 1281 | { | 1281 | { |
| 1282 | dev->netdev_ops = &mwifiex_netdev_ops; | 1282 | dev->netdev_ops = &mwifiex_netdev_ops; |
| 1283 | dev->destructor = free_netdev; | 1283 | dev->needs_free_netdev = true; |
| 1284 | /* Initialize private structure */ | 1284 | /* Initialize private structure */ |
| 1285 | priv->current_key_index = 0; | 1285 | priv->current_key_index = 0; |
| 1286 | priv->media_connected = false; | 1286 | priv->media_connected = false; |
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 530586be05b4..5b1d2e8402d9 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h | |||
| @@ -199,6 +199,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */ | |||
| 199 | unsigned long remaining_credit; | 199 | unsigned long remaining_credit; |
| 200 | struct timer_list credit_timeout; | 200 | struct timer_list credit_timeout; |
| 201 | u64 credit_window_start; | 201 | u64 credit_window_start; |
| 202 | bool rate_limited; | ||
| 202 | 203 | ||
| 203 | /* Statistics */ | 204 | /* Statistics */ |
| 204 | struct xenvif_stats stats; | 205 | struct xenvif_stats stats; |
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 8397f6c92451..e322a862ddfe 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c | |||
| @@ -106,7 +106,11 @@ static int xenvif_poll(struct napi_struct *napi, int budget) | |||
| 106 | 106 | ||
| 107 | if (work_done < budget) { | 107 | if (work_done < budget) { |
| 108 | napi_complete_done(napi, work_done); | 108 | napi_complete_done(napi, work_done); |
| 109 | xenvif_napi_schedule_or_enable_events(queue); | 109 | /* If the queue is rate-limited, it shall be |
| 110 | * rescheduled in the timer callback. | ||
| 111 | */ | ||
| 112 | if (likely(!queue->rate_limited)) | ||
| 113 | xenvif_napi_schedule_or_enable_events(queue); | ||
| 110 | } | 114 | } |
| 111 | 115 | ||
| 112 | return work_done; | 116 | return work_done; |
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 602d408fa25e..5042ff8d449a 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c | |||
| @@ -180,6 +180,7 @@ static void tx_add_credit(struct xenvif_queue *queue) | |||
| 180 | max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */ | 180 | max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */ |
| 181 | 181 | ||
| 182 | queue->remaining_credit = min(max_credit, max_burst); | 182 | queue->remaining_credit = min(max_credit, max_burst); |
| 183 | queue->rate_limited = false; | ||
| 183 | } | 184 | } |
| 184 | 185 | ||
| 185 | void xenvif_tx_credit_callback(unsigned long data) | 186 | void xenvif_tx_credit_callback(unsigned long data) |
| @@ -686,8 +687,10 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size) | |||
| 686 | msecs_to_jiffies(queue->credit_usec / 1000); | 687 | msecs_to_jiffies(queue->credit_usec / 1000); |
| 687 | 688 | ||
| 688 | /* Timer could already be pending in rare cases. */ | 689 | /* Timer could already be pending in rare cases. */ |
| 689 | if (timer_pending(&queue->credit_timeout)) | 690 | if (timer_pending(&queue->credit_timeout)) { |
| 691 | queue->rate_limited = true; | ||
| 690 | return true; | 692 | return true; |
| 693 | } | ||
| 691 | 694 | ||
| 692 | /* Passed the point where we can replenish credit? */ | 695 | /* Passed the point where we can replenish credit? */ |
| 693 | if (time_after_eq64(now, next_credit)) { | 696 | if (time_after_eq64(now, next_credit)) { |
| @@ -702,6 +705,7 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size) | |||
| 702 | mod_timer(&queue->credit_timeout, | 705 | mod_timer(&queue->credit_timeout, |
| 703 | next_credit); | 706 | next_credit); |
| 704 | queue->credit_window_start = next_credit; | 707 | queue->credit_window_start = next_credit; |
| 708 | queue->rate_limited = true; | ||
| 705 | 709 | ||
| 706 | return true; | 710 | return true; |
| 707 | } | 711 | } |
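
The xen-netback hunks above add a per-queue rate_limited flag: tx_credit_exceeded() sets it whenever the credit timer is (or becomes) pending, the NAPI poll loop skips rescheduling a rate-limited queue, and tx_add_credit() clears the flag when the timer refills the credit. A toy userspace model of that credit scheme, with invented names and the timer reduced to a direct callback:

#include <stdbool.h>
#include <stdio.h>

/* Toy model: a queue consumes credit per packet and, once exhausted, is
 * marked rate_limited until the (simulated) timer replenishes it. */
struct toy_queue {
	unsigned long remaining_credit;
	unsigned long credit_per_window;
	bool rate_limited;
};

static bool credit_exceeded(struct toy_queue *q, unsigned int size)
{
	if (size > q->remaining_credit) {
		q->rate_limited = true;	/* poll loop must not reschedule us */
		return true;
	}
	q->remaining_credit -= size;
	return false;
}

/* Runs when the credit timer fires: refill and allow scheduling again. */
static void credit_timer_cb(struct toy_queue *q)
{
	q->remaining_credit = q->credit_per_window;
	q->rate_limited = false;
}

int main(void)
{
	struct toy_queue q = { .remaining_credit = 1000, .credit_per_window = 1000 };

	printf("send 800: blocked=%d\n", credit_exceeded(&q, 800));
	printf("send 800: blocked=%d rate_limited=%d\n",
	       credit_exceeded(&q, 800), q.rate_limited);
	credit_timer_cb(&q);
	printf("after timer: rate_limited=%d credit=%lu\n",
	       q.rate_limited, q.remaining_credit);
	return 0;
}

The point of the flag is ordering: only the timer callback may make a rate-limited queue runnable again, so the poll loop cannot keep rescheduling a queue that has no credit to spend.
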
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_intel.c index c00238491673..7b3b6fd63d7d 100644 --- a/drivers/ntb/hw/intel/ntb_hw_intel.c +++ b/drivers/ntb/hw/intel/ntb_hw_intel.c | |||
| @@ -2878,7 +2878,7 @@ static const struct intel_ntb_reg skx_reg = { | |||
| 2878 | .link_is_up = xeon_link_is_up, | 2878 | .link_is_up = xeon_link_is_up, |
| 2879 | .db_ioread = skx_db_ioread, | 2879 | .db_ioread = skx_db_ioread, |
| 2880 | .db_iowrite = skx_db_iowrite, | 2880 | .db_iowrite = skx_db_iowrite, |
| 2881 | .db_size = sizeof(u64), | 2881 | .db_size = sizeof(u32), |
| 2882 | .ntb_ctl = SKX_NTBCNTL_OFFSET, | 2882 | .ntb_ctl = SKX_NTBCNTL_OFFSET, |
| 2883 | .mw_bar = {2, 4}, | 2883 | .mw_bar = {2, 4}, |
| 2884 | }; | 2884 | }; |
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index 02ca45fdd892..10e5bf460139 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c | |||
| @@ -177,14 +177,12 @@ struct ntb_transport_qp { | |||
| 177 | u64 rx_err_ver; | 177 | u64 rx_err_ver; |
| 178 | u64 rx_memcpy; | 178 | u64 rx_memcpy; |
| 179 | u64 rx_async; | 179 | u64 rx_async; |
| 180 | u64 dma_rx_prep_err; | ||
| 181 | u64 tx_bytes; | 180 | u64 tx_bytes; |
| 182 | u64 tx_pkts; | 181 | u64 tx_pkts; |
| 183 | u64 tx_ring_full; | 182 | u64 tx_ring_full; |
| 184 | u64 tx_err_no_buf; | 183 | u64 tx_err_no_buf; |
| 185 | u64 tx_memcpy; | 184 | u64 tx_memcpy; |
| 186 | u64 tx_async; | 185 | u64 tx_async; |
| 187 | u64 dma_tx_prep_err; | ||
| 188 | }; | 186 | }; |
| 189 | 187 | ||
| 190 | struct ntb_transport_mw { | 188 | struct ntb_transport_mw { |
| @@ -254,8 +252,6 @@ enum { | |||
| 254 | #define QP_TO_MW(nt, qp) ((qp) % nt->mw_count) | 252 | #define QP_TO_MW(nt, qp) ((qp) % nt->mw_count) |
| 255 | #define NTB_QP_DEF_NUM_ENTRIES 100 | 253 | #define NTB_QP_DEF_NUM_ENTRIES 100 |
| 256 | #define NTB_LINK_DOWN_TIMEOUT 10 | 254 | #define NTB_LINK_DOWN_TIMEOUT 10 |
| 257 | #define DMA_RETRIES 20 | ||
| 258 | #define DMA_OUT_RESOURCE_TO msecs_to_jiffies(50) | ||
| 259 | 255 | ||
| 260 | static void ntb_transport_rxc_db(unsigned long data); | 256 | static void ntb_transport_rxc_db(unsigned long data); |
| 261 | static const struct ntb_ctx_ops ntb_transport_ops; | 257 | static const struct ntb_ctx_ops ntb_transport_ops; |
| @@ -516,12 +512,6 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count, | |||
| 516 | out_offset += snprintf(buf + out_offset, out_count - out_offset, | 512 | out_offset += snprintf(buf + out_offset, out_count - out_offset, |
| 517 | "free tx - \t%u\n", | 513 | "free tx - \t%u\n", |
| 518 | ntb_transport_tx_free_entry(qp)); | 514 | ntb_transport_tx_free_entry(qp)); |
| 519 | out_offset += snprintf(buf + out_offset, out_count - out_offset, | ||
| 520 | "DMA tx prep err - \t%llu\n", | ||
| 521 | qp->dma_tx_prep_err); | ||
| 522 | out_offset += snprintf(buf + out_offset, out_count - out_offset, | ||
| 523 | "DMA rx prep err - \t%llu\n", | ||
| 524 | qp->dma_rx_prep_err); | ||
| 525 | 515 | ||
| 526 | out_offset += snprintf(buf + out_offset, out_count - out_offset, | 516 | out_offset += snprintf(buf + out_offset, out_count - out_offset, |
| 527 | "\n"); | 517 | "\n"); |
| @@ -623,7 +613,7 @@ static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt, | |||
| 623 | if (!mw->virt_addr) | 613 | if (!mw->virt_addr) |
| 624 | return -ENOMEM; | 614 | return -ENOMEM; |
| 625 | 615 | ||
| 626 | if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count) | 616 | if (mw_num < qp_count % mw_count) |
| 627 | num_qps_mw = qp_count / mw_count + 1; | 617 | num_qps_mw = qp_count / mw_count + 1; |
| 628 | else | 618 | else |
| 629 | num_qps_mw = qp_count / mw_count; | 619 | num_qps_mw = qp_count / mw_count; |
| @@ -768,8 +758,6 @@ static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp) | |||
| 768 | qp->tx_err_no_buf = 0; | 758 | qp->tx_err_no_buf = 0; |
| 769 | qp->tx_memcpy = 0; | 759 | qp->tx_memcpy = 0; |
| 770 | qp->tx_async = 0; | 760 | qp->tx_async = 0; |
| 771 | qp->dma_tx_prep_err = 0; | ||
| 772 | qp->dma_rx_prep_err = 0; | ||
| 773 | } | 761 | } |
| 774 | 762 | ||
| 775 | static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp) | 763 | static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp) |
| @@ -1000,7 +988,7 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt, | |||
| 1000 | qp->event_handler = NULL; | 988 | qp->event_handler = NULL; |
| 1001 | ntb_qp_link_down_reset(qp); | 989 | ntb_qp_link_down_reset(qp); |
| 1002 | 990 | ||
| 1003 | if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count) | 991 | if (mw_num < qp_count % mw_count) |
| 1004 | num_qps_mw = qp_count / mw_count + 1; | 992 | num_qps_mw = qp_count / mw_count + 1; |
| 1005 | else | 993 | else |
| 1006 | num_qps_mw = qp_count / mw_count; | 994 | num_qps_mw = qp_count / mw_count; |
| @@ -1128,8 +1116,8 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev) | |||
| 1128 | qp_count = ilog2(qp_bitmap); | 1116 | qp_count = ilog2(qp_bitmap); |
| 1129 | if (max_num_clients && max_num_clients < qp_count) | 1117 | if (max_num_clients && max_num_clients < qp_count) |
| 1130 | qp_count = max_num_clients; | 1118 | qp_count = max_num_clients; |
| 1131 | else if (mw_count < qp_count) | 1119 | else if (nt->mw_count < qp_count) |
| 1132 | qp_count = mw_count; | 1120 | qp_count = nt->mw_count; |
| 1133 | 1121 | ||
| 1134 | qp_bitmap &= BIT_ULL(qp_count) - 1; | 1122 | qp_bitmap &= BIT_ULL(qp_count) - 1; |
| 1135 | 1123 | ||
| @@ -1317,7 +1305,6 @@ static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset) | |||
| 1317 | struct dmaengine_unmap_data *unmap; | 1305 | struct dmaengine_unmap_data *unmap; |
| 1318 | dma_cookie_t cookie; | 1306 | dma_cookie_t cookie; |
| 1319 | void *buf = entry->buf; | 1307 | void *buf = entry->buf; |
| 1320 | int retries = 0; | ||
| 1321 | 1308 | ||
| 1322 | len = entry->len; | 1309 | len = entry->len; |
| 1323 | device = chan->device; | 1310 | device = chan->device; |
| @@ -1346,22 +1333,11 @@ static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset) | |||
| 1346 | 1333 | ||
| 1347 | unmap->from_cnt = 1; | 1334 | unmap->from_cnt = 1; |
| 1348 | 1335 | ||
| 1349 | for (retries = 0; retries < DMA_RETRIES; retries++) { | 1336 | txd = device->device_prep_dma_memcpy(chan, unmap->addr[1], |
| 1350 | txd = device->device_prep_dma_memcpy(chan, | 1337 | unmap->addr[0], len, |
| 1351 | unmap->addr[1], | 1338 | DMA_PREP_INTERRUPT); |
| 1352 | unmap->addr[0], len, | 1339 | if (!txd) |
| 1353 | DMA_PREP_INTERRUPT); | ||
| 1354 | if (txd) | ||
| 1355 | break; | ||
| 1356 | |||
| 1357 | set_current_state(TASK_INTERRUPTIBLE); | ||
| 1358 | schedule_timeout(DMA_OUT_RESOURCE_TO); | ||
| 1359 | } | ||
| 1360 | |||
| 1361 | if (!txd) { | ||
| 1362 | qp->dma_rx_prep_err++; | ||
| 1363 | goto err_get_unmap; | 1340 | goto err_get_unmap; |
| 1364 | } | ||
| 1365 | 1341 | ||
| 1366 | txd->callback_result = ntb_rx_copy_callback; | 1342 | txd->callback_result = ntb_rx_copy_callback; |
| 1367 | txd->callback_param = entry; | 1343 | txd->callback_param = entry; |
| @@ -1606,7 +1582,6 @@ static int ntb_async_tx_submit(struct ntb_transport_qp *qp, | |||
| 1606 | struct dmaengine_unmap_data *unmap; | 1582 | struct dmaengine_unmap_data *unmap; |
| 1607 | dma_addr_t dest; | 1583 | dma_addr_t dest; |
| 1608 | dma_cookie_t cookie; | 1584 | dma_cookie_t cookie; |
| 1609 | int retries = 0; | ||
| 1610 | 1585 | ||
| 1611 | device = chan->device; | 1586 | device = chan->device; |
| 1612 | dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index; | 1587 | dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index; |
| @@ -1628,21 +1603,10 @@ static int ntb_async_tx_submit(struct ntb_transport_qp *qp, | |||
| 1628 | 1603 | ||
| 1629 | unmap->to_cnt = 1; | 1604 | unmap->to_cnt = 1; |
| 1630 | 1605 | ||
| 1631 | for (retries = 0; retries < DMA_RETRIES; retries++) { | 1606 | txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len, |
| 1632 | txd = device->device_prep_dma_memcpy(chan, dest, | 1607 | DMA_PREP_INTERRUPT); |
| 1633 | unmap->addr[0], len, | 1608 | if (!txd) |
| 1634 | DMA_PREP_INTERRUPT); | ||
| 1635 | if (txd) | ||
| 1636 | break; | ||
| 1637 | |||
| 1638 | set_current_state(TASK_INTERRUPTIBLE); | ||
| 1639 | schedule_timeout(DMA_OUT_RESOURCE_TO); | ||
| 1640 | } | ||
| 1641 | |||
| 1642 | if (!txd) { | ||
| 1643 | qp->dma_tx_prep_err++; | ||
| 1644 | goto err_get_unmap; | 1609 | goto err_get_unmap; |
| 1645 | } | ||
| 1646 | 1610 | ||
| 1647 | txd->callback_result = ntb_tx_copy_callback; | 1611 | txd->callback_result = ntb_tx_copy_callback; |
| 1648 | txd->callback_param = entry; | 1612 | txd->callback_param = entry; |
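
Two of the ntb_transport.c hunks above replace the queue-to-memory-window split condition with `mw_num < qp_count % mw_count`, which hands one extra queue pair to each of the first (qp_count % mw_count) windows. A small sketch that evaluates the new formula and checks that the per-window counts add up to qp_count; the function name is invented:

#include <stdio.h>

/* The fixed condition gives the first (qp_count % mw_count) memory windows
 * one extra queue pair, so every QP is covered exactly once. */
static unsigned int qps_for_mw(unsigned int qp_count, unsigned int mw_count,
			       unsigned int mw_num)
{
	if (mw_num < qp_count % mw_count)
		return qp_count / mw_count + 1;
	return qp_count / mw_count;
}

int main(void)
{
	unsigned int qp_count = 7, mw_count = 3, mw_num, total = 0;

	for (mw_num = 0; mw_num < mw_count; mw_num++) {
		unsigned int n = qps_for_mw(qp_count, mw_count, mw_num);

		printf("mw %u -> %u qps\n", mw_num, n);
		total += n;
	}
	printf("total=%u (expected %u)\n", total, qp_count);
	return 0;
}

For example, 7 queue pairs over 3 windows split as 3/2/2, which matches the QP_TO_MW() modulo mapping used elsewhere in the file.
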
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c index 434e1d474f33..5cab2831ce99 100644 --- a/drivers/ntb/test/ntb_perf.c +++ b/drivers/ntb/test/ntb_perf.c | |||
| @@ -90,11 +90,11 @@ MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows"); | |||
| 90 | 90 | ||
| 91 | static unsigned int seg_order = 19; /* 512K */ | 91 | static unsigned int seg_order = 19; /* 512K */ |
| 92 | module_param(seg_order, uint, 0644); | 92 | module_param(seg_order, uint, 0644); |
| 93 | MODULE_PARM_DESC(seg_order, "size order [n^2] of buffer segment for testing"); | 93 | MODULE_PARM_DESC(seg_order, "size order [2^n] of buffer segment for testing"); |
| 94 | 94 | ||
| 95 | static unsigned int run_order = 32; /* 4G */ | 95 | static unsigned int run_order = 32; /* 4G */ |
| 96 | module_param(run_order, uint, 0644); | 96 | module_param(run_order, uint, 0644); |
| 97 | MODULE_PARM_DESC(run_order, "size order [n^2] of total data to transfer"); | 97 | MODULE_PARM_DESC(run_order, "size order [2^n] of total data to transfer"); |
| 98 | 98 | ||
| 99 | static bool use_dma; /* default to 0 */ | 99 | static bool use_dma; /* default to 0 */ |
| 100 | module_param(use_dma, bool, 0644); | 100 | module_param(use_dma, bool, 0644); |
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index a60926410438..903d5813023a 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c | |||
| @@ -56,7 +56,7 @@ MODULE_PARM_DESC(max_retries, "max number of retries a command may have"); | |||
| 56 | static int nvme_char_major; | 56 | static int nvme_char_major; |
| 57 | module_param(nvme_char_major, int, 0); | 57 | module_param(nvme_char_major, int, 0); |
| 58 | 58 | ||
| 59 | static unsigned long default_ps_max_latency_us = 25000; | 59 | static unsigned long default_ps_max_latency_us = 100000; |
| 60 | module_param(default_ps_max_latency_us, ulong, 0644); | 60 | module_param(default_ps_max_latency_us, ulong, 0644); |
| 61 | MODULE_PARM_DESC(default_ps_max_latency_us, | 61 | MODULE_PARM_DESC(default_ps_max_latency_us, |
| 62 | "max power saving latency for new devices; use PM QOS to change per device"); | 62 | "max power saving latency for new devices; use PM QOS to change per device"); |
| @@ -1342,7 +1342,7 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl) | |||
| 1342 | * transitioning between power states. Therefore, when running | 1342 | * transitioning between power states. Therefore, when running |
| 1343 | * in any given state, we will enter the next lower-power | 1343 | * in any given state, we will enter the next lower-power |
| 1344 | * non-operational state after waiting 50 * (enlat + exlat) | 1344 | * non-operational state after waiting 50 * (enlat + exlat) |
| 1345 | * microseconds, as long as that state's total latency is under | 1345 | * microseconds, as long as that state's exit latency is under |
| 1346 | * the requested maximum latency. | 1346 | * the requested maximum latency. |
| 1347 | * | 1347 | * |
| 1348 | * We will not autonomously enter any non-operational state for | 1348 | * We will not autonomously enter any non-operational state for |
| @@ -1387,7 +1387,7 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl) | |||
| 1387 | * lowest-power state, not the number of states. | 1387 | * lowest-power state, not the number of states. |
| 1388 | */ | 1388 | */ |
| 1389 | for (state = (int)ctrl->npss; state >= 0; state--) { | 1389 | for (state = (int)ctrl->npss; state >= 0; state--) { |
| 1390 | u64 total_latency_us, transition_ms; | 1390 | u64 total_latency_us, exit_latency_us, transition_ms; |
| 1391 | 1391 | ||
| 1392 | if (target) | 1392 | if (target) |
| 1393 | table->entries[state] = target; | 1393 | table->entries[state] = target; |
| @@ -1408,12 +1408,15 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl) | |||
| 1408 | NVME_PS_FLAGS_NON_OP_STATE)) | 1408 | NVME_PS_FLAGS_NON_OP_STATE)) |
| 1409 | continue; | 1409 | continue; |
| 1410 | 1410 | ||
| 1411 | total_latency_us = | 1411 | exit_latency_us = |
| 1412 | (u64)le32_to_cpu(ctrl->psd[state].entry_lat) + | 1412 | (u64)le32_to_cpu(ctrl->psd[state].exit_lat); |
| 1413 | + le32_to_cpu(ctrl->psd[state].exit_lat); | 1413 | if (exit_latency_us > ctrl->ps_max_latency_us) |
| 1414 | if (total_latency_us > ctrl->ps_max_latency_us) | ||
| 1415 | continue; | 1414 | continue; |
| 1416 | 1415 | ||
| 1416 | total_latency_us = | ||
| 1417 | exit_latency_us + | ||
| 1418 | le32_to_cpu(ctrl->psd[state].entry_lat); | ||
| 1419 | |||
| 1417 | /* | 1420 | /* |
| 1418 | * This state is good. Use it as the APST idle | 1421 | * This state is good. Use it as the APST idle |
| 1419 | * target for higher power states. | 1422 | * target for higher power states. |
| @@ -2438,6 +2441,10 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl) | |||
| 2438 | struct nvme_ns *ns; | 2441 | struct nvme_ns *ns; |
| 2439 | 2442 | ||
| 2440 | mutex_lock(&ctrl->namespaces_mutex); | 2443 | mutex_lock(&ctrl->namespaces_mutex); |
| 2444 | |||
| 2445 | /* Forcibly start all queues to avoid having stuck requests */ | ||
| 2446 | blk_mq_start_hw_queues(ctrl->admin_q); | ||
| 2447 | |||
| 2441 | list_for_each_entry(ns, &ctrl->namespaces, list) { | 2448 | list_for_each_entry(ns, &ctrl->namespaces, list) { |
| 2442 | /* | 2449 | /* |
| 2443 | * Revalidating a dead namespace sets capacity to 0. This will | 2450 | * Revalidating a dead namespace sets capacity to 0. This will |
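
The core.c hunks above change the APST admission test: a non-operational state is now accepted when its exit latency alone fits ps_max_latency_us, while the idle timeout before entering it is still derived from entry plus exit latency per the "50 * (enlat + exlat)" rule quoted in the driver comment. The sketch below restates just those two calculations with an invented ps_desc struct and microsecond fields; it is an illustration of the changed test, not the driver's full table-building loop.

#include <stdint.h>
#include <stdio.h>

struct ps_desc {
	uint32_t entry_lat_us;
	uint32_t exit_lat_us;
};

/* New admission test: only the cost of leaving the state is compared
 * against the latency limit. */
static int state_usable(const struct ps_desc *ps, uint64_t max_latency_us)
{
	return ps->exit_lat_us <= max_latency_us;
}

/* Idle time before entering the state: 50 * (entry + exit), rounded up
 * to milliseconds, as described in the driver comment. */
static uint64_t idle_timeout_ms(const struct ps_desc *ps)
{
	uint64_t total_us = (uint64_t)ps->entry_lat_us + ps->exit_lat_us;

	return (50 * total_us + 999) / 1000;
}

int main(void)
{
	struct ps_desc deep = { .entry_lat_us = 70000, .exit_lat_us = 60000 };
	uint64_t limit_us = 100000;	/* the new default_ps_max_latency_us */

	printf("usable=%d idle_timeout=%llums\n",
	       state_usable(&deep, limit_us),
	       (unsigned long long)idle_timeout_ms(&deep));
	return 0;
}

Under the old combined entry+exit test (and the old 25000 µs default) a state like the one in the example would have been rejected even though leaving it is comparatively cheap.
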
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 5b14cbefb724..92964cef0f4b 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c | |||
| @@ -1139,6 +1139,7 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl) | |||
| 1139 | /* *********************** NVME Ctrl Routines **************************** */ | 1139 | /* *********************** NVME Ctrl Routines **************************** */ |
| 1140 | 1140 | ||
| 1141 | static void __nvme_fc_final_op_cleanup(struct request *rq); | 1141 | static void __nvme_fc_final_op_cleanup(struct request *rq); |
| 1142 | static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg); | ||
| 1142 | 1143 | ||
| 1143 | static int | 1144 | static int |
| 1144 | nvme_fc_reinit_request(void *data, struct request *rq) | 1145 | nvme_fc_reinit_request(void *data, struct request *rq) |
| @@ -1265,7 +1266,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) | |||
| 1265 | struct nvme_command *sqe = &op->cmd_iu.sqe; | 1266 | struct nvme_command *sqe = &op->cmd_iu.sqe; |
| 1266 | __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1); | 1267 | __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1); |
| 1267 | union nvme_result result; | 1268 | union nvme_result result; |
| 1268 | bool complete_rq; | 1269 | bool complete_rq, terminate_assoc = true; |
| 1269 | 1270 | ||
| 1270 | /* | 1271 | /* |
| 1271 | * WARNING: | 1272 | * WARNING: |
| @@ -1294,6 +1295,14 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) | |||
| 1294 | * fabricate a CQE, the following fields will not be set as they | 1295 | * fabricate a CQE, the following fields will not be set as they |
| 1295 | * are not referenced: | 1296 | * are not referenced: |
| 1296 | * cqe.sqid, cqe.sqhd, cqe.command_id | 1297 | * cqe.sqid, cqe.sqhd, cqe.command_id |
| 1298 | * | ||
| 1299 | * Failure or error of an individual i/o, in a transport | ||
| 1300 | * detected fashion unrelated to the nvme completion status, | ||
| 1301 | * can potentially cause the initiator and target sides to get out | ||
| 1302 | * of sync on SQ head/tail (aka outstanding io count allowed). | ||
| 1303 | * Per FC-NVME spec, failure of an individual command requires | ||
| 1304 | * the connection to be terminated, which in turn requires the | ||
| 1305 | * association to be terminated. | ||
| 1297 | */ | 1306 | */ |
| 1298 | 1307 | ||
| 1299 | fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma, | 1308 | fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma, |
| @@ -1359,6 +1368,8 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) | |||
| 1359 | goto done; | 1368 | goto done; |
| 1360 | } | 1369 | } |
| 1361 | 1370 | ||
| 1371 | terminate_assoc = false; | ||
| 1372 | |||
| 1362 | done: | 1373 | done: |
| 1363 | if (op->flags & FCOP_FLAGS_AEN) { | 1374 | if (op->flags & FCOP_FLAGS_AEN) { |
| 1364 | nvme_complete_async_event(&queue->ctrl->ctrl, status, &result); | 1375 | nvme_complete_async_event(&queue->ctrl->ctrl, status, &result); |
| @@ -1366,7 +1377,7 @@ done: | |||
| 1366 | atomic_set(&op->state, FCPOP_STATE_IDLE); | 1377 | atomic_set(&op->state, FCPOP_STATE_IDLE); |
| 1367 | op->flags = FCOP_FLAGS_AEN; /* clear other flags */ | 1378 | op->flags = FCOP_FLAGS_AEN; /* clear other flags */ |
| 1368 | nvme_fc_ctrl_put(ctrl); | 1379 | nvme_fc_ctrl_put(ctrl); |
| 1369 | return; | 1380 | goto check_error; |
| 1370 | } | 1381 | } |
| 1371 | 1382 | ||
| 1372 | complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op); | 1383 | complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op); |
| @@ -1379,6 +1390,10 @@ done: | |||
| 1379 | nvme_end_request(rq, status, result); | 1390 | nvme_end_request(rq, status, result); |
| 1380 | } else | 1391 | } else |
| 1381 | __nvme_fc_final_op_cleanup(rq); | 1392 | __nvme_fc_final_op_cleanup(rq); |
| 1393 | |||
| 1394 | check_error: | ||
| 1395 | if (terminate_assoc) | ||
| 1396 | nvme_fc_error_recovery(ctrl, "transport detected io error"); | ||
| 1382 | } | 1397 | } |
| 1383 | 1398 | ||
| 1384 | static int | 1399 | static int |
| @@ -2791,6 +2806,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, | |||
| 2791 | ctrl->ctrl.opts = NULL; | 2806 | ctrl->ctrl.opts = NULL; |
| 2792 | /* initiate nvme ctrl ref counting teardown */ | 2807 | /* initiate nvme ctrl ref counting teardown */ |
| 2793 | nvme_uninit_ctrl(&ctrl->ctrl); | 2808 | nvme_uninit_ctrl(&ctrl->ctrl); |
| 2809 | nvme_put_ctrl(&ctrl->ctrl); | ||
| 2794 | 2810 | ||
| 2795 | /* as we're past the point where we transition to the ref | 2811 | /* as we're past the point where we transition to the ref |
| 2796 | * counting teardown path, if we return a bad pointer here, | 2812 | * counting teardown path, if we return a bad pointer here, |
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index d52701df7245..40c7581caeb0 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
| @@ -1367,7 +1367,7 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts) | |||
| 1367 | bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO); | 1367 | bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO); |
| 1368 | 1368 | ||
| 1369 | /* If there is a reset ongoing, we shouldn't reset again. */ | 1369 | /* If there is a reset ongoing, we shouldn't reset again. */ |
| 1370 | if (work_busy(&dev->reset_work)) | 1370 | if (dev->ctrl.state == NVME_CTRL_RESETTING) |
| 1371 | return false; | 1371 | return false; |
| 1372 | 1372 | ||
| 1373 | /* We shouldn't reset unless the controller is on fatal error state | 1373 | /* We shouldn't reset unless the controller is on fatal error state |
| @@ -1805,7 +1805,8 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) | |||
| 1805 | if (pci_is_enabled(pdev)) { | 1805 | if (pci_is_enabled(pdev)) { |
| 1806 | u32 csts = readl(dev->bar + NVME_REG_CSTS); | 1806 | u32 csts = readl(dev->bar + NVME_REG_CSTS); |
| 1807 | 1807 | ||
| 1808 | if (dev->ctrl.state == NVME_CTRL_LIVE) | 1808 | if (dev->ctrl.state == NVME_CTRL_LIVE || |
| 1809 | dev->ctrl.state == NVME_CTRL_RESETTING) | ||
| 1809 | nvme_start_freeze(&dev->ctrl); | 1810 | nvme_start_freeze(&dev->ctrl); |
| 1810 | dead = !!((csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY) || | 1811 | dead = !!((csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY) || |
| 1811 | pdev->error_state != pci_channel_io_normal); | 1812 | pdev->error_state != pci_channel_io_normal); |
| @@ -1903,7 +1904,7 @@ static void nvme_reset_work(struct work_struct *work) | |||
| 1903 | bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL); | 1904 | bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL); |
| 1904 | int result = -ENODEV; | 1905 | int result = -ENODEV; |
| 1905 | 1906 | ||
| 1906 | if (WARN_ON(dev->ctrl.state == NVME_CTRL_RESETTING)) | 1907 | if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING)) |
| 1907 | goto out; | 1908 | goto out; |
| 1908 | 1909 | ||
| 1909 | /* | 1910 | /* |
| @@ -1913,9 +1914,6 @@ static void nvme_reset_work(struct work_struct *work) | |||
| 1913 | if (dev->ctrl.ctrl_config & NVME_CC_ENABLE) | 1914 | if (dev->ctrl.ctrl_config & NVME_CC_ENABLE) |
| 1914 | nvme_dev_disable(dev, false); | 1915 | nvme_dev_disable(dev, false); |
| 1915 | 1916 | ||
| 1916 | if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) | ||
| 1917 | goto out; | ||
| 1918 | |||
| 1919 | result = nvme_pci_enable(dev); | 1917 | result = nvme_pci_enable(dev); |
| 1920 | if (result) | 1918 | if (result) |
| 1921 | goto out; | 1919 | goto out; |
| @@ -2009,8 +2007,8 @@ static int nvme_reset(struct nvme_dev *dev) | |||
| 2009 | { | 2007 | { |
| 2010 | if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q)) | 2008 | if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q)) |
| 2011 | return -ENODEV; | 2009 | return -ENODEV; |
| 2012 | if (work_busy(&dev->reset_work)) | 2010 | if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) |
| 2013 | return -ENODEV; | 2011 | return -EBUSY; |
| 2014 | if (!queue_work(nvme_workq, &dev->reset_work)) | 2012 | if (!queue_work(nvme_workq, &dev->reset_work)) |
| 2015 | return -EBUSY; | 2013 | return -EBUSY; |
| 2016 | return 0; | 2014 | return 0; |
| @@ -2136,6 +2134,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 2136 | if (result) | 2134 | if (result) |
| 2137 | goto release_pools; | 2135 | goto release_pools; |
| 2138 | 2136 | ||
| 2137 | nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING); | ||
| 2139 | dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); | 2138 | dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); |
| 2140 | 2139 | ||
| 2141 | queue_work(nvme_workq, &dev->reset_work); | 2140 | queue_work(nvme_workq, &dev->reset_work); |
| @@ -2179,6 +2178,7 @@ static void nvme_remove(struct pci_dev *pdev) | |||
| 2179 | 2178 | ||
| 2180 | nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); | 2179 | nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); |
| 2181 | 2180 | ||
| 2181 | cancel_work_sync(&dev->reset_work); | ||
| 2182 | pci_set_drvdata(pdev, NULL); | 2182 | pci_set_drvdata(pdev, NULL); |
| 2183 | 2183 | ||
| 2184 | if (!pci_device_is_present(pdev)) { | 2184 | if (!pci_device_is_present(pdev)) { |
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 28bd255c144d..24397d306d53 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c | |||
| @@ -753,28 +753,26 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work) | |||
| 753 | if (ret) | 753 | if (ret) |
| 754 | goto requeue; | 754 | goto requeue; |
| 755 | 755 | ||
| 756 | blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true); | ||
| 757 | |||
| 758 | ret = nvmf_connect_admin_queue(&ctrl->ctrl); | 756 | ret = nvmf_connect_admin_queue(&ctrl->ctrl); |
| 759 | if (ret) | 757 | if (ret) |
| 760 | goto stop_admin_q; | 758 | goto requeue; |
| 761 | 759 | ||
| 762 | set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags); | 760 | set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags); |
| 763 | 761 | ||
| 764 | ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); | 762 | ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); |
| 765 | if (ret) | 763 | if (ret) |
| 766 | goto stop_admin_q; | 764 | goto requeue; |
| 767 | 765 | ||
| 768 | nvme_start_keep_alive(&ctrl->ctrl); | 766 | nvme_start_keep_alive(&ctrl->ctrl); |
| 769 | 767 | ||
| 770 | if (ctrl->queue_count > 1) { | 768 | if (ctrl->queue_count > 1) { |
| 771 | ret = nvme_rdma_init_io_queues(ctrl); | 769 | ret = nvme_rdma_init_io_queues(ctrl); |
| 772 | if (ret) | 770 | if (ret) |
| 773 | goto stop_admin_q; | 771 | goto requeue; |
| 774 | 772 | ||
| 775 | ret = nvme_rdma_connect_io_queues(ctrl); | 773 | ret = nvme_rdma_connect_io_queues(ctrl); |
| 776 | if (ret) | 774 | if (ret) |
| 777 | goto stop_admin_q; | 775 | goto requeue; |
| 778 | } | 776 | } |
| 779 | 777 | ||
| 780 | changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); | 778 | changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); |
| @@ -782,7 +780,6 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work) | |||
| 782 | ctrl->ctrl.opts->nr_reconnects = 0; | 780 | ctrl->ctrl.opts->nr_reconnects = 0; |
| 783 | 781 | ||
| 784 | if (ctrl->queue_count > 1) { | 782 | if (ctrl->queue_count > 1) { |
| 785 | nvme_start_queues(&ctrl->ctrl); | ||
| 786 | nvme_queue_scan(&ctrl->ctrl); | 783 | nvme_queue_scan(&ctrl->ctrl); |
| 787 | nvme_queue_async_events(&ctrl->ctrl); | 784 | nvme_queue_async_events(&ctrl->ctrl); |
| 788 | } | 785 | } |
| @@ -791,8 +788,6 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work) | |||
| 791 | 788 | ||
| 792 | return; | 789 | return; |
| 793 | 790 | ||
| 794 | stop_admin_q: | ||
| 795 | blk_mq_stop_hw_queues(ctrl->ctrl.admin_q); | ||
| 796 | requeue: | 791 | requeue: |
| 797 | dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n", | 792 | dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n", |
| 798 | ctrl->ctrl.opts->nr_reconnects); | 793 | ctrl->ctrl.opts->nr_reconnects); |
| @@ -823,6 +818,13 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work) | |||
| 823 | blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, | 818 | blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, |
| 824 | nvme_cancel_request, &ctrl->ctrl); | 819 | nvme_cancel_request, &ctrl->ctrl); |
| 825 | 820 | ||
| 821 | /* | ||
| 822 | * queues are not alive anymore, so restart the queues to fail fast | ||
| 823 | * new IO | ||
| 824 | */ | ||
| 825 | blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true); | ||
| 826 | nvme_start_queues(&ctrl->ctrl); | ||
| 827 | |||
| 826 | nvme_rdma_reconnect_or_remove(ctrl); | 828 | nvme_rdma_reconnect_or_remove(ctrl); |
| 827 | } | 829 | } |
| 828 | 830 | ||
| @@ -1433,7 +1435,7 @@ nvme_rdma_timeout(struct request *rq, bool reserved) | |||
| 1433 | /* | 1435 | /* |
| 1434 | * We cannot accept any other command until the Connect command has completed. | 1436 | * We cannot accept any other command until the Connect command has completed. |
| 1435 | */ | 1437 | */ |
| 1436 | static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, | 1438 | static inline int nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, |
| 1437 | struct request *rq) | 1439 | struct request *rq) |
| 1438 | { | 1440 | { |
| 1439 | if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) { | 1441 | if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) { |
| @@ -1441,11 +1443,22 @@ static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, | |||
| 1441 | 1443 | ||
| 1442 | if (!blk_rq_is_passthrough(rq) || | 1444 | if (!blk_rq_is_passthrough(rq) || |
| 1443 | cmd->common.opcode != nvme_fabrics_command || | 1445 | cmd->common.opcode != nvme_fabrics_command || |
| 1444 | cmd->fabrics.fctype != nvme_fabrics_type_connect) | 1446 | cmd->fabrics.fctype != nvme_fabrics_type_connect) { |
| 1445 | return false; | 1447 | /* |
| 1448 | * reconnecting state means transport disruption, which | ||
| 1449 | * can take a long time and even might fail permanently, | ||
| 1450 | * so we can't let incoming I/O be requeued forever. | ||
| 1451 | * fail it fast to allow upper layers a chance to | ||
| 1452 | * failover. | ||
| 1453 | */ | ||
| 1454 | if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING) | ||
| 1455 | return -EIO; | ||
| 1456 | else | ||
| 1457 | return -EAGAIN; | ||
| 1458 | } | ||
| 1446 | } | 1459 | } |
| 1447 | 1460 | ||
| 1448 | return true; | 1461 | return 0; |
| 1449 | } | 1462 | } |
| 1450 | 1463 | ||
| 1451 | static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, | 1464 | static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, |
| @@ -1463,8 +1476,9 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
| 1463 | 1476 | ||
| 1464 | WARN_ON_ONCE(rq->tag < 0); | 1477 | WARN_ON_ONCE(rq->tag < 0); |
| 1465 | 1478 | ||
| 1466 | if (!nvme_rdma_queue_is_ready(queue, rq)) | 1479 | ret = nvme_rdma_queue_is_ready(queue, rq); |
| 1467 | return BLK_MQ_RQ_QUEUE_BUSY; | 1480 | if (unlikely(ret)) |
| 1481 | goto err; | ||
| 1468 | 1482 | ||
| 1469 | dev = queue->device->dev; | 1483 | dev = queue->device->dev; |
| 1470 | ib_dma_sync_single_for_cpu(dev, sqe->dma, | 1484 | ib_dma_sync_single_for_cpu(dev, sqe->dma, |
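
The rdma.c hunks above turn nvme_rdma_queue_is_ready() from a bool into an errno-style return so that, while the controller is reconnecting, new I/O on a not-yet-live queue fails fast with -EIO instead of being requeued indefinitely; only the fabrics Connect command is always admitted. A toy model of that decision, with invented enum/struct names and the command check reduced to a flag:

#include <errno.h>
#include <stdio.h>

enum ctrl_state { CTRL_LIVE, CTRL_RECONNECTING };

struct toy_queue {
	int live;			/* queue finished its Connect handshake */
	enum ctrl_state ctrl_state;
};

/* 0: admit the request; -EAGAIN: transient, keep it queued;
 * -EIO: reconnect may take very long, fail fast so upper layers can
 * fail over. */
static int queue_is_ready(const struct toy_queue *q, int is_connect_cmd)
{
	if (q->live || is_connect_cmd)
		return 0;

	return q->ctrl_state == CTRL_RECONNECTING ? -EIO : -EAGAIN;
}

int main(void)
{
	struct toy_queue q = { .live = 0, .ctrl_state = CTRL_RECONNECTING };

	printf("connect cmd: %d\n", queue_is_ready(&q, 1));	/* 0: allowed */
	printf("normal I/O:  %d\n", queue_is_ready(&q, 0));	/* -EIO: fail fast */
	return 0;
}

The distinct codes give the queue_rq caller (not shown in the hunk) a way to either complete the request with an error or keep it queued, instead of always returning busy.
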
diff --git a/drivers/of/device.c b/drivers/of/device.c index 9416d052cb89..28c38c756f92 100644 --- a/drivers/of/device.c +++ b/drivers/of/device.c | |||
| @@ -144,8 +144,8 @@ int of_dma_configure(struct device *dev, struct device_node *np) | |||
| 144 | coherent ? " " : " not "); | 144 | coherent ? " " : " not "); |
| 145 | 145 | ||
| 146 | iommu = of_iommu_configure(dev, np); | 146 | iommu = of_iommu_configure(dev, np); |
| 147 | if (IS_ERR(iommu)) | 147 | if (IS_ERR(iommu) && PTR_ERR(iommu) == -EPROBE_DEFER) |
| 148 | return PTR_ERR(iommu); | 148 | return -EPROBE_DEFER; |
| 149 | 149 | ||
| 150 | dev_dbg(dev, "device is%sbehind an iommu\n", | 150 | dev_dbg(dev, "device is%sbehind an iommu\n", |
| 151 | iommu ? " " : " not "); | 151 | iommu ? " " : " not "); |
diff --git a/drivers/pci/access.c b/drivers/pci/access.c index 74cf5fffb1e1..c80e37a69305 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c | |||
| @@ -896,7 +896,7 @@ int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val) | |||
| 896 | { | 896 | { |
| 897 | if (pci_dev_is_disconnected(dev)) { | 897 | if (pci_dev_is_disconnected(dev)) { |
| 898 | *val = ~0; | 898 | *val = ~0; |
| 899 | return -ENODEV; | 899 | return PCIBIOS_DEVICE_NOT_FOUND; |
| 900 | } | 900 | } |
| 901 | return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val); | 901 | return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val); |
| 902 | } | 902 | } |
| @@ -906,7 +906,7 @@ int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val) | |||
| 906 | { | 906 | { |
| 907 | if (pci_dev_is_disconnected(dev)) { | 907 | if (pci_dev_is_disconnected(dev)) { |
| 908 | *val = ~0; | 908 | *val = ~0; |
| 909 | return -ENODEV; | 909 | return PCIBIOS_DEVICE_NOT_FOUND; |
| 910 | } | 910 | } |
| 911 | return pci_bus_read_config_word(dev->bus, dev->devfn, where, val); | 911 | return pci_bus_read_config_word(dev->bus, dev->devfn, where, val); |
| 912 | } | 912 | } |
| @@ -917,7 +917,7 @@ int pci_read_config_dword(const struct pci_dev *dev, int where, | |||
| 917 | { | 917 | { |
| 918 | if (pci_dev_is_disconnected(dev)) { | 918 | if (pci_dev_is_disconnected(dev)) { |
| 919 | *val = ~0; | 919 | *val = ~0; |
| 920 | return -ENODEV; | 920 | return PCIBIOS_DEVICE_NOT_FOUND; |
| 921 | } | 921 | } |
| 922 | return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val); | 922 | return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val); |
| 923 | } | 923 | } |
| @@ -926,7 +926,7 @@ EXPORT_SYMBOL(pci_read_config_dword); | |||
| 926 | int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val) | 926 | int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val) |
| 927 | { | 927 | { |
| 928 | if (pci_dev_is_disconnected(dev)) | 928 | if (pci_dev_is_disconnected(dev)) |
| 929 | return -ENODEV; | 929 | return PCIBIOS_DEVICE_NOT_FOUND; |
| 930 | return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val); | 930 | return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val); |
| 931 | } | 931 | } |
| 932 | EXPORT_SYMBOL(pci_write_config_byte); | 932 | EXPORT_SYMBOL(pci_write_config_byte); |
| @@ -934,7 +934,7 @@ EXPORT_SYMBOL(pci_write_config_byte); | |||
| 934 | int pci_write_config_word(const struct pci_dev *dev, int where, u16 val) | 934 | int pci_write_config_word(const struct pci_dev *dev, int where, u16 val) |
| 935 | { | 935 | { |
| 936 | if (pci_dev_is_disconnected(dev)) | 936 | if (pci_dev_is_disconnected(dev)) |
| 937 | return -ENODEV; | 937 | return PCIBIOS_DEVICE_NOT_FOUND; |
| 938 | return pci_bus_write_config_word(dev->bus, dev->devfn, where, val); | 938 | return pci_bus_write_config_word(dev->bus, dev->devfn, where, val); |
| 939 | } | 939 | } |
| 940 | EXPORT_SYMBOL(pci_write_config_word); | 940 | EXPORT_SYMBOL(pci_write_config_word); |
| @@ -943,7 +943,7 @@ int pci_write_config_dword(const struct pci_dev *dev, int where, | |||
| 943 | u32 val) | 943 | u32 val) |
| 944 | { | 944 | { |
| 945 | if (pci_dev_is_disconnected(dev)) | 945 | if (pci_dev_is_disconnected(dev)) |
| 946 | return -ENODEV; | 946 | return PCIBIOS_DEVICE_NOT_FOUND; |
| 947 | return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val); | 947 | return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val); |
| 948 | } | 948 | } |
| 949 | EXPORT_SYMBOL(pci_write_config_dword); | 949 | EXPORT_SYMBOL(pci_write_config_dword); |
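
The access.c hunk above makes the disconnected-device guard return PCIBIOS_DEVICE_NOT_FOUND, the error namespace the config accessors already use, instead of a raw -ENODEV, while still filling reads with all-ones. A standalone sketch of the same guard with invented TOY_* constants and a fake config space:

#include <stdint.h>
#include <stdio.h>

#define TOY_PCIBIOS_SUCCESSFUL		0x00
#define TOY_PCIBIOS_DEVICE_NOT_FOUND	0x86

struct toy_pci_dev {
	int disconnected;
	uint32_t config[64];		/* fake config space, dword indexed */
};

/* Guard speaks the accessor's own error namespace and still fills the
 * value with all-ones, as callers of config reads expect. */
static int toy_read_config_dword(const struct toy_pci_dev *dev, int where,
				 uint32_t *val)
{
	if (dev->disconnected) {
		*val = ~0u;
		return TOY_PCIBIOS_DEVICE_NOT_FOUND;
	}
	*val = dev->config[where / 4];
	return TOY_PCIBIOS_SUCCESSFUL;
}

int main(void)
{
	struct toy_pci_dev dev = { .disconnected = 1 };
	uint32_t v;
	int ret = toy_read_config_dword(&dev, 0, &v);

	printf("ret=0x%02x val=0x%08x\n", ret, v);
	return 0;
}

Callers that compare against PCIBIOS_* codes now see a consistent value whether the device has been hot-removed or was never there.
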
diff --git a/drivers/pci/endpoint/functions/Kconfig b/drivers/pci/endpoint/functions/Kconfig index 175edad42d2f..2942066607e0 100644 --- a/drivers/pci/endpoint/functions/Kconfig +++ b/drivers/pci/endpoint/functions/Kconfig | |||
| @@ -5,6 +5,7 @@ | |||
| 5 | config PCI_EPF_TEST | 5 | config PCI_EPF_TEST |
| 6 | tristate "PCI Endpoint Test driver" | 6 | tristate "PCI Endpoint Test driver" |
| 7 | depends on PCI_ENDPOINT | 7 | depends on PCI_ENDPOINT |
| 8 | select CRC32 | ||
| 8 | help | 9 | help |
| 9 | Enable this configuration option to enable the test driver | 10 | Enable this configuration option to enable the test driver |
| 10 | for PCI Endpoint. | 11 | for PCI Endpoint. |
diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c index 34c862f213c7..0a9b78705ee8 100644 --- a/drivers/perf/arm_pmu_acpi.c +++ b/drivers/perf/arm_pmu_acpi.c | |||
| @@ -29,6 +29,17 @@ static int arm_pmu_acpi_register_irq(int cpu) | |||
| 29 | return -EINVAL; | 29 | return -EINVAL; |
| 30 | 30 | ||
| 31 | gsi = gicc->performance_interrupt; | 31 | gsi = gicc->performance_interrupt; |
| 32 | |||
| 33 | /* | ||
| 34 | * Per the ACPI spec, the MADT cannot describe a PMU that doesn't | ||
| 35 | * have an interrupt. QEMU advertises this by using a GSI of zero, | ||
| 36 | * which is not known to be valid on any hardware despite being | ||
| 37 | * valid per the spec. Take the pragmatic approach and reject a | ||
| 38 | * GSI of zero for now. | ||
| 39 | */ | ||
| 40 | if (!gsi) | ||
| 41 | return 0; | ||
| 42 | |||
| 32 | if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE) | 43 | if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE) |
| 33 | trigger = ACPI_EDGE_SENSITIVE; | 44 | trigger = ACPI_EDGE_SENSITIVE; |
| 34 | else | 45 | else |
diff --git a/drivers/phy/phy-qcom-qmp.c b/drivers/phy/phy-qcom-qmp.c index 727e23be7cac..78ca62897784 100644 --- a/drivers/phy/phy-qcom-qmp.c +++ b/drivers/phy/phy-qcom-qmp.c | |||
| @@ -844,7 +844,7 @@ static int qcom_qmp_phy_vreg_init(struct device *dev) | |||
| 844 | int num = qmp->cfg->num_vregs; | 844 | int num = qmp->cfg->num_vregs; |
| 845 | int i; | 845 | int i; |
| 846 | 846 | ||
| 847 | qmp->vregs = devm_kcalloc(dev, num, sizeof(qmp->vregs), GFP_KERNEL); | 847 | qmp->vregs = devm_kcalloc(dev, num, sizeof(*qmp->vregs), GFP_KERNEL); |
| 848 | if (!qmp->vregs) | 848 | if (!qmp->vregs) |
| 849 | return -ENOMEM; | 849 | return -ENOMEM; |
| 850 | 850 | ||
| @@ -983,16 +983,16 @@ int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id) | |||
| 983 | * Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2. | 983 | * Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2. |
| 984 | */ | 984 | */ |
| 985 | qphy->tx = of_iomap(np, 0); | 985 | qphy->tx = of_iomap(np, 0); |
| 986 | if (IS_ERR(qphy->tx)) | 986 | if (!qphy->tx) |
| 987 | return PTR_ERR(qphy->tx); | 987 | return -ENOMEM; |
| 988 | 988 | ||
| 989 | qphy->rx = of_iomap(np, 1); | 989 | qphy->rx = of_iomap(np, 1); |
| 990 | if (IS_ERR(qphy->rx)) | 990 | if (!qphy->rx) |
| 991 | return PTR_ERR(qphy->rx); | 991 | return -ENOMEM; |
| 992 | 992 | ||
| 993 | qphy->pcs = of_iomap(np, 2); | 993 | qphy->pcs = of_iomap(np, 2); |
| 994 | if (IS_ERR(qphy->pcs)) | 994 | if (!qphy->pcs) |
| 995 | return PTR_ERR(qphy->pcs); | 995 | return -ENOMEM; |
| 996 | 996 | ||
| 997 | /* | 997 | /* |
| 998 | * Get PHY's Pipe clock, if any. USB3 and PCIe are PIPE3 | 998 | * Get PHY's Pipe clock, if any. USB3 and PCIe are PIPE3 |
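
The phy-qcom-qmp.c hunks above fix two mixed-up conventions: of_iomap() reports failure with a NULL pointer, not an ERR_PTR() value, so the old IS_ERR() checks could never fire, and the vregs allocation sized each element by the pointer rather than the pointed-to struct. The sketch below models only the NULL-versus-ERR_PTR distinction in userspace, with TOY_/toy_ names standing in for the kernel helpers:

#include <errno.h>
#include <stdio.h>

#define TOY_MAX_ERRNO	4095

static inline void *toy_err_ptr(long err) { return (void *)err; }
static inline long toy_ptr_err(const void *p) { return (long)p; }
static inline int toy_is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-TOY_MAX_ERRNO;
}

/* Two failure conventions: NULL on error (like of_iomap()) versus an
 * errno encoded in the pointer (ERR_PTR style). */
static void *map_returns_null(int fail)   { return fail ? NULL : (void *)0x1000; }
static void *map_returns_errptr(int fail) { return fail ? toy_err_ptr(-ENOMEM) : (void *)0x2000; }

int main(void)
{
	void *a = map_returns_null(1);
	void *b = map_returns_errptr(1);

	printf("NULL-style failure caught:    %s\n", !a ? "yes" : "no");
	printf("ERR_PTR-style failure caught: %s (err=%ld)\n",
	       toy_is_err(b) ? "yes" : "no", toy_is_err(b) ? toy_ptr_err(b) : 0L);
	/* The mismatch the patch fixes: IS_ERR(NULL) is false. */
	printf("IS_ERR() on a NULL failure:   %s\n", toy_is_err(a) ? "yes" : "no");
	return 0;
}

IS_ERR() on a NULL return is silently false, which is exactly why the checks above had to become !ptr tests.
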
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index 1653cbda6a82..bd459a93b0e7 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c | |||
| @@ -680,30 +680,16 @@ EXPORT_SYMBOL_GPL(pinctrl_generic_remove_group); | |||
| 680 | * pinctrl_generic_free_groups() - removes all pin groups | 680 | * pinctrl_generic_free_groups() - removes all pin groups |
| 681 | * @pctldev: pin controller device | 681 | * @pctldev: pin controller device |
| 682 | * | 682 | * |
| 683 | * Note that the caller must take care of locking. | 683 | * Note that the caller must take care of locking. The pinctrl groups |
| 684 | * are allocated with devm_kzalloc() so no need to free them here. | ||
| 684 | */ | 685 | */ |
| 685 | static void pinctrl_generic_free_groups(struct pinctrl_dev *pctldev) | 686 | static void pinctrl_generic_free_groups(struct pinctrl_dev *pctldev) |
| 686 | { | 687 | { |
| 687 | struct radix_tree_iter iter; | 688 | struct radix_tree_iter iter; |
| 688 | struct group_desc *group; | ||
| 689 | unsigned long *indices; | ||
| 690 | void **slot; | 689 | void **slot; |
| 691 | int i = 0; | ||
| 692 | |||
| 693 | indices = devm_kzalloc(pctldev->dev, sizeof(*indices) * | ||
| 694 | pctldev->num_groups, GFP_KERNEL); | ||
| 695 | if (!indices) | ||
| 696 | return; | ||
| 697 | 690 | ||
| 698 | radix_tree_for_each_slot(slot, &pctldev->pin_group_tree, &iter, 0) | 691 | radix_tree_for_each_slot(slot, &pctldev->pin_group_tree, &iter, 0) |
| 699 | indices[i++] = iter.index; | 692 | radix_tree_delete(&pctldev->pin_group_tree, iter.index); |
| 700 | |||
| 701 | for (i = 0; i < pctldev->num_groups; i++) { | ||
| 702 | group = radix_tree_lookup(&pctldev->pin_group_tree, | ||
| 703 | indices[i]); | ||
| 704 | radix_tree_delete(&pctldev->pin_group_tree, indices[i]); | ||
| 705 | devm_kfree(pctldev->dev, group); | ||
| 706 | } | ||
| 707 | 693 | ||
| 708 | pctldev->num_groups = 0; | 694 | pctldev->num_groups = 0; |
| 709 | } | 695 | } |
diff --git a/drivers/pinctrl/freescale/pinctrl-mxs.c b/drivers/pinctrl/freescale/pinctrl-mxs.c index 41b5b07d5a2b..6852010a6d70 100644 --- a/drivers/pinctrl/freescale/pinctrl-mxs.c +++ b/drivers/pinctrl/freescale/pinctrl-mxs.c | |||
| @@ -194,6 +194,16 @@ static int mxs_pinctrl_get_func_groups(struct pinctrl_dev *pctldev, | |||
| 194 | return 0; | 194 | return 0; |
| 195 | } | 195 | } |
| 196 | 196 | ||
| 197 | static void mxs_pinctrl_rmwl(u32 value, u32 mask, u8 shift, void __iomem *reg) | ||
| 198 | { | ||
| 199 | u32 tmp; | ||
| 200 | |||
| 201 | tmp = readl(reg); | ||
| 202 | tmp &= ~(mask << shift); | ||
| 203 | tmp |= value << shift; | ||
| 204 | writel(tmp, reg); | ||
| 205 | } | ||
| 206 | |||
| 197 | static int mxs_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned selector, | 207 | static int mxs_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned selector, |
| 198 | unsigned group) | 208 | unsigned group) |
| 199 | { | 209 | { |
| @@ -211,8 +221,7 @@ static int mxs_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned selector, | |||
| 211 | reg += bank * 0x20 + pin / 16 * 0x10; | 221 | reg += bank * 0x20 + pin / 16 * 0x10; |
| 212 | shift = pin % 16 * 2; | 222 | shift = pin % 16 * 2; |
| 213 | 223 | ||
| 214 | writel(0x3 << shift, reg + CLR); | 224 | mxs_pinctrl_rmwl(g->muxsel[i], 0x3, shift, reg); |
| 215 | writel(g->muxsel[i] << shift, reg + SET); | ||
| 216 | } | 225 | } |
| 217 | 226 | ||
| 218 | return 0; | 227 | return 0; |
| @@ -279,8 +288,7 @@ static int mxs_pinconf_group_set(struct pinctrl_dev *pctldev, | |||
| 279 | /* mA */ | 288 | /* mA */ |
| 280 | if (config & MA_PRESENT) { | 289 | if (config & MA_PRESENT) { |
| 281 | shift = pin % 8 * 4; | 290 | shift = pin % 8 * 4; |
| 282 | writel(0x3 << shift, reg + CLR); | 291 | mxs_pinctrl_rmwl(ma, 0x3, shift, reg); |
| 283 | writel(ma << shift, reg + SET); | ||
| 284 | } | 292 | } |
| 285 | 293 | ||
| 286 | /* vol */ | 294 | /* vol */ |
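
The pinctrl-mxs.c hunks above introduce mxs_pinctrl_rmwl(), a read-modify-write of one register field, in place of the previous CLR-then-SET pair of writes, which left the field at zero for a moment between the two writes. The same idea on a plain variable, with an invented helper name and an extra value mask added for safety:

#include <stdint.h>
#include <stdio.h>

/* Clear the field, then OR in the new value, in a single store, so no
 * intermediate all-zero value is ever visible. */
static void rmw_field(uint32_t *reg, uint32_t value, uint32_t mask,
		      unsigned int shift)
{
	uint32_t tmp = *reg;

	tmp &= ~(mask << shift);
	tmp |= (value & mask) << shift;
	*reg = tmp;
}

int main(void)
{
	uint32_t muxsel = 0xffffffff;

	rmw_field(&muxsel, 0x2, 0x3, 4);	/* set the 2-bit field at bits 5:4 to 2 */
	printf("muxsel=0x%08x\n", muxsel);	/* -> 0xffffffef */
	return 0;
}

On hardware where that intermediate zero can momentarily select a different mux function or drive strength, removing the window is the whole point of the helper.
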
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 2debba62fac9..20f1b4493994 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c | |||
| @@ -1539,15 +1539,29 @@ static void chv_gpio_irq_handler(struct irq_desc *desc) | |||
| 1539 | * is not listed below. | 1539 | * is not listed below. |
| 1540 | */ | 1540 | */ |
| 1541 | static const struct dmi_system_id chv_no_valid_mask[] = { | 1541 | static const struct dmi_system_id chv_no_valid_mask[] = { |
| 1542 | /* See https://bugzilla.kernel.org/show_bug.cgi?id=194945 */ | ||
| 1542 | { | 1543 | { |
| 1543 | /* See https://bugzilla.kernel.org/show_bug.cgi?id=194945 */ | 1544 | .ident = "Intel_Strago based Chromebooks (All models)", |
| 1544 | .ident = "Acer Chromebook (CYAN)", | ||
| 1545 | .matches = { | 1545 | .matches = { |
| 1546 | DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), | 1546 | DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), |
| 1547 | DMI_MATCH(DMI_PRODUCT_NAME, "Edgar"), | 1547 | DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"), |
| 1548 | DMI_MATCH(DMI_BIOS_DATE, "05/21/2016"), | ||
| 1549 | }, | 1548 | }, |
| 1550 | } | 1549 | }, |
| 1550 | { | ||
| 1551 | .ident = "Acer Chromebook R11 (Cyan)", | ||
| 1552 | .matches = { | ||
| 1553 | DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), | ||
| 1554 | DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"), | ||
| 1555 | }, | ||
| 1556 | }, | ||
| 1557 | { | ||
| 1558 | .ident = "Samsung Chromebook 3 (Celes)", | ||
| 1559 | .matches = { | ||
| 1560 | DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), | ||
| 1561 | DMI_MATCH(DMI_PRODUCT_NAME, "Celes"), | ||
| 1562 | }, | ||
| 1563 | }, | ||
| 1564 | {} | ||
| 1551 | }; | 1565 | }; |
| 1552 | 1566 | ||
| 1553 | static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) | 1567 | static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) |
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c index 0d6b7f4b82af..720a19fd38d2 100644 --- a/drivers/pinctrl/pinconf-generic.c +++ b/drivers/pinctrl/pinconf-generic.c | |||
| @@ -35,7 +35,6 @@ static const struct pin_config_item conf_items[] = { | |||
| 35 | PCONFDUMP(PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, | 35 | PCONFDUMP(PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, |
| 36 | "input bias pull to pin specific state", NULL, false), | 36 | "input bias pull to pin specific state", NULL, false), |
| 37 | PCONFDUMP(PIN_CONFIG_BIAS_PULL_UP, "input bias pull up", NULL, false), | 37 | PCONFDUMP(PIN_CONFIG_BIAS_PULL_UP, "input bias pull up", NULL, false), |
| 38 | PCONFDUMP(PIN_CONFIG_BIDIRECTIONAL, "bi-directional pin operations", NULL, false), | ||
| 39 | PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_DRAIN, "output drive open drain", NULL, false), | 38 | PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_DRAIN, "output drive open drain", NULL, false), |
| 40 | PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_SOURCE, "output drive open source", NULL, false), | 39 | PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_SOURCE, "output drive open source", NULL, false), |
| 41 | PCONFDUMP(PIN_CONFIG_DRIVE_PUSH_PULL, "output drive push pull", NULL, false), | 40 | PCONFDUMP(PIN_CONFIG_DRIVE_PUSH_PULL, "output drive push pull", NULL, false), |
| @@ -161,7 +160,6 @@ static const struct pinconf_generic_params dt_params[] = { | |||
| 161 | { "bias-pull-up", PIN_CONFIG_BIAS_PULL_UP, 1 }, | 160 | { "bias-pull-up", PIN_CONFIG_BIAS_PULL_UP, 1 }, |
| 162 | { "bias-pull-pin-default", PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, 1 }, | 161 | { "bias-pull-pin-default", PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, 1 }, |
| 163 | { "bias-pull-down", PIN_CONFIG_BIAS_PULL_DOWN, 1 }, | 162 | { "bias-pull-down", PIN_CONFIG_BIAS_PULL_DOWN, 1 }, |
| 164 | { "bi-directional", PIN_CONFIG_BIDIRECTIONAL, 1 }, | ||
| 165 | { "drive-open-drain", PIN_CONFIG_DRIVE_OPEN_DRAIN, 0 }, | 163 | { "drive-open-drain", PIN_CONFIG_DRIVE_OPEN_DRAIN, 0 }, |
| 166 | { "drive-open-source", PIN_CONFIG_DRIVE_OPEN_SOURCE, 0 }, | 164 | { "drive-open-source", PIN_CONFIG_DRIVE_OPEN_SOURCE, 0 }, |
| 167 | { "drive-push-pull", PIN_CONFIG_DRIVE_PUSH_PULL, 0 }, | 165 | { "drive-push-pull", PIN_CONFIG_DRIVE_PUSH_PULL, 0 }, |
| @@ -174,7 +172,6 @@ static const struct pinconf_generic_params dt_params[] = { | |||
| 174 | { "input-schmitt-enable", PIN_CONFIG_INPUT_SCHMITT_ENABLE, 1 }, | 172 | { "input-schmitt-enable", PIN_CONFIG_INPUT_SCHMITT_ENABLE, 1 }, |
| 175 | { "low-power-disable", PIN_CONFIG_LOW_POWER_MODE, 0 }, | 173 | { "low-power-disable", PIN_CONFIG_LOW_POWER_MODE, 0 }, |
| 176 | { "low-power-enable", PIN_CONFIG_LOW_POWER_MODE, 1 }, | 174 | { "low-power-enable", PIN_CONFIG_LOW_POWER_MODE, 1 }, |
| 177 | { "output-enable", PIN_CONFIG_OUTPUT, 1, }, | ||
| 178 | { "output-high", PIN_CONFIG_OUTPUT, 1, }, | 175 | { "output-high", PIN_CONFIG_OUTPUT, 1, }, |
| 179 | { "output-low", PIN_CONFIG_OUTPUT, 0, }, | 176 | { "output-low", PIN_CONFIG_OUTPUT, 0, }, |
| 180 | { "power-source", PIN_CONFIG_POWER_SOURCE, 0 }, | 177 | { "power-source", PIN_CONFIG_POWER_SOURCE, 0 }, |
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index 1482d132fbb8..e432ec887479 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c | |||
| @@ -495,64 +495,54 @@ static struct irq_chip amd_gpio_irqchip = { | |||
| 495 | .flags = IRQCHIP_SKIP_SET_WAKE, | 495 | .flags = IRQCHIP_SKIP_SET_WAKE, |
| 496 | }; | 496 | }; |
| 497 | 497 | ||
| 498 | static void amd_gpio_irq_handler(struct irq_desc *desc) | 498 | #define PIN_IRQ_PENDING (BIT(INTERRUPT_STS_OFF) | BIT(WAKE_STS_OFF)) |
| 499 | |||
| 500 | static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id) | ||
| 499 | { | 501 | { |
| 500 | u32 i; | 502 | struct amd_gpio *gpio_dev = dev_id; |
| 501 | u32 off; | 503 | struct gpio_chip *gc = &gpio_dev->gc; |
| 502 | u32 reg; | 504 | irqreturn_t ret = IRQ_NONE; |
| 503 | u32 pin_reg; | 505 | unsigned int i, irqnr; |
| 504 | u64 reg64; | ||
| 505 | int handled = 0; | ||
| 506 | unsigned int irq; | ||
| 507 | unsigned long flags; | 506 | unsigned long flags; |
| 508 | struct irq_chip *chip = irq_desc_get_chip(desc); | 507 | u32 *regs, regval; |
| 509 | struct gpio_chip *gc = irq_desc_get_handler_data(desc); | 508 | u64 status, mask; |
| 510 | struct amd_gpio *gpio_dev = gpiochip_get_data(gc); | ||
| 511 | 509 | ||
| 512 | chained_irq_enter(chip, desc); | 510 | /* Read the wake status */ |
| 513 | /*enable GPIO interrupt again*/ | ||
| 514 | raw_spin_lock_irqsave(&gpio_dev->lock, flags); | 511 | raw_spin_lock_irqsave(&gpio_dev->lock, flags); |
| 515 | reg = readl(gpio_dev->base + WAKE_INT_STATUS_REG1); | 512 | status = readl(gpio_dev->base + WAKE_INT_STATUS_REG1); |
| 516 | reg64 = reg; | 513 | status <<= 32; |
| 517 | reg64 = reg64 << 32; | 514 | status |= readl(gpio_dev->base + WAKE_INT_STATUS_REG0); |
| 518 | |||
| 519 | reg = readl(gpio_dev->base + WAKE_INT_STATUS_REG0); | ||
| 520 | reg64 |= reg; | ||
| 521 | raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); | 515 | raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); |
| 522 | 516 | ||
| 523 | /* | 517 | /* Bit 0-45 contain the relevant status bits */ |
| 524 | * first 46 bits indicates interrupt status. | 518 | status &= (1ULL << 46) - 1; |
| 525 | * one bit represents four interrupt sources. | 519 | regs = gpio_dev->base; |
| 526 | */ | 520 | for (mask = 1, irqnr = 0; status; mask <<= 1, regs += 4, irqnr += 4) { |
| 527 | for (off = 0; off < 46 ; off++) { | 521 | if (!(status & mask)) |
| 528 | if (reg64 & BIT(off)) { | 522 | continue; |
| 529 | for (i = 0; i < 4; i++) { | 523 | status &= ~mask; |
| 530 | pin_reg = readl(gpio_dev->base + | 524 | |
| 531 | (off * 4 + i) * 4); | 525 | /* Each status bit covers four pins */ |
| 532 | if ((pin_reg & BIT(INTERRUPT_STS_OFF)) || | 526 | for (i = 0; i < 4; i++) { |
| 533 | (pin_reg & BIT(WAKE_STS_OFF))) { | 527 | regval = readl(regs + i); |
| 534 | irq = irq_find_mapping(gc->irqdomain, | 528 | if (!(regval & PIN_IRQ_PENDING)) |
| 535 | off * 4 + i); | 529 | continue; |
| 536 | generic_handle_irq(irq); | 530 | irq = irq_find_mapping(gc->irqdomain, irqnr + i); |
| 537 | writel(pin_reg, | 531 | generic_handle_irq(irq); |
| 538 | gpio_dev->base | 532 | /* Clear interrupt */ |
| 539 | + (off * 4 + i) * 4); | 533 | writel(regval, regs + i); |
| 540 | handled++; | 534 | ret = IRQ_HANDLED; |
| 541 | } | ||
| 542 | } | ||
| 543 | } | 535 | } |
| 544 | } | 536 | } |
| 545 | 537 | ||
| 546 | if (handled == 0) | 538 | /* Signal EOI to the GPIO unit */ |
| 547 | handle_bad_irq(desc); | ||
| 548 | |||
| 549 | raw_spin_lock_irqsave(&gpio_dev->lock, flags); | 539 | raw_spin_lock_irqsave(&gpio_dev->lock, flags); |
| 550 | reg = readl(gpio_dev->base + WAKE_INT_MASTER_REG); | 540 | regval = readl(gpio_dev->base + WAKE_INT_MASTER_REG); |
| 551 | reg |= EOI_MASK; | 541 | regval |= EOI_MASK; |
| 552 | writel(reg, gpio_dev->base + WAKE_INT_MASTER_REG); | 542 | writel(regval, gpio_dev->base + WAKE_INT_MASTER_REG); |
| 553 | raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); | 543 | raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); |
| 554 | 544 | ||
| 555 | chained_irq_exit(chip, desc); | 545 | return ret; |
| 556 | } | 546 | } |
| 557 | 547 | ||
| 558 | static int amd_get_groups_count(struct pinctrl_dev *pctldev) | 548 | static int amd_get_groups_count(struct pinctrl_dev *pctldev) |
| @@ -821,10 +811,11 @@ static int amd_gpio_probe(struct platform_device *pdev) | |||
| 821 | goto out2; | 811 | goto out2; |
| 822 | } | 812 | } |
| 823 | 813 | ||
| 824 | gpiochip_set_chained_irqchip(&gpio_dev->gc, | 814 | ret = devm_request_irq(&pdev->dev, irq_base, amd_gpio_irq_handler, 0, |
| 825 | &amd_gpio_irqchip, | 815 | KBUILD_MODNAME, gpio_dev); |
| 826 | irq_base, | 816 | if (ret) |
| 827 | amd_gpio_irq_handler); | 817 | goto out2; |
| 818 | |||
| 828 | platform_set_drvdata(pdev, gpio_dev); | 819 | platform_set_drvdata(pdev, gpio_dev); |
| 829 | 820 | ||
| 830 | dev_dbg(&pdev->dev, "amd gpio driver loaded\n"); | 821 | dev_dbg(&pdev->dev, "amd gpio driver loaded\n"); |
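The reworked AMD handler merges WAKE_INT_STATUS_REG1/REG0 into one 64-bit word and walks only the low 46 bits, each of which stands for a group of four pins. A hedged user-space sketch of that walk, with a made-up status value in place of the hardware registers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Pretend bits 0 and 5 of the combined status word are set. */
        uint64_t status = (1ULL << 0) | (1ULL << 5);
        uint64_t mask;
        unsigned int irqnr;

        /* Only bits 0-45 carry status information. */
        status &= (1ULL << 46) - 1;

        for (mask = 1, irqnr = 0; status; mask <<= 1, irqnr += 4) {
                if (!(status & mask))
                        continue;
                status &= ~mask;

                /* Each status bit covers four consecutive pins. */
                printf("service pins %u..%u\n", irqnr, irqnr + 3);
        }
        return 0;
}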
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c index f141aa0430b1..9dd981ddbb17 100644 --- a/drivers/pinctrl/pinctrl-rockchip.c +++ b/drivers/pinctrl/pinctrl-rockchip.c | |||
| @@ -143,9 +143,6 @@ struct rockchip_drv { | |||
| 143 | * @gpio_chip: gpiolib chip | 143 | * @gpio_chip: gpiolib chip |
| 144 | * @grange: gpio range | 144 | * @grange: gpio range |
| 145 | * @slock: spinlock for the gpio bank | 145 | * @slock: spinlock for the gpio bank |
| 146 | * @irq_lock: bus lock for irq chip | ||
| 147 | * @new_irqs: newly configured irqs which must be muxed as GPIOs in | ||
| 148 | * irq_bus_sync_unlock() | ||
| 149 | */ | 146 | */ |
| 150 | struct rockchip_pin_bank { | 147 | struct rockchip_pin_bank { |
| 151 | void __iomem *reg_base; | 148 | void __iomem *reg_base; |
| @@ -168,8 +165,6 @@ struct rockchip_pin_bank { | |||
| 168 | struct pinctrl_gpio_range grange; | 165 | struct pinctrl_gpio_range grange; |
| 169 | raw_spinlock_t slock; | 166 | raw_spinlock_t slock; |
| 170 | u32 toggle_edge_mode; | 167 | u32 toggle_edge_mode; |
| 171 | struct mutex irq_lock; | ||
| 172 | u32 new_irqs; | ||
| 173 | }; | 168 | }; |
| 174 | 169 | ||
| 175 | #define PIN_BANK(id, pins, label) \ | 170 | #define PIN_BANK(id, pins, label) \ |
| @@ -2134,12 +2129,11 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type) | |||
| 2134 | int ret; | 2129 | int ret; |
| 2135 | 2130 | ||
| 2136 | /* make sure the pin is configured as gpio input */ | 2131 | /* make sure the pin is configured as gpio input */ |
| 2137 | ret = rockchip_verify_mux(bank, d->hwirq, RK_FUNC_GPIO); | 2132 | ret = rockchip_set_mux(bank, d->hwirq, RK_FUNC_GPIO); |
| 2138 | if (ret < 0) | 2133 | if (ret < 0) |
| 2139 | return ret; | 2134 | return ret; |
| 2140 | 2135 | ||
| 2141 | bank->new_irqs |= mask; | 2136 | clk_enable(bank->clk); |
| 2142 | |||
| 2143 | raw_spin_lock_irqsave(&bank->slock, flags); | 2137 | raw_spin_lock_irqsave(&bank->slock, flags); |
| 2144 | 2138 | ||
| 2145 | data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR); | 2139 | data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR); |
| @@ -2197,6 +2191,7 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type) | |||
| 2197 | default: | 2191 | default: |
| 2198 | irq_gc_unlock(gc); | 2192 | irq_gc_unlock(gc); |
| 2199 | raw_spin_unlock_irqrestore(&bank->slock, flags); | 2193 | raw_spin_unlock_irqrestore(&bank->slock, flags); |
| 2194 | clk_disable(bank->clk); | ||
| 2200 | return -EINVAL; | 2195 | return -EINVAL; |
| 2201 | } | 2196 | } |
| 2202 | 2197 | ||
| @@ -2205,6 +2200,7 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type) | |||
| 2205 | 2200 | ||
| 2206 | irq_gc_unlock(gc); | 2201 | irq_gc_unlock(gc); |
| 2207 | raw_spin_unlock_irqrestore(&bank->slock, flags); | 2202 | raw_spin_unlock_irqrestore(&bank->slock, flags); |
| 2203 | clk_disable(bank->clk); | ||
| 2208 | 2204 | ||
| 2209 | return 0; | 2205 | return 0; |
| 2210 | } | 2206 | } |
| @@ -2248,34 +2244,6 @@ static void rockchip_irq_disable(struct irq_data *d) | |||
| 2248 | clk_disable(bank->clk); | 2244 | clk_disable(bank->clk); |
| 2249 | } | 2245 | } |
| 2250 | 2246 | ||
| 2251 | static void rockchip_irq_bus_lock(struct irq_data *d) | ||
| 2252 | { | ||
| 2253 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | ||
| 2254 | struct rockchip_pin_bank *bank = gc->private; | ||
| 2255 | |||
| 2256 | clk_enable(bank->clk); | ||
| 2257 | mutex_lock(&bank->irq_lock); | ||
| 2258 | } | ||
| 2259 | |||
| 2260 | static void rockchip_irq_bus_sync_unlock(struct irq_data *d) | ||
| 2261 | { | ||
| 2262 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | ||
| 2263 | struct rockchip_pin_bank *bank = gc->private; | ||
| 2264 | |||
| 2265 | while (bank->new_irqs) { | ||
| 2266 | unsigned int irq = __ffs(bank->new_irqs); | ||
| 2267 | int ret; | ||
| 2268 | |||
| 2269 | ret = rockchip_set_mux(bank, irq, RK_FUNC_GPIO); | ||
| 2270 | WARN_ON(ret < 0); | ||
| 2271 | |||
| 2272 | bank->new_irqs &= ~BIT(irq); | ||
| 2273 | } | ||
| 2274 | |||
| 2275 | mutex_unlock(&bank->irq_lock); | ||
| 2276 | clk_disable(bank->clk); | ||
| 2277 | } | ||
| 2278 | |||
| 2279 | static int rockchip_interrupts_register(struct platform_device *pdev, | 2247 | static int rockchip_interrupts_register(struct platform_device *pdev, |
| 2280 | struct rockchip_pinctrl *info) | 2248 | struct rockchip_pinctrl *info) |
| 2281 | { | 2249 | { |
| @@ -2342,9 +2310,6 @@ static int rockchip_interrupts_register(struct platform_device *pdev, | |||
| 2342 | gc->chip_types[0].chip.irq_suspend = rockchip_irq_suspend; | 2310 | gc->chip_types[0].chip.irq_suspend = rockchip_irq_suspend; |
| 2343 | gc->chip_types[0].chip.irq_resume = rockchip_irq_resume; | 2311 | gc->chip_types[0].chip.irq_resume = rockchip_irq_resume; |
| 2344 | gc->chip_types[0].chip.irq_set_type = rockchip_irq_set_type; | 2312 | gc->chip_types[0].chip.irq_set_type = rockchip_irq_set_type; |
| 2345 | gc->chip_types[0].chip.irq_bus_lock = rockchip_irq_bus_lock; | ||
| 2346 | gc->chip_types[0].chip.irq_bus_sync_unlock = | ||
| 2347 | rockchip_irq_bus_sync_unlock; | ||
| 2348 | gc->wake_enabled = IRQ_MSK(bank->nr_pins); | 2313 | gc->wake_enabled = IRQ_MSK(bank->nr_pins); |
| 2349 | 2314 | ||
| 2350 | irq_set_chained_handler_and_data(bank->irq, | 2315 | irq_set_chained_handler_and_data(bank->irq, |
| @@ -2518,7 +2483,6 @@ static struct rockchip_pin_ctrl *rockchip_pinctrl_get_soc_data( | |||
| 2518 | int bank_pins = 0; | 2483 | int bank_pins = 0; |
| 2519 | 2484 | ||
| 2520 | raw_spin_lock_init(&bank->slock); | 2485 | raw_spin_lock_init(&bank->slock); |
| 2521 | mutex_init(&bank->irq_lock); | ||
| 2522 | bank->drvdata = d; | 2486 | bank->drvdata = d; |
| 2523 | bank->pin_base = ctrl->nr_pins; | 2487 | bank->pin_base = ctrl->nr_pins; |
| 2524 | ctrl->nr_pins += bank->nr_pins; | 2488 | ctrl->nr_pins += bank->nr_pins; |
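With the irq_bus_lock/irq_bus_sync_unlock pair removed, rockchip_irq_set_type must now enable the bank clock itself and release it on every exit path, including the -EINVAL one. A small sketch of that balance, with hypothetical stand-ins for clk_enable()/clk_disable():

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-ins for clk_enable()/clk_disable(). */
static int  resource_get(void) { puts("clk_enable");  return 0; }
static void resource_put(void) { puts("clk_disable"); }

static int set_type(unsigned int type)
{
        int ret = resource_get();

        if (ret)
                return ret;

        switch (type) {
        case 1:         /* supported trigger type */
                puts("program trigger");
                break;
        default:        /* unsupported: still release the clock before bailing out */
                ret = -EINVAL;
                break;
        }

        resource_put();         /* single exit point keeps get/put balanced */
        return ret;
}

int main(void)
{
        printf("ret=%d\n", set_type(1));
        printf("ret=%d\n", set_type(7));
        return 0;
}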
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c index 9fd6d9087dc5..16b3ae5e4f44 100644 --- a/drivers/pinctrl/pinmux.c +++ b/drivers/pinctrl/pinmux.c | |||
| @@ -826,30 +826,17 @@ EXPORT_SYMBOL_GPL(pinmux_generic_remove_function); | |||
| 826 | * pinmux_generic_free_functions() - removes all functions | 826 | * pinmux_generic_free_functions() - removes all functions |
| 827 | * @pctldev: pin controller device | 827 | * @pctldev: pin controller device |
| 828 | * | 828 | * |
| 829 | * Note that the caller must take care of locking. | 829 | * Note that the caller must take care of locking. The pinctrl |
| 830 | * functions are allocated with devm_kzalloc() so no need to free | ||
| 831 | * them here. | ||
| 830 | */ | 832 | */ |
| 831 | void pinmux_generic_free_functions(struct pinctrl_dev *pctldev) | 833 | void pinmux_generic_free_functions(struct pinctrl_dev *pctldev) |
| 832 | { | 834 | { |
| 833 | struct radix_tree_iter iter; | 835 | struct radix_tree_iter iter; |
| 834 | struct function_desc *function; | ||
| 835 | unsigned long *indices; | ||
| 836 | void **slot; | 836 | void **slot; |
| 837 | int i = 0; | ||
| 838 | |||
| 839 | indices = devm_kzalloc(pctldev->dev, sizeof(*indices) * | ||
| 840 | pctldev->num_functions, GFP_KERNEL); | ||
| 841 | if (!indices) | ||
| 842 | return; | ||
| 843 | 837 | ||
| 844 | radix_tree_for_each_slot(slot, &pctldev->pin_function_tree, &iter, 0) | 838 | radix_tree_for_each_slot(slot, &pctldev->pin_function_tree, &iter, 0) |
| 845 | indices[i++] = iter.index; | 839 | radix_tree_delete(&pctldev->pin_function_tree, iter.index); |
| 846 | |||
| 847 | for (i = 0; i < pctldev->num_functions; i++) { | ||
| 848 | function = radix_tree_lookup(&pctldev->pin_function_tree, | ||
| 849 | indices[i]); | ||
| 850 | radix_tree_delete(&pctldev->pin_function_tree, indices[i]); | ||
| 851 | devm_kfree(pctldev->dev, function); | ||
| 852 | } | ||
| 853 | 840 | ||
| 854 | pctldev->num_functions = 0; | 841 | pctldev->num_functions = 0; |
| 855 | } | 842 | } |
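pinmux_generic_free_functions() now removes the radix-tree entries directly while iterating and no longer frees the descriptors, since they are devm-allocated and released with the device. A rough analogue of the "drop while walking, no scratch index array" simplification on an ordinary linked list (user-space C, not the radix-tree API):

#include <stdio.h>
#include <stdlib.h>

struct func {
        int id;
        struct func *next;
};

/* Unlink and free every node in one pass; no index array needed. */
static void free_all(struct func **head)
{
        struct func *cur = *head;

        while (cur) {
                struct func *next = cur->next;

                free(cur);      /* the kernel code skips this: devm owns the memory */
                cur = next;
        }
        *head = NULL;
}

int main(void)
{
        struct func *head = NULL;

        for (int i = 0; i < 3; i++) {
                struct func *f = malloc(sizeof(*f));

                if (!f)
                        return 1;
                f->id = i;
                f->next = head;
                head = f;
        }
        free_all(&head);
        printf("list emptied: %s\n", head ? "no" : "yes");
        return 0;
}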
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c index d3c5f5dfbbd7..222b6685b09f 100644 --- a/drivers/pinctrl/stm32/pinctrl-stm32.c +++ b/drivers/pinctrl/stm32/pinctrl-stm32.c | |||
| @@ -798,7 +798,7 @@ static int stm32_pconf_parse_conf(struct pinctrl_dev *pctldev, | |||
| 798 | break; | 798 | break; |
| 799 | case PIN_CONFIG_OUTPUT: | 799 | case PIN_CONFIG_OUTPUT: |
| 800 | __stm32_gpio_set(bank, offset, arg); | 800 | __stm32_gpio_set(bank, offset, arg); |
| 801 | ret = stm32_pmx_gpio_set_direction(pctldev, NULL, pin, false); | 801 | ret = stm32_pmx_gpio_set_direction(pctldev, range, pin, false); |
| 802 | break; | 802 | break; |
| 803 | default: | 803 | default: |
| 804 | ret = -EINVAL; | 804 | ret = -EINVAL; |
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c index 9aec1d2232dd..6624499eae72 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c | |||
| @@ -394,7 +394,7 @@ static const struct sunxi_desc_pin sun8i_a83t_pins[] = { | |||
| 394 | SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 18), | 394 | SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 18), |
| 395 | SUNXI_FUNCTION(0x0, "gpio_in"), | 395 | SUNXI_FUNCTION(0x0, "gpio_in"), |
| 396 | SUNXI_FUNCTION(0x1, "gpio_out"), | 396 | SUNXI_FUNCTION(0x1, "gpio_out"), |
| 397 | SUNXI_FUNCTION(0x3, "owa")), /* DOUT */ | 397 | SUNXI_FUNCTION(0x3, "spdif")), /* DOUT */ |
| 398 | SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 19), | 398 | SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 19), |
| 399 | SUNXI_FUNCTION(0x0, "gpio_in"), | 399 | SUNXI_FUNCTION(0x0, "gpio_in"), |
| 400 | SUNXI_FUNCTION(0x1, "gpio_out")), | 400 | SUNXI_FUNCTION(0x1, "gpio_out")), |
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c index 2de1e603bd2b..5f3672153b12 100644 --- a/drivers/platform/goldfish/goldfish_pipe.c +++ b/drivers/platform/goldfish/goldfish_pipe.c | |||
| @@ -704,7 +704,7 @@ static int get_free_pipe_id_locked(struct goldfish_pipe_dev *dev) | |||
| 704 | /* Reallocate the array */ | 704 | /* Reallocate the array */ |
| 705 | u32 new_capacity = 2 * dev->pipes_capacity; | 705 | u32 new_capacity = 2 * dev->pipes_capacity; |
| 706 | struct goldfish_pipe **pipes = | 706 | struct goldfish_pipe **pipes = |
| 707 | kcalloc(new_capacity, sizeof(*pipes), GFP_KERNEL); | 707 | kcalloc(new_capacity, sizeof(*pipes), GFP_ATOMIC); |
| 708 | if (!pipes) | 708 | if (!pipes) |
| 709 | return -ENOMEM; | 709 | return -ENOMEM; |
| 710 | memcpy(pipes, dev->pipes, sizeof(*pipes) * dev->pipes_capacity); | 710 | memcpy(pipes, dev->pipes, sizeof(*pipes) * dev->pipes_capacity); |
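get_free_pipe_id_locked() runs with the device lock held, so the reallocation above switches to GFP_ATOMIC, which will not sleep. A minimal kernel-style sketch of the pattern; demo_dev and demo_grow_locked() are hypothetical, only the allocation flag is the point:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>

struct demo_dev {
        spinlock_t lock;
        void **slots;
        u32 capacity;
};

/* Called with dev->lock held: sleeping allocators are off limits here,
 * so the array must be grown with GFP_ATOMIC. */
static int demo_grow_locked(struct demo_dev *dev)
{
        u32 new_capacity = 2 * dev->capacity;
        void **slots = kcalloc(new_capacity, sizeof(*slots), GFP_ATOMIC);

        if (!slots)
                return -ENOMEM;

        memcpy(slots, dev->slots, sizeof(*slots) * dev->capacity);
        kfree(dev->slots);
        dev->slots = slots;
        dev->capacity = new_capacity;
        return 0;
}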
diff --git a/drivers/platform/x86/intel_telemetry_debugfs.c b/drivers/platform/x86/intel_telemetry_debugfs.c index ef29f18b1951..4cc2f4ea0a25 100644 --- a/drivers/platform/x86/intel_telemetry_debugfs.c +++ b/drivers/platform/x86/intel_telemetry_debugfs.c | |||
| @@ -97,11 +97,9 @@ | |||
| 97 | } \ | 97 | } \ |
| 98 | } | 98 | } |
| 99 | 99 | ||
| 100 | #ifdef CONFIG_PM_SLEEP | ||
| 101 | static u8 suspend_prep_ok; | 100 | static u8 suspend_prep_ok; |
| 102 | static u32 suspend_shlw_ctr_temp, suspend_deep_ctr_temp; | 101 | static u32 suspend_shlw_ctr_temp, suspend_deep_ctr_temp; |
| 103 | static u64 suspend_shlw_res_temp, suspend_deep_res_temp; | 102 | static u64 suspend_shlw_res_temp, suspend_deep_res_temp; |
| 104 | #endif | ||
| 105 | 103 | ||
| 106 | struct telemetry_susp_stats { | 104 | struct telemetry_susp_stats { |
| 107 | u32 shlw_swake_ctr; | 105 | u32 shlw_swake_ctr; |
| @@ -807,7 +805,6 @@ static const struct file_operations telem_ioss_trc_verb_ops = { | |||
| 807 | .release = single_release, | 805 | .release = single_release, |
| 808 | }; | 806 | }; |
| 809 | 807 | ||
| 810 | #ifdef CONFIG_PM_SLEEP | ||
| 811 | static int pm_suspend_prep_cb(void) | 808 | static int pm_suspend_prep_cb(void) |
| 812 | { | 809 | { |
| 813 | struct telemetry_evtlog evtlog[TELEM_MAX_OS_ALLOCATED_EVENTS]; | 810 | struct telemetry_evtlog evtlog[TELEM_MAX_OS_ALLOCATED_EVENTS]; |
| @@ -937,7 +934,6 @@ static int pm_notification(struct notifier_block *this, | |||
| 937 | static struct notifier_block pm_notifier = { | 934 | static struct notifier_block pm_notifier = { |
| 938 | .notifier_call = pm_notification, | 935 | .notifier_call = pm_notification, |
| 939 | }; | 936 | }; |
| 940 | #endif /* CONFIG_PM_SLEEP */ | ||
| 941 | 937 | ||
| 942 | static int __init telemetry_debugfs_init(void) | 938 | static int __init telemetry_debugfs_init(void) |
| 943 | { | 939 | { |
| @@ -960,14 +956,13 @@ static int __init telemetry_debugfs_init(void) | |||
| 960 | if (err < 0) | 956 | if (err < 0) |
| 961 | return -EINVAL; | 957 | return -EINVAL; |
| 962 | 958 | ||
| 963 | |||
| 964 | #ifdef CONFIG_PM_SLEEP | ||
| 965 | register_pm_notifier(&pm_notifier); | 959 | register_pm_notifier(&pm_notifier); |
| 966 | #endif /* CONFIG_PM_SLEEP */ | ||
| 967 | 960 | ||
| 968 | debugfs_conf->telemetry_dbg_dir = debugfs_create_dir("telemetry", NULL); | 961 | debugfs_conf->telemetry_dbg_dir = debugfs_create_dir("telemetry", NULL); |
| 969 | if (!debugfs_conf->telemetry_dbg_dir) | 962 | if (!debugfs_conf->telemetry_dbg_dir) { |
| 970 | return -ENOMEM; | 963 | err = -ENOMEM; |
| 964 | goto out_pm; | ||
| 965 | } | ||
| 971 | 966 | ||
| 972 | f = debugfs_create_file("pss_info", S_IFREG | S_IRUGO, | 967 | f = debugfs_create_file("pss_info", S_IFREG | S_IRUGO, |
| 973 | debugfs_conf->telemetry_dbg_dir, NULL, | 968 | debugfs_conf->telemetry_dbg_dir, NULL, |
| @@ -1014,6 +1009,8 @@ static int __init telemetry_debugfs_init(void) | |||
| 1014 | out: | 1009 | out: |
| 1015 | debugfs_remove_recursive(debugfs_conf->telemetry_dbg_dir); | 1010 | debugfs_remove_recursive(debugfs_conf->telemetry_dbg_dir); |
| 1016 | debugfs_conf->telemetry_dbg_dir = NULL; | 1011 | debugfs_conf->telemetry_dbg_dir = NULL; |
| 1012 | out_pm: | ||
| 1013 | unregister_pm_notifier(&pm_notifier); | ||
| 1017 | 1014 | ||
| 1018 | return err; | 1015 | return err; |
| 1019 | } | 1016 | } |
| @@ -1022,6 +1019,7 @@ static void __exit telemetry_debugfs_exit(void) | |||
| 1022 | { | 1019 | { |
| 1023 | debugfs_remove_recursive(debugfs_conf->telemetry_dbg_dir); | 1020 | debugfs_remove_recursive(debugfs_conf->telemetry_dbg_dir); |
| 1024 | debugfs_conf->telemetry_dbg_dir = NULL; | 1021 | debugfs_conf->telemetry_dbg_dir = NULL; |
| 1022 | unregister_pm_notifier(&pm_notifier); | ||
| 1025 | } | 1023 | } |
| 1026 | 1024 | ||
| 1027 | late_initcall(telemetry_debugfs_init); | 1025 | late_initcall(telemetry_debugfs_init); |
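telemetry_debugfs_init() now registers the PM notifier unconditionally, so every later failure has to unwind it, which is what the new out_pm label does. A sketch of the goto-based unwind with hypothetical setup/teardown helpers standing in for the notifier and debugfs calls:

#include <stdio.h>

/* Hypothetical stand-ins for register_pm_notifier() and the debugfs setup. */
static int  notifier_register(void)   { puts("register notifier"); return 0; }
static void notifier_unregister(void) { puts("unregister notifier"); }
static int  create_dir(void)          { puts("create debugfs dir"); return -1; /* simulate failure */ }
static void remove_dir(void)          { puts("remove debugfs dir"); }
static int  create_files(void)        { puts("create debugfs files"); return 0; }

static int demo_init(void)
{
        int err;

        err = notifier_register();
        if (err)
                return err;

        err = create_dir();
        if (err)
                goto out_notifier;      /* undo only what was already set up */

        err = create_files();
        if (err)
                goto out_dir;

        return 0;

out_dir:
        remove_dir();
out_notifier:
        notifier_unregister();
        return err;
}

int main(void)
{
        printf("init returned %d\n", demo_init());
        return 0;
}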
diff --git a/drivers/reset/hisilicon/hi6220_reset.c b/drivers/reset/hisilicon/hi6220_reset.c index 35ce53edabf9..d5e5229308f2 100644 --- a/drivers/reset/hisilicon/hi6220_reset.c +++ b/drivers/reset/hisilicon/hi6220_reset.c | |||
| @@ -155,3 +155,5 @@ static int __init hi6220_reset_init(void) | |||
| 155 | } | 155 | } |
| 156 | 156 | ||
| 157 | postcore_initcall(hi6220_reset_init); | 157 | postcore_initcall(hi6220_reset_init); |
| 158 | |||
| 159 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c index e72abbc18ee3..a66a317f3e4f 100644 --- a/drivers/s390/cio/vfio_ccw_ops.c +++ b/drivers/s390/cio/vfio_ccw_ops.c | |||
| @@ -70,14 +70,14 @@ static ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf) | |||
| 70 | { | 70 | { |
| 71 | return sprintf(buf, "I/O subchannel (Non-QDIO)\n"); | 71 | return sprintf(buf, "I/O subchannel (Non-QDIO)\n"); |
| 72 | } | 72 | } |
| 73 | MDEV_TYPE_ATTR_RO(name); | 73 | static MDEV_TYPE_ATTR_RO(name); |
| 74 | 74 | ||
| 75 | static ssize_t device_api_show(struct kobject *kobj, struct device *dev, | 75 | static ssize_t device_api_show(struct kobject *kobj, struct device *dev, |
| 76 | char *buf) | 76 | char *buf) |
| 77 | { | 77 | { |
| 78 | return sprintf(buf, "%s\n", VFIO_DEVICE_API_CCW_STRING); | 78 | return sprintf(buf, "%s\n", VFIO_DEVICE_API_CCW_STRING); |
| 79 | } | 79 | } |
| 80 | MDEV_TYPE_ATTR_RO(device_api); | 80 | static MDEV_TYPE_ATTR_RO(device_api); |
| 81 | 81 | ||
| 82 | static ssize_t available_instances_show(struct kobject *kobj, | 82 | static ssize_t available_instances_show(struct kobject *kobj, |
| 83 | struct device *dev, char *buf) | 83 | struct device *dev, char *buf) |
| @@ -86,7 +86,7 @@ static ssize_t available_instances_show(struct kobject *kobj, | |||
| 86 | 86 | ||
| 87 | return sprintf(buf, "%d\n", atomic_read(&private->avail)); | 87 | return sprintf(buf, "%d\n", atomic_read(&private->avail)); |
| 88 | } | 88 | } |
| 89 | MDEV_TYPE_ATTR_RO(available_instances); | 89 | static MDEV_TYPE_ATTR_RO(available_instances); |
| 90 | 90 | ||
| 91 | static struct attribute *mdev_types_attrs[] = { | 91 | static struct attribute *mdev_types_attrs[] = { |
| 92 | &mdev_type_attr_name.attr, | 92 | &mdev_type_attr_name.attr, |
| @@ -100,7 +100,7 @@ static struct attribute_group mdev_type_group = { | |||
| 100 | .attrs = mdev_types_attrs, | 100 | .attrs = mdev_types_attrs, |
| 101 | }; | 101 | }; |
| 102 | 102 | ||
| 103 | struct attribute_group *mdev_type_groups[] = { | 103 | static struct attribute_group *mdev_type_groups[] = { |
| 104 | &mdev_type_group, | 104 | &mdev_type_group, |
| 105 | NULL, | 105 | NULL, |
| 106 | }; | 106 | }; |
| @@ -152,7 +152,7 @@ static int vfio_ccw_mdev_open(struct mdev_device *mdev) | |||
| 152 | &events, &private->nb); | 152 | &events, &private->nb); |
| 153 | } | 153 | } |
| 154 | 154 | ||
| 155 | void vfio_ccw_mdev_release(struct mdev_device *mdev) | 155 | static void vfio_ccw_mdev_release(struct mdev_device *mdev) |
| 156 | { | 156 | { |
| 157 | struct vfio_ccw_private *private = | 157 | struct vfio_ccw_private *private = |
| 158 | dev_get_drvdata(mdev_parent_dev(mdev)); | 158 | dev_get_drvdata(mdev_parent_dev(mdev)); |
| @@ -233,7 +233,7 @@ static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info, | |||
| 233 | } | 233 | } |
| 234 | } | 234 | } |
| 235 | 235 | ||
| 236 | int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info) | 236 | static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info) |
| 237 | { | 237 | { |
| 238 | if (info->index != VFIO_CCW_IO_IRQ_INDEX) | 238 | if (info->index != VFIO_CCW_IO_IRQ_INDEX) |
| 239 | return -EINVAL; | 239 | return -EINVAL; |
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 9be4596d8a08..ea099910b4e9 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c | |||
| @@ -668,10 +668,28 @@ static int ap_device_probe(struct device *dev) | |||
| 668 | struct ap_driver *ap_drv = to_ap_drv(dev->driver); | 668 | struct ap_driver *ap_drv = to_ap_drv(dev->driver); |
| 669 | int rc; | 669 | int rc; |
| 670 | 670 | ||
| 671 | /* Add queue/card to list of active queues/cards */ | ||
| 672 | spin_lock_bh(&ap_list_lock); | ||
| 673 | if (is_card_dev(dev)) | ||
| 674 | list_add(&to_ap_card(dev)->list, &ap_card_list); | ||
| 675 | else | ||
| 676 | list_add(&to_ap_queue(dev)->list, | ||
| 677 | &to_ap_queue(dev)->card->queues); | ||
| 678 | spin_unlock_bh(&ap_list_lock); | ||
| 679 | |||
| 671 | ap_dev->drv = ap_drv; | 680 | ap_dev->drv = ap_drv; |
| 672 | rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV; | 681 | rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV; |
| 673 | if (rc) | 682 | |
| 683 | if (rc) { | ||
| 684 | spin_lock_bh(&ap_list_lock); | ||
| 685 | if (is_card_dev(dev)) | ||
| 686 | list_del_init(&to_ap_card(dev)->list); | ||
| 687 | else | ||
| 688 | list_del_init(&to_ap_queue(dev)->list); | ||
| 689 | spin_unlock_bh(&ap_list_lock); | ||
| 674 | ap_dev->drv = NULL; | 690 | ap_dev->drv = NULL; |
| 691 | } | ||
| 692 | |||
| 675 | return rc; | 693 | return rc; |
| 676 | } | 694 | } |
| 677 | 695 | ||
| @@ -680,14 +698,17 @@ static int ap_device_remove(struct device *dev) | |||
| 680 | struct ap_device *ap_dev = to_ap_dev(dev); | 698 | struct ap_device *ap_dev = to_ap_dev(dev); |
| 681 | struct ap_driver *ap_drv = ap_dev->drv; | 699 | struct ap_driver *ap_drv = ap_dev->drv; |
| 682 | 700 | ||
| 701 | if (ap_drv->remove) | ||
| 702 | ap_drv->remove(ap_dev); | ||
| 703 | |||
| 704 | /* Remove queue/card from list of active queues/cards */ | ||
| 683 | spin_lock_bh(&ap_list_lock); | 705 | spin_lock_bh(&ap_list_lock); |
| 684 | if (is_card_dev(dev)) | 706 | if (is_card_dev(dev)) |
| 685 | list_del_init(&to_ap_card(dev)->list); | 707 | list_del_init(&to_ap_card(dev)->list); |
| 686 | else | 708 | else |
| 687 | list_del_init(&to_ap_queue(dev)->list); | 709 | list_del_init(&to_ap_queue(dev)->list); |
| 688 | spin_unlock_bh(&ap_list_lock); | 710 | spin_unlock_bh(&ap_list_lock); |
| 689 | if (ap_drv->remove) | 711 | |
| 690 | ap_drv->remove(ap_dev); | ||
| 691 | return 0; | 712 | return 0; |
| 692 | } | 713 | } |
| 693 | 714 | ||
| @@ -1056,10 +1077,6 @@ static void ap_scan_bus(struct work_struct *unused) | |||
| 1056 | } | 1077 | } |
| 1057 | /* get it and thus adjust reference counter */ | 1078 | /* get it and thus adjust reference counter */ |
| 1058 | get_device(&ac->ap_dev.device); | 1079 | get_device(&ac->ap_dev.device); |
| 1059 | /* Add card device to card list */ | ||
| 1060 | spin_lock_bh(&ap_list_lock); | ||
| 1061 | list_add(&ac->list, &ap_card_list); | ||
| 1062 | spin_unlock_bh(&ap_list_lock); | ||
| 1063 | } | 1080 | } |
| 1064 | /* now create the new queue device */ | 1081 | /* now create the new queue device */ |
| 1065 | aq = ap_queue_create(qid, type); | 1082 | aq = ap_queue_create(qid, type); |
| @@ -1070,10 +1087,6 @@ static void ap_scan_bus(struct work_struct *unused) | |||
| 1070 | aq->ap_dev.device.parent = &ac->ap_dev.device; | 1087 | aq->ap_dev.device.parent = &ac->ap_dev.device; |
| 1071 | dev_set_name(&aq->ap_dev.device, | 1088 | dev_set_name(&aq->ap_dev.device, |
| 1072 | "%02x.%04x", id, dom); | 1089 | "%02x.%04x", id, dom); |
| 1073 | /* Add queue device to card queue list */ | ||
| 1074 | spin_lock_bh(&ap_list_lock); | ||
| 1075 | list_add(&aq->list, &ac->queues); | ||
| 1076 | spin_unlock_bh(&ap_list_lock); | ||
| 1077 | /* Start with a device reset */ | 1090 | /* Start with a device reset */ |
| 1078 | spin_lock_bh(&aq->lock); | 1091 | spin_lock_bh(&aq->lock); |
| 1079 | ap_wait(ap_sm_event(aq, AP_EVENT_POLL)); | 1092 | ap_wait(ap_sm_event(aq, AP_EVENT_POLL)); |
| @@ -1081,9 +1094,6 @@ static void ap_scan_bus(struct work_struct *unused) | |||
| 1081 | /* Register device */ | 1094 | /* Register device */ |
| 1082 | rc = device_register(&aq->ap_dev.device); | 1095 | rc = device_register(&aq->ap_dev.device); |
| 1083 | if (rc) { | 1096 | if (rc) { |
| 1084 | spin_lock_bh(&ap_list_lock); | ||
| 1085 | list_del_init(&aq->list); | ||
| 1086 | spin_unlock_bh(&ap_list_lock); | ||
| 1087 | put_device(&aq->ap_dev.device); | 1097 | put_device(&aq->ap_dev.device); |
| 1088 | continue; | 1098 | continue; |
| 1089 | } | 1099 | } |
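ap_device_probe() now links the card or queue onto the active list before calling the driver probe callback and unlinks it again if that callback fails, so the list only ever holds devices with a bound driver. A user-space sketch of that add-then-roll-back ordering, with a plain array standing in for the locked list:

#include <stdio.h>

#define MAX_DEV 8

static int active[MAX_DEV];
static int nr_active;

static void active_add(int id)
{
        active[nr_active++] = id;
}

static void active_del(int id)
{
        for (int i = 0; i < nr_active; i++)
                if (active[i] == id) {
                        active[i] = active[--nr_active];
                        return;
                }
}

/* Pretend the driver rejects odd device ids. */
static int driver_probe(int id)
{
        return (id & 1) ? -1 : 0;
}

static int device_probe(int id)
{
        int rc;

        active_add(id);         /* visible on the active list during probe */
        rc = driver_probe(id);
        if (rc)
                active_del(id); /* roll back on failure */
        return rc;
}

int main(void)
{
        device_probe(2);
        device_probe(3);
        printf("active devices: %d\n", nr_active);      /* 1 */
        return 0;
}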
diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c index cfa161ccc74e..836efac96813 100644 --- a/drivers/s390/crypto/ap_card.c +++ b/drivers/s390/crypto/ap_card.c | |||
| @@ -160,7 +160,14 @@ static struct device_type ap_card_type = { | |||
| 160 | 160 | ||
| 161 | static void ap_card_device_release(struct device *dev) | 161 | static void ap_card_device_release(struct device *dev) |
| 162 | { | 162 | { |
| 163 | kfree(to_ap_card(dev)); | 163 | struct ap_card *ac = to_ap_card(dev); |
| 164 | |||
| 165 | if (!list_empty(&ac->list)) { | ||
| 166 | spin_lock_bh(&ap_list_lock); | ||
| 167 | list_del_init(&ac->list); | ||
| 168 | spin_unlock_bh(&ap_list_lock); | ||
| 169 | } | ||
| 170 | kfree(ac); | ||
| 164 | } | 171 | } |
| 165 | 172 | ||
| 166 | struct ap_card *ap_card_create(int id, int queue_depth, int device_type, | 173 | struct ap_card *ap_card_create(int id, int queue_depth, int device_type, |
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c index 480c58a63769..0f1a5d02acb0 100644 --- a/drivers/s390/crypto/ap_queue.c +++ b/drivers/s390/crypto/ap_queue.c | |||
| @@ -584,7 +584,14 @@ static struct device_type ap_queue_type = { | |||
| 584 | 584 | ||
| 585 | static void ap_queue_device_release(struct device *dev) | 585 | static void ap_queue_device_release(struct device *dev) |
| 586 | { | 586 | { |
| 587 | kfree(to_ap_queue(dev)); | 587 | struct ap_queue *aq = to_ap_queue(dev); |
| 588 | |||
| 589 | if (!list_empty(&aq->list)) { | ||
| 590 | spin_lock_bh(&ap_list_lock); | ||
| 591 | list_del_init(&aq->list); | ||
| 592 | spin_unlock_bh(&ap_list_lock); | ||
| 593 | } | ||
| 594 | kfree(aq); | ||
| 588 | } | 595 | } |
| 589 | 596 | ||
| 590 | struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type) | 597 | struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type) |
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index dba94b486f05..fa732bd86729 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c | |||
| @@ -1954,7 +1954,6 @@ static void netiucv_free_netdevice(struct net_device *dev) | |||
| 1954 | privptr->conn = NULL; privptr->fsm = NULL; | 1954 | privptr->conn = NULL; privptr->fsm = NULL; |
| 1955 | /* privptr gets freed by free_netdev() */ | 1955 | /* privptr gets freed by free_netdev() */ |
| 1956 | } | 1956 | } |
| 1957 | free_netdev(dev); | ||
| 1958 | } | 1957 | } |
| 1959 | 1958 | ||
| 1960 | /** | 1959 | /** |
| @@ -1972,7 +1971,8 @@ static void netiucv_setup_netdevice(struct net_device *dev) | |||
| 1972 | dev->mtu = NETIUCV_MTU_DEFAULT; | 1971 | dev->mtu = NETIUCV_MTU_DEFAULT; |
| 1973 | dev->min_mtu = 576; | 1972 | dev->min_mtu = 576; |
| 1974 | dev->max_mtu = NETIUCV_MTU_MAX; | 1973 | dev->max_mtu = NETIUCV_MTU_MAX; |
| 1975 | dev->destructor = netiucv_free_netdevice; | 1974 | dev->needs_free_netdev = true; |
| 1975 | dev->priv_destructor = netiucv_free_netdevice; | ||
| 1976 | dev->hard_header_len = NETIUCV_HDRLEN; | 1976 | dev->hard_header_len = NETIUCV_HDRLEN; |
| 1977 | dev->addr_len = 0; | 1977 | dev->addr_len = 0; |
| 1978 | dev->type = ARPHRD_SLIP; | 1978 | dev->type = ARPHRD_SLIP; |
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h index 4fc8ed5fe067..1f424e40afdf 100644 --- a/drivers/scsi/bnx2fc/bnx2fc.h +++ b/drivers/scsi/bnx2fc/bnx2fc.h | |||
| @@ -191,6 +191,7 @@ struct bnx2fc_hba { | |||
| 191 | struct bnx2fc_cmd_mgr *cmd_mgr; | 191 | struct bnx2fc_cmd_mgr *cmd_mgr; |
| 192 | spinlock_t hba_lock; | 192 | spinlock_t hba_lock; |
| 193 | struct mutex hba_mutex; | 193 | struct mutex hba_mutex; |
| 194 | struct mutex hba_stats_mutex; | ||
| 194 | unsigned long adapter_state; | 195 | unsigned long adapter_state; |
| 195 | #define ADAPTER_STATE_UP 0 | 196 | #define ADAPTER_STATE_UP 0 |
| 196 | #define ADAPTER_STATE_GOING_DOWN 1 | 197 | #define ADAPTER_STATE_GOING_DOWN 1 |
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 93b5a0012417..902722dc4ce3 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c | |||
| @@ -663,15 +663,17 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost) | |||
| 663 | if (!fw_stats) | 663 | if (!fw_stats) |
| 664 | return NULL; | 664 | return NULL; |
| 665 | 665 | ||
| 666 | mutex_lock(&hba->hba_stats_mutex); | ||
| 667 | |||
| 666 | bnx2fc_stats = fc_get_host_stats(shost); | 668 | bnx2fc_stats = fc_get_host_stats(shost); |
| 667 | 669 | ||
| 668 | init_completion(&hba->stat_req_done); | 670 | init_completion(&hba->stat_req_done); |
| 669 | if (bnx2fc_send_stat_req(hba)) | 671 | if (bnx2fc_send_stat_req(hba)) |
| 670 | return bnx2fc_stats; | 672 | goto unlock_stats_mutex; |
| 671 | rc = wait_for_completion_timeout(&hba->stat_req_done, (2 * HZ)); | 673 | rc = wait_for_completion_timeout(&hba->stat_req_done, (2 * HZ)); |
| 672 | if (!rc) { | 674 | if (!rc) { |
| 673 | BNX2FC_HBA_DBG(lport, "FW stat req timed out\n"); | 675 | BNX2FC_HBA_DBG(lport, "FW stat req timed out\n"); |
| 674 | return bnx2fc_stats; | 676 | goto unlock_stats_mutex; |
| 675 | } | 677 | } |
| 676 | BNX2FC_STATS(hba, rx_stat2, fc_crc_cnt); | 678 | BNX2FC_STATS(hba, rx_stat2, fc_crc_cnt); |
| 677 | bnx2fc_stats->invalid_crc_count += hba->bfw_stats.fc_crc_cnt; | 679 | bnx2fc_stats->invalid_crc_count += hba->bfw_stats.fc_crc_cnt; |
| @@ -693,6 +695,9 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost) | |||
| 693 | 695 | ||
| 694 | memcpy(&hba->prev_stats, hba->stats_buffer, | 696 | memcpy(&hba->prev_stats, hba->stats_buffer, |
| 695 | sizeof(struct fcoe_statistics_params)); | 697 | sizeof(struct fcoe_statistics_params)); |
| 698 | |||
| 699 | unlock_stats_mutex: | ||
| 700 | mutex_unlock(&hba->hba_stats_mutex); | ||
| 696 | return bnx2fc_stats; | 701 | return bnx2fc_stats; |
| 697 | } | 702 | } |
| 698 | 703 | ||
| @@ -1340,6 +1345,7 @@ static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic) | |||
| 1340 | } | 1345 | } |
| 1341 | spin_lock_init(&hba->hba_lock); | 1346 | spin_lock_init(&hba->hba_lock); |
| 1342 | mutex_init(&hba->hba_mutex); | 1347 | mutex_init(&hba->hba_mutex); |
| 1348 | mutex_init(&hba->hba_stats_mutex); | ||
| 1343 | 1349 | ||
| 1344 | hba->cnic = cnic; | 1350 | hba->cnic = cnic; |
| 1345 | 1351 | ||
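bnx2fc_get_host_stats() now serializes the firmware statistics request behind the new hba_stats_mutex, and the early returns funnel through the unlock label. A hedged POSIX-threads sketch of that lock/goto-unlock shape (build with -pthread); send_stat_req() and wait_for_fw() are stand-ins for the firmware request and the completion wait:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t stats_mutex = PTHREAD_MUTEX_INITIALIZER;
static int cached_stats;

/* Hypothetical stand-ins for the firmware request and completion wait. */
static bool send_stat_req(void) { return true; }
static bool wait_for_fw(void)   { return true; }

static int get_host_stats(void)
{
        int stats;

        pthread_mutex_lock(&stats_mutex);

        stats = cached_stats;   /* stale value returned on any early exit */

        if (!send_stat_req())
                goto unlock;
        if (!wait_for_fw())
                goto unlock;

        cached_stats++;         /* fold in the fresh firmware counters */
        stats = cached_stats;

unlock:
        pthread_mutex_unlock(&stats_mutex);
        return stats;
}

int main(void)
{
        printf("stats=%d\n", get_host_stats());
        return 0;
}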
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 1076c1578322..0aae094ab91c 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | |||
| @@ -1595,7 +1595,6 @@ static void release_offload_resources(struct cxgbi_sock *csk) | |||
| 1595 | cxgbi_sock_put(csk); | 1595 | cxgbi_sock_put(csk); |
| 1596 | } | 1596 | } |
| 1597 | csk->dst = NULL; | 1597 | csk->dst = NULL; |
| 1598 | csk->cdev = NULL; | ||
| 1599 | } | 1598 | } |
| 1600 | 1599 | ||
| 1601 | static int init_act_open(struct cxgbi_sock *csk) | 1600 | static int init_act_open(struct cxgbi_sock *csk) |
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index bd7d39ecbd24..e4c83b7c96a8 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c | |||
| @@ -867,7 +867,8 @@ static void need_active_close(struct cxgbi_sock *csk) | |||
| 867 | log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", | 867 | log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", |
| 868 | csk, (csk)->state, (csk)->flags, (csk)->tid); | 868 | csk, (csk)->state, (csk)->flags, (csk)->tid); |
| 869 | spin_lock_bh(&csk->lock); | 869 | spin_lock_bh(&csk->lock); |
| 870 | dst_confirm(csk->dst); | 870 | if (csk->dst) |
| 871 | dst_confirm(csk->dst); | ||
| 871 | data_lost = skb_queue_len(&csk->receive_queue); | 872 | data_lost = skb_queue_len(&csk->receive_queue); |
| 872 | __skb_queue_purge(&csk->receive_queue); | 873 | __skb_queue_purge(&csk->receive_queue); |
| 873 | 874 | ||
| @@ -882,7 +883,8 @@ static void need_active_close(struct cxgbi_sock *csk) | |||
| 882 | } | 883 | } |
| 883 | 884 | ||
| 884 | if (close_req) { | 885 | if (close_req) { |
| 885 | if (data_lost) | 886 | if (!cxgbi_sock_flag(csk, CTPF_LOGOUT_RSP_RCVD) || |
| 887 | data_lost) | ||
| 886 | csk->cdev->csk_send_abort_req(csk); | 888 | csk->cdev->csk_send_abort_req(csk); |
| 887 | else | 889 | else |
| 888 | csk->cdev->csk_send_close_req(csk); | 890 | csk->cdev->csk_send_close_req(csk); |
| @@ -1186,9 +1188,10 @@ static int cxgbi_sock_send_pdus(struct cxgbi_sock *csk, struct sk_buff *skb) | |||
| 1186 | cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb)); | 1188 | cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb)); |
| 1187 | skb = next; | 1189 | skb = next; |
| 1188 | } | 1190 | } |
| 1189 | done: | 1191 | |
| 1190 | if (likely(skb_queue_len(&csk->write_queue))) | 1192 | if (likely(skb_queue_len(&csk->write_queue))) |
| 1191 | cdev->csk_push_tx_frames(csk, 1); | 1193 | cdev->csk_push_tx_frames(csk, 1); |
| 1194 | done: | ||
| 1192 | spin_unlock_bh(&csk->lock); | 1195 | spin_unlock_bh(&csk->lock); |
| 1193 | return copied; | 1196 | return copied; |
| 1194 | 1197 | ||
| @@ -1568,9 +1571,12 @@ static inline int read_pdu_skb(struct iscsi_conn *conn, | |||
| 1568 | } | 1571 | } |
| 1569 | } | 1572 | } |
| 1570 | 1573 | ||
| 1571 | static int skb_read_pdu_bhs(struct iscsi_conn *conn, struct sk_buff *skb) | 1574 | static int |
| 1575 | skb_read_pdu_bhs(struct cxgbi_sock *csk, struct iscsi_conn *conn, | ||
| 1576 | struct sk_buff *skb) | ||
| 1572 | { | 1577 | { |
| 1573 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | 1578 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; |
| 1579 | int err; | ||
| 1574 | 1580 | ||
| 1575 | log_debug(1 << CXGBI_DBG_PDU_RX, | 1581 | log_debug(1 << CXGBI_DBG_PDU_RX, |
| 1576 | "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n", | 1582 | "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n", |
| @@ -1608,7 +1614,16 @@ static int skb_read_pdu_bhs(struct iscsi_conn *conn, struct sk_buff *skb) | |||
| 1608 | } | 1614 | } |
| 1609 | } | 1615 | } |
| 1610 | 1616 | ||
| 1611 | return read_pdu_skb(conn, skb, 0, 0); | 1617 | err = read_pdu_skb(conn, skb, 0, 0); |
| 1618 | if (likely(err >= 0)) { | ||
| 1619 | struct iscsi_hdr *hdr = (struct iscsi_hdr *)skb->data; | ||
| 1620 | u8 opcode = hdr->opcode & ISCSI_OPCODE_MASK; | ||
| 1621 | |||
| 1622 | if (unlikely(opcode == ISCSI_OP_LOGOUT_RSP)) | ||
| 1623 | cxgbi_sock_set_flag(csk, CTPF_LOGOUT_RSP_RCVD); | ||
| 1624 | } | ||
| 1625 | |||
| 1626 | return err; | ||
| 1612 | } | 1627 | } |
| 1613 | 1628 | ||
| 1614 | static int skb_read_pdu_data(struct iscsi_conn *conn, struct sk_buff *lskb, | 1629 | static int skb_read_pdu_data(struct iscsi_conn *conn, struct sk_buff *lskb, |
| @@ -1713,7 +1728,7 @@ void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk) | |||
| 1713 | cxgbi_skcb_rx_pdulen(skb)); | 1728 | cxgbi_skcb_rx_pdulen(skb)); |
| 1714 | 1729 | ||
| 1715 | if (cxgbi_skcb_test_flag(skb, SKCBF_RX_COALESCED)) { | 1730 | if (cxgbi_skcb_test_flag(skb, SKCBF_RX_COALESCED)) { |
| 1716 | err = skb_read_pdu_bhs(conn, skb); | 1731 | err = skb_read_pdu_bhs(csk, conn, skb); |
| 1717 | if (err < 0) { | 1732 | if (err < 0) { |
| 1718 | pr_err("coalesced bhs, csk 0x%p, skb 0x%p,%u, " | 1733 | pr_err("coalesced bhs, csk 0x%p, skb 0x%p,%u, " |
| 1719 | "f 0x%lx, plen %u.\n", | 1734 | "f 0x%lx, plen %u.\n", |
| @@ -1731,7 +1746,7 @@ void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk) | |||
| 1731 | cxgbi_skcb_flags(skb), | 1746 | cxgbi_skcb_flags(skb), |
| 1732 | cxgbi_skcb_rx_pdulen(skb)); | 1747 | cxgbi_skcb_rx_pdulen(skb)); |
| 1733 | } else { | 1748 | } else { |
| 1734 | err = skb_read_pdu_bhs(conn, skb); | 1749 | err = skb_read_pdu_bhs(csk, conn, skb); |
| 1735 | if (err < 0) { | 1750 | if (err < 0) { |
| 1736 | pr_err("bhs, csk 0x%p, skb 0x%p,%u, " | 1751 | pr_err("bhs, csk 0x%p, skb 0x%p,%u, " |
| 1737 | "f 0x%lx, plen %u.\n", | 1752 | "f 0x%lx, plen %u.\n", |
| @@ -1873,6 +1888,11 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode) | |||
| 1873 | tcp_task->dd_data = tdata; | 1888 | tcp_task->dd_data = tdata; |
| 1874 | task->hdr = NULL; | 1889 | task->hdr = NULL; |
| 1875 | 1890 | ||
| 1891 | if (tdata->skb) { | ||
| 1892 | kfree_skb(tdata->skb); | ||
| 1893 | tdata->skb = NULL; | ||
| 1894 | } | ||
| 1895 | |||
| 1876 | if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) && | 1896 | if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) && |
| 1877 | (opcode == ISCSI_OP_SCSI_DATA_OUT || | 1897 | (opcode == ISCSI_OP_SCSI_DATA_OUT || |
| 1878 | (opcode == ISCSI_OP_SCSI_CMD && | 1898 | (opcode == ISCSI_OP_SCSI_CMD && |
| @@ -1890,6 +1910,7 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode) | |||
| 1890 | return -ENOMEM; | 1910 | return -ENOMEM; |
| 1891 | } | 1911 | } |
| 1892 | 1912 | ||
| 1913 | skb_get(tdata->skb); | ||
| 1893 | skb_reserve(tdata->skb, cdev->skb_tx_rsvd); | 1914 | skb_reserve(tdata->skb, cdev->skb_tx_rsvd); |
| 1894 | task->hdr = (struct iscsi_hdr *)tdata->skb->data; | 1915 | task->hdr = (struct iscsi_hdr *)tdata->skb->data; |
| 1895 | task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */ | 1916 | task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */ |
| @@ -2035,9 +2056,9 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task) | |||
| 2035 | unsigned int datalen; | 2056 | unsigned int datalen; |
| 2036 | int err; | 2057 | int err; |
| 2037 | 2058 | ||
| 2038 | if (!skb) { | 2059 | if (!skb || cxgbi_skcb_test_flag(skb, SKCBF_TX_DONE)) { |
| 2039 | log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, | 2060 | log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, |
| 2040 | "task 0x%p, skb NULL.\n", task); | 2061 | "task 0x%p, skb 0x%p\n", task, skb); |
| 2041 | return 0; | 2062 | return 0; |
| 2042 | } | 2063 | } |
| 2043 | 2064 | ||
| @@ -2050,7 +2071,6 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task) | |||
| 2050 | } | 2071 | } |
| 2051 | 2072 | ||
| 2052 | datalen = skb->data_len; | 2073 | datalen = skb->data_len; |
| 2053 | tdata->skb = NULL; | ||
| 2054 | 2074 | ||
| 2055 | /* write ppod first if using ofldq to write ppod */ | 2075 | /* write ppod first if using ofldq to write ppod */ |
| 2056 | if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_VALID) { | 2076 | if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_VALID) { |
| @@ -2078,6 +2098,7 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task) | |||
| 2078 | pdulen += ISCSI_DIGEST_SIZE; | 2098 | pdulen += ISCSI_DIGEST_SIZE; |
| 2079 | 2099 | ||
| 2080 | task->conn->txdata_octets += pdulen; | 2100 | task->conn->txdata_octets += pdulen; |
| 2101 | cxgbi_skcb_set_flag(skb, SKCBF_TX_DONE); | ||
| 2081 | return 0; | 2102 | return 0; |
| 2082 | } | 2103 | } |
| 2083 | 2104 | ||
| @@ -2086,7 +2107,6 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task) | |||
| 2086 | "task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n", | 2107 | "task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n", |
| 2087 | task, skb, skb->len, skb->data_len, err); | 2108 | task, skb, skb->len, skb->data_len, err); |
| 2088 | /* reset skb to send when we are called again */ | 2109 | /* reset skb to send when we are called again */ |
| 2089 | tdata->skb = skb; | ||
| 2090 | return err; | 2110 | return err; |
| 2091 | } | 2111 | } |
| 2092 | 2112 | ||
| @@ -2094,7 +2114,8 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task) | |||
| 2094 | "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n", | 2114 | "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n", |
| 2095 | task->itt, skb, skb->len, skb->data_len, err); | 2115 | task->itt, skb, skb->len, skb->data_len, err); |
| 2096 | 2116 | ||
| 2097 | kfree_skb(skb); | 2117 | __kfree_skb(tdata->skb); |
| 2118 | tdata->skb = NULL; | ||
| 2098 | 2119 | ||
| 2099 | iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err); | 2120 | iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err); |
| 2100 | iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED); | 2121 | iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED); |
| @@ -2113,8 +2134,10 @@ void cxgbi_cleanup_task(struct iscsi_task *task) | |||
| 2113 | 2134 | ||
| 2114 | tcp_task->dd_data = NULL; | 2135 | tcp_task->dd_data = NULL; |
| 2115 | /* never reached the xmit task callout */ | 2136 | /* never reached the xmit task callout */ |
| 2116 | if (tdata->skb) | 2137 | if (tdata->skb) { |
| 2117 | __kfree_skb(tdata->skb); | 2138 | kfree_skb(tdata->skb); |
| 2139 | tdata->skb = NULL; | ||
| 2140 | } | ||
| 2118 | 2141 | ||
| 2119 | task_release_itt(task, task->hdr_itt); | 2142 | task_release_itt(task, task->hdr_itt); |
| 2120 | memset(tdata, 0, sizeof(*tdata)); | 2143 | memset(tdata, 0, sizeof(*tdata)); |
| @@ -2714,6 +2737,9 @@ EXPORT_SYMBOL_GPL(cxgbi_attr_is_visible); | |||
| 2714 | static int __init libcxgbi_init_module(void) | 2737 | static int __init libcxgbi_init_module(void) |
| 2715 | { | 2738 | { |
| 2716 | pr_info("%s", version); | 2739 | pr_info("%s", version); |
| 2740 | |||
| 2741 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, cb) < | ||
| 2742 | sizeof(struct cxgbi_skb_cb)); | ||
| 2717 | return 0; | 2743 | return 0; |
| 2718 | } | 2744 | } |
| 2719 | 2745 | ||
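The new BUILD_BUG_ON() in libcxgbi_init_module() checks at compile time that the rearranged struct cxgbi_skb_cb still fits inside skb->cb. A user-space analogue with C11 static_assert; the 48-byte figure matches the size of sk_buff::cb, but the demo struct is only an approximation of the real layout:

#include <assert.h>
#include <stdio.h>

/* The sk_buff control block is a fixed-size scratch area (48 bytes upstream). */
#define SKB_CB_SIZE 48

struct demo_skb_cb {
        union {
                struct { unsigned int ddigest, pdulen; } rx;
                struct { void *handle, *arp_err_handler, *wr_next; } tx;
        };
        unsigned char ulp_mode;
        unsigned long flags;
        unsigned int seq;
};

/* Fails the build, not the boot, if the private cb outgrows the scratch area. */
static_assert(sizeof(struct demo_skb_cb) <= SKB_CB_SIZE,
              "demo_skb_cb does not fit in skb->cb");

int main(void)
{
        printf("cb uses %zu of %d bytes\n", sizeof(struct demo_skb_cb), SKB_CB_SIZE);
        return 0;
}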
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h index 18e0ea83d361..37f07aaab1e4 100644 --- a/drivers/scsi/cxgbi/libcxgbi.h +++ b/drivers/scsi/cxgbi/libcxgbi.h | |||
| @@ -187,6 +187,7 @@ enum cxgbi_sock_flags { | |||
| 187 | CTPF_HAS_ATID, /* reserved atid */ | 187 | CTPF_HAS_ATID, /* reserved atid */ |
| 188 | CTPF_HAS_TID, /* reserved hw tid */ | 188 | CTPF_HAS_TID, /* reserved hw tid */ |
| 189 | CTPF_OFFLOAD_DOWN, /* offload function off */ | 189 | CTPF_OFFLOAD_DOWN, /* offload function off */ |
| 190 | CTPF_LOGOUT_RSP_RCVD, /* received logout response */ | ||
| 190 | }; | 191 | }; |
| 191 | 192 | ||
| 192 | struct cxgbi_skb_rx_cb { | 193 | struct cxgbi_skb_rx_cb { |
| @@ -195,7 +196,8 @@ struct cxgbi_skb_rx_cb { | |||
| 195 | }; | 196 | }; |
| 196 | 197 | ||
| 197 | struct cxgbi_skb_tx_cb { | 198 | struct cxgbi_skb_tx_cb { |
| 198 | void *l2t; | 199 | void *handle; |
| 200 | void *arp_err_handler; | ||
| 199 | struct sk_buff *wr_next; | 201 | struct sk_buff *wr_next; |
| 200 | }; | 202 | }; |
| 201 | 203 | ||
| @@ -203,6 +205,7 @@ enum cxgbi_skcb_flags { | |||
| 203 | SKCBF_TX_NEED_HDR, /* packet needs a header */ | 205 | SKCBF_TX_NEED_HDR, /* packet needs a header */ |
| 204 | SKCBF_TX_MEM_WRITE, /* memory write */ | 206 | SKCBF_TX_MEM_WRITE, /* memory write */ |
| 205 | SKCBF_TX_FLAG_COMPL, /* wr completion flag */ | 207 | SKCBF_TX_FLAG_COMPL, /* wr completion flag */ |
| 208 | SKCBF_TX_DONE, /* skb tx done */ | ||
| 206 | SKCBF_RX_COALESCED, /* received whole pdu */ | 209 | SKCBF_RX_COALESCED, /* received whole pdu */ |
| 207 | SKCBF_RX_HDR, /* received pdu header */ | 210 | SKCBF_RX_HDR, /* received pdu header */ |
| 208 | SKCBF_RX_DATA, /* received pdu payload */ | 211 | SKCBF_RX_DATA, /* received pdu payload */ |
| @@ -215,13 +218,13 @@ enum cxgbi_skcb_flags { | |||
| 215 | }; | 218 | }; |
| 216 | 219 | ||
| 217 | struct cxgbi_skb_cb { | 220 | struct cxgbi_skb_cb { |
| 218 | unsigned char ulp_mode; | ||
| 219 | unsigned long flags; | ||
| 220 | unsigned int seq; | ||
| 221 | union { | 221 | union { |
| 222 | struct cxgbi_skb_rx_cb rx; | 222 | struct cxgbi_skb_rx_cb rx; |
| 223 | struct cxgbi_skb_tx_cb tx; | 223 | struct cxgbi_skb_tx_cb tx; |
| 224 | }; | 224 | }; |
| 225 | unsigned char ulp_mode; | ||
| 226 | unsigned long flags; | ||
| 227 | unsigned int seq; | ||
| 225 | }; | 228 | }; |
| 226 | 229 | ||
| 227 | #define CXGBI_SKB_CB(skb) ((struct cxgbi_skb_cb *)&((skb)->cb[0])) | 230 | #define CXGBI_SKB_CB(skb) ((struct cxgbi_skb_cb *)&((skb)->cb[0])) |
| @@ -374,11 +377,9 @@ static inline void cxgbi_sock_enqueue_wr(struct cxgbi_sock *csk, | |||
| 374 | cxgbi_skcb_tx_wr_next(skb) = NULL; | 377 | cxgbi_skcb_tx_wr_next(skb) = NULL; |
| 375 | /* | 378 | /* |
| 376 | * We want to take an extra reference since both us and the driver | 379 | * We want to take an extra reference since both us and the driver |
| 377 | * need to free the packet before it's really freed. We know there's | 380 | * need to free the packet before it's really freed. |
| 378 | * just one user currently so we use atomic_set rather than skb_get | ||
| 379 | * to avoid the atomic op. | ||
| 380 | */ | 381 | */ |
| 381 | atomic_set(&skb->users, 2); | 382 | skb_get(skb); |
| 382 | 383 | ||
| 383 | if (!csk->wr_pending_head) | 384 | if (!csk->wr_pending_head) |
| 384 | csk->wr_pending_head = skb; | 385 | csk->wr_pending_head = skb; |
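The libcxgbi changes keep tdata->skb alive across transmit retries by taking an extra reference with skb_get() and dropping it explicitly, rather than passing ownership around by zeroing the pointer. A toy user-space sketch of the same get/put idea with a trivially refcounted buffer (no relation to the real sk_buff API):

#include <stdio.h>
#include <stdlib.h>

struct buf {
        int refcnt;
        char data[64];
};

static struct buf *buf_alloc(void)
{
        struct buf *b = calloc(1, sizeof(*b));

        if (b)
                b->refcnt = 1;
        return b;
}

static struct buf *buf_get(struct buf *b) { b->refcnt++; return b; }

static void buf_put(struct buf *b)
{
        if (--b->refcnt == 0) {
                puts("buffer really freed");
                free(b);
        }
}

int main(void)
{
        struct buf *skb = buf_alloc();

        if (!skb)
                return 1;

        buf_get(skb);   /* extra reference: both the task and the driver will "free" it */
        buf_put(skb);   /* driver side done transmitting */
        buf_put(skb);   /* task cleanup drops the last reference */
        return 0;
}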
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index 3cbab8710e58..2ceff585f189 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c | |||
| @@ -265,18 +265,16 @@ static unsigned int rdac_failover_get(struct rdac_controller *ctlr, | |||
| 265 | struct list_head *list, | 265 | struct list_head *list, |
| 266 | unsigned char *cdb) | 266 | unsigned char *cdb) |
| 267 | { | 267 | { |
| 268 | struct scsi_device *sdev = ctlr->ms_sdev; | ||
| 269 | struct rdac_dh_data *h = sdev->handler_data; | ||
| 270 | struct rdac_mode_common *common; | 268 | struct rdac_mode_common *common; |
| 271 | unsigned data_size; | 269 | unsigned data_size; |
| 272 | struct rdac_queue_data *qdata; | 270 | struct rdac_queue_data *qdata; |
| 273 | u8 *lun_table; | 271 | u8 *lun_table; |
| 274 | 272 | ||
| 275 | if (h->ctlr->use_ms10) { | 273 | if (ctlr->use_ms10) { |
| 276 | struct rdac_pg_expanded *rdac_pg; | 274 | struct rdac_pg_expanded *rdac_pg; |
| 277 | 275 | ||
| 278 | data_size = sizeof(struct rdac_pg_expanded); | 276 | data_size = sizeof(struct rdac_pg_expanded); |
| 279 | rdac_pg = &h->ctlr->mode_select.expanded; | 277 | rdac_pg = &ctlr->mode_select.expanded; |
| 280 | memset(rdac_pg, 0, data_size); | 278 | memset(rdac_pg, 0, data_size); |
| 281 | common = &rdac_pg->common; | 279 | common = &rdac_pg->common; |
| 282 | rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40; | 280 | rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40; |
| @@ -288,7 +286,7 @@ static unsigned int rdac_failover_get(struct rdac_controller *ctlr, | |||
| 288 | struct rdac_pg_legacy *rdac_pg; | 286 | struct rdac_pg_legacy *rdac_pg; |
| 289 | 287 | ||
| 290 | data_size = sizeof(struct rdac_pg_legacy); | 288 | data_size = sizeof(struct rdac_pg_legacy); |
| 291 | rdac_pg = &h->ctlr->mode_select.legacy; | 289 | rdac_pg = &ctlr->mode_select.legacy; |
| 292 | memset(rdac_pg, 0, data_size); | 290 | memset(rdac_pg, 0, data_size); |
| 293 | common = &rdac_pg->common; | 291 | common = &rdac_pg->common; |
| 294 | rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER; | 292 | rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER; |
| @@ -304,7 +302,7 @@ static unsigned int rdac_failover_get(struct rdac_controller *ctlr, | |||
| 304 | } | 302 | } |
| 305 | 303 | ||
| 306 | /* Prepare the command. */ | 304 | /* Prepare the command. */ |
| 307 | if (h->ctlr->use_ms10) { | 305 | if (ctlr->use_ms10) { |
| 308 | cdb[0] = MODE_SELECT_10; | 306 | cdb[0] = MODE_SELECT_10; |
| 309 | cdb[7] = data_size >> 8; | 307 | cdb[7] = data_size >> 8; |
| 310 | cdb[8] = data_size & 0xff; | 308 | cdb[8] = data_size & 0xff; |
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c index d390325c99ec..abf6026645dd 100644 --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | |||
| @@ -1170,6 +1170,8 @@ static struct ibmvscsis_cmd *ibmvscsis_get_free_cmd(struct scsi_info *vscsi) | |||
| 1170 | cmd = list_first_entry_or_null(&vscsi->free_cmd, | 1170 | cmd = list_first_entry_or_null(&vscsi->free_cmd, |
| 1171 | struct ibmvscsis_cmd, list); | 1171 | struct ibmvscsis_cmd, list); |
| 1172 | if (cmd) { | 1172 | if (cmd) { |
| 1173 | if (cmd->abort_cmd) | ||
| 1174 | cmd->abort_cmd = NULL; | ||
| 1173 | cmd->flags &= ~(DELAY_SEND); | 1175 | cmd->flags &= ~(DELAY_SEND); |
| 1174 | list_del(&cmd->list); | 1176 | list_del(&cmd->list); |
| 1175 | cmd->iue = iue; | 1177 | cmd->iue = iue; |
| @@ -1774,6 +1776,7 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi) | |||
| 1774 | if (cmd->abort_cmd) { | 1776 | if (cmd->abort_cmd) { |
| 1775 | retry = true; | 1777 | retry = true; |
| 1776 | cmd->abort_cmd->flags &= ~(DELAY_SEND); | 1778 | cmd->abort_cmd->flags &= ~(DELAY_SEND); |
| 1779 | cmd->abort_cmd = NULL; | ||
| 1777 | } | 1780 | } |
| 1778 | 1781 | ||
| 1779 | /* | 1782 | /* |
| @@ -1788,6 +1791,25 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi) | |||
| 1788 | list_del(&cmd->list); | 1791 | list_del(&cmd->list); |
| 1789 | ibmvscsis_free_cmd_resources(vscsi, | 1792 | ibmvscsis_free_cmd_resources(vscsi, |
| 1790 | cmd); | 1793 | cmd); |
| 1794 | /* | ||
| 1795 | * With a successfully aborted op | ||
| 1796 | * through LIO we want to increment the | ||
| 1797 | * vscsi credit so that when we don't | ||
| 1798 | * send a rsp to the original scsi abort | ||
| 1799 | * op (h_send_crq), but the tm rsp to | ||
| 1800 | * the abort is sent, the credit is | ||
| 1801 | * correctly sent with the abort tm rsp. | ||
| 1802 | * We would need 1 for the abort tm rsp | ||
| 1803 | * and 1 credit for the aborted scsi op. | ||
| 1804 | * Thus we need to increment here. | ||
| 1805 | * Also we want to increment the credit | ||
| 1806 | * here because we want to make sure | ||
| 1807 | * cmd is actually released first | ||
| 1808 | * otherwise the client will think it | ||
| 1809 | * can send a new cmd, and we could | ||
| 1810 | * find ourselves short of cmd elements. | ||
| 1811 | */ | ||
| 1812 | vscsi->credit += 1; | ||
| 1791 | } else { | 1813 | } else { |
| 1792 | iue = cmd->iue; | 1814 | iue = cmd->iue; |
| 1793 | 1815 | ||
| @@ -2962,10 +2984,7 @@ static long srp_build_response(struct scsi_info *vscsi, | |||
| 2962 | 2984 | ||
| 2963 | rsp->opcode = SRP_RSP; | 2985 | rsp->opcode = SRP_RSP; |
| 2964 | 2986 | ||
| 2965 | if (vscsi->credit > 0 && vscsi->state == SRP_PROCESSING) | 2987 | rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit); |
| 2966 | rsp->req_lim_delta = cpu_to_be32(vscsi->credit); | ||
| 2967 | else | ||
| 2968 | rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit); | ||
| 2969 | rsp->tag = cmd->rsp.tag; | 2988 | rsp->tag = cmd->rsp.tag; |
| 2970 | rsp->flags = 0; | 2989 | rsp->flags = 0; |
| 2971 | 2990 | ||
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 8912767e7bc8..da669dce12fe 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h | |||
| @@ -127,7 +127,7 @@ int lpfc_disc_state_machine(struct lpfc_vport *, struct lpfc_nodelist *, void *, | |||
| 127 | void lpfc_do_scr_ns_plogi(struct lpfc_hba *, struct lpfc_vport *); | 127 | void lpfc_do_scr_ns_plogi(struct lpfc_hba *, struct lpfc_vport *); |
| 128 | int lpfc_check_sparm(struct lpfc_vport *, struct lpfc_nodelist *, | 128 | int lpfc_check_sparm(struct lpfc_vport *, struct lpfc_nodelist *, |
| 129 | struct serv_parm *, uint32_t, int); | 129 | struct serv_parm *, uint32_t, int); |
| 130 | int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *); | 130 | void lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *); |
| 131 | void lpfc_more_plogi(struct lpfc_vport *); | 131 | void lpfc_more_plogi(struct lpfc_vport *); |
| 132 | void lpfc_more_adisc(struct lpfc_vport *); | 132 | void lpfc_more_adisc(struct lpfc_vport *); |
| 133 | void lpfc_end_rscn(struct lpfc_vport *); | 133 | void lpfc_end_rscn(struct lpfc_vport *); |
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index f2cd19c6c2df..24ce96dcc94d 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c | |||
| @@ -978,9 +978,10 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
| 978 | ndlp, did, ndlp->nlp_fc4_type, | 978 | ndlp, did, ndlp->nlp_fc4_type, |
| 979 | FC_TYPE_FCP, FC_TYPE_NVME); | 979 | FC_TYPE_FCP, FC_TYPE_NVME); |
| 980 | ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; | 980 | ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; |
| 981 | |||
| 982 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); | ||
| 983 | lpfc_issue_els_prli(vport, ndlp, 0); | ||
| 981 | } | 984 | } |
| 982 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); | ||
| 983 | lpfc_issue_els_prli(vport, ndlp, 0); | ||
| 984 | } else | 985 | } else |
| 985 | lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, | 986 | lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, |
| 986 | "3065 GFT_ID failed x%08x\n", irsp->ulpStatus); | 987 | "3065 GFT_ID failed x%08x\n", irsp->ulpStatus); |
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index bff3de053df4..f74cb0142fd4 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c | |||
| @@ -206,7 +206,7 @@ lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
| 206 | * associated with a LPFC_NODELIST entry. This | 206 | * associated with a LPFC_NODELIST entry. This |
| 207 | * routine effectively results in a "software abort". | 207 | * routine effectively results in a "software abort". |
| 208 | */ | 208 | */ |
| 209 | int | 209 | void |
| 210 | lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) | 210 | lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) |
| 211 | { | 211 | { |
| 212 | LIST_HEAD(abort_list); | 212 | LIST_HEAD(abort_list); |
| @@ -215,6 +215,10 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) | |||
| 215 | 215 | ||
| 216 | pring = lpfc_phba_elsring(phba); | 216 | pring = lpfc_phba_elsring(phba); |
| 217 | 217 | ||
| 218 | /* In the error recovery path we might have a NULL pring here */ | ||
| 219 | if (!pring) | ||
| 220 | return; | ||
| 221 | |||
| 218 | /* Abort outstanding I/O on NPort <nlp_DID> */ | 222 | /* Abort outstanding I/O on NPort <nlp_DID> */ |
| 219 | lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY, | 223 | lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY, |
| 220 | "2819 Abort outstanding I/O on NPort x%x " | 224 | "2819 Abort outstanding I/O on NPort x%x " |
| @@ -273,7 +277,6 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) | |||
| 273 | IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); | 277 | IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); |
| 274 | 278 | ||
| 275 | lpfc_cancel_retry_delay_tmo(phba->pport, ndlp); | 279 | lpfc_cancel_retry_delay_tmo(phba->pport, ndlp); |
| 276 | return 0; | ||
| 277 | } | 280 | } |
| 278 | 281 | ||
| 279 | static int | 282 | static int |
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index 074a6b5e7763..518b15e6f222 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c | |||
| @@ -799,8 +799,8 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport, | |||
| 799 | } | 799 | } |
| 800 | spin_unlock_irqrestore(&ctxp->ctxlock, flags); | 800 | spin_unlock_irqrestore(&ctxp->ctxlock, flags); |
| 801 | 801 | ||
| 802 | lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d\n", ctxp->oxid, | 802 | lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n", ctxp->oxid, |
| 803 | ctxp->state, 0); | 803 | ctxp->state, aborting); |
| 804 | 804 | ||
| 805 | atomic_inc(&lpfc_nvmep->xmt_fcp_release); | 805 | atomic_inc(&lpfc_nvmep->xmt_fcp_release); |
| 806 | 806 | ||
diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h index 5ca3e8c28a3f..32632c9b2276 100644 --- a/drivers/scsi/qedi/qedi.h +++ b/drivers/scsi/qedi/qedi.h | |||
| @@ -38,7 +38,7 @@ struct qedi_endpoint; | |||
| 38 | #define QEDI_MAX_ISCSI_TASK 4096 | 38 | #define QEDI_MAX_ISCSI_TASK 4096 |
| 39 | #define QEDI_MAX_TASK_NUM 0x0FFF | 39 | #define QEDI_MAX_TASK_NUM 0x0FFF |
| 40 | #define QEDI_MAX_ISCSI_CONNS_PER_HBA 1024 | 40 | #define QEDI_MAX_ISCSI_CONNS_PER_HBA 1024 |
| 41 | #define QEDI_ISCSI_MAX_BDS_PER_CMD 256 /* Firmware max BDs is 256 */ | 41 | #define QEDI_ISCSI_MAX_BDS_PER_CMD 255 /* Firmware max BDs is 255 */ |
| 42 | #define MAX_OUSTANDING_TASKS_PER_CON 1024 | 42 | #define MAX_OUSTANDING_TASKS_PER_CON 1024 |
| 43 | 43 | ||
| 44 | #define QEDI_MAX_BD_LEN 0xffff | 44 | #define QEDI_MAX_BD_LEN 0xffff |
| @@ -63,6 +63,7 @@ struct qedi_endpoint; | |||
| 63 | #define QEDI_PAGE_MASK (~((QEDI_PAGE_SIZE) - 1)) | 63 | #define QEDI_PAGE_MASK (~((QEDI_PAGE_SIZE) - 1)) |
| 64 | 64 | ||
| 65 | #define QEDI_PAGE_SIZE 4096 | 65 | #define QEDI_PAGE_SIZE 4096 |
| 66 | #define QEDI_HW_DMA_BOUNDARY 0xfff | ||
| 66 | #define QEDI_PATH_HANDLE 0xFE0000000UL | 67 | #define QEDI_PATH_HANDLE 0xFE0000000UL |
| 67 | 68 | ||
| 68 | struct qedi_uio_ctrl { | 69 | struct qedi_uio_ctrl { |
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c index d6978cbc56f0..507512cc478b 100644 --- a/drivers/scsi/qedi/qedi_fw.c +++ b/drivers/scsi/qedi/qedi_fw.c | |||
| @@ -870,7 +870,6 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi, | |||
| 870 | QEDI_ERR(&qedi->dbg_ctx, | 870 | QEDI_ERR(&qedi->dbg_ctx, |
| 871 | "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x, task=%p\n", | 871 | "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x, task=%p\n", |
| 872 | protoitt, cqe->itid, qedi_conn->iscsi_conn_id, task); | 872 | protoitt, cqe->itid, qedi_conn->iscsi_conn_id, task); |
| 873 | WARN_ON(1); | ||
| 874 | } | 873 | } |
| 875 | } | 874 | } |
| 876 | 875 | ||
| @@ -1494,6 +1493,8 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn, | |||
| 1494 | tmf_hdr = (struct iscsi_tm *)mtask->hdr; | 1493 | tmf_hdr = (struct iscsi_tm *)mtask->hdr; |
| 1495 | qedi_cmd = (struct qedi_cmd *)mtask->dd_data; | 1494 | qedi_cmd = (struct qedi_cmd *)mtask->dd_data; |
| 1496 | ep = qedi_conn->ep; | 1495 | ep = qedi_conn->ep; |
| 1496 | if (!ep) | ||
| 1497 | return -ENODEV; | ||
| 1497 | 1498 | ||
| 1498 | tid = qedi_get_task_idx(qedi); | 1499 | tid = qedi_get_task_idx(qedi); |
| 1499 | if (tid == -1) | 1500 | if (tid == -1) |
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c index 3548d46f9b27..87f0af358b33 100644 --- a/drivers/scsi/qedi/qedi_iscsi.c +++ b/drivers/scsi/qedi/qedi_iscsi.c | |||
| @@ -59,6 +59,7 @@ struct scsi_host_template qedi_host_template = { | |||
| 59 | .this_id = -1, | 59 | .this_id = -1, |
| 60 | .sg_tablesize = QEDI_ISCSI_MAX_BDS_PER_CMD, | 60 | .sg_tablesize = QEDI_ISCSI_MAX_BDS_PER_CMD, |
| 61 | .max_sectors = 0xffff, | 61 | .max_sectors = 0xffff, |
| 62 | .dma_boundary = QEDI_HW_DMA_BOUNDARY, | ||
| 62 | .cmd_per_lun = 128, | 63 | .cmd_per_lun = 128, |
| 63 | .use_clustering = ENABLE_CLUSTERING, | 64 | .use_clustering = ENABLE_CLUSTERING, |
| 64 | .shost_attrs = qedi_shost_attrs, | 65 | .shost_attrs = qedi_shost_attrs, |
| @@ -1223,8 +1224,12 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data) | |||
| 1223 | 1224 | ||
| 1224 | iscsi_cid = (u32)path_data->handle; | 1225 | iscsi_cid = (u32)path_data->handle; |
| 1225 | qedi_ep = qedi->ep_tbl[iscsi_cid]; | 1226 | qedi_ep = qedi->ep_tbl[iscsi_cid]; |
| 1226 | QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, | 1227 | QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, |
| 1227 | "iscsi_cid=0x%x, qedi_ep=%p\n", iscsi_cid, qedi_ep); | 1228 | "iscsi_cid=0x%x, qedi_ep=%p\n", iscsi_cid, qedi_ep); |
| 1229 | if (!qedi_ep) { | ||
| 1230 | ret = -EINVAL; | ||
| 1231 | goto set_path_exit; | ||
| 1232 | } | ||
| 1228 | 1233 | ||
| 1229 | if (!is_valid_ether_addr(&path_data->mac_addr[0])) { | 1234 | if (!is_valid_ether_addr(&path_data->mac_addr[0])) { |
| 1230 | QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n"); | 1235 | QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n"); |
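Setting .dma_boundary to QEDI_HW_DMA_BOUNDARY (0xfff) tells the SCSI/block layer that no scatter/gather segment handed to this HBA may cross a 4 KiB boundary. A small self-contained sketch of that boundary test (it mirrors the usual mask check and is illustrative, not the block layer's code):

    #include <stdbool.h>
    #include <stdint.h>

    #define QEDI_HW_DMA_BOUNDARY 0xfffULL

    /* A segment [addr, addr + len) is acceptable if its first and last
     * byte fall inside the same (boundary + 1)-aligned window. */
    static bool segment_within_boundary(uint64_t addr, uint64_t len,
                                        uint64_t boundary)
    {
            return (addr | boundary) == ((addr + len - 1) | boundary);
    }

For example, a 0x200-byte segment starting at physical address 0xf00 would cross 0x1000 and have to be split.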
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index 92775a8b74b1..879d3b7462f9 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c | |||
| @@ -151,6 +151,11 @@ static int qedi_uio_close(struct uio_info *uinfo, struct inode *inode) | |||
| 151 | 151 | ||
| 152 | static void __qedi_free_uio_rings(struct qedi_uio_dev *udev) | 152 | static void __qedi_free_uio_rings(struct qedi_uio_dev *udev) |
| 153 | { | 153 | { |
| 154 | if (udev->uctrl) { | ||
| 155 | free_page((unsigned long)udev->uctrl); | ||
| 156 | udev->uctrl = NULL; | ||
| 157 | } | ||
| 158 | |||
| 154 | if (udev->ll2_ring) { | 159 | if (udev->ll2_ring) { |
| 155 | free_page((unsigned long)udev->ll2_ring); | 160 | free_page((unsigned long)udev->ll2_ring); |
| 156 | udev->ll2_ring = NULL; | 161 | udev->ll2_ring = NULL; |
| @@ -169,7 +174,6 @@ static void __qedi_free_uio(struct qedi_uio_dev *udev) | |||
| 169 | __qedi_free_uio_rings(udev); | 174 | __qedi_free_uio_rings(udev); |
| 170 | 175 | ||
| 171 | pci_dev_put(udev->pdev); | 176 | pci_dev_put(udev->pdev); |
| 172 | kfree(udev->uctrl); | ||
| 173 | kfree(udev); | 177 | kfree(udev); |
| 174 | } | 178 | } |
| 175 | 179 | ||
| @@ -208,6 +212,11 @@ static int __qedi_alloc_uio_rings(struct qedi_uio_dev *udev) | |||
| 208 | if (udev->ll2_ring || udev->ll2_buf) | 212 | if (udev->ll2_ring || udev->ll2_buf) |
| 209 | return rc; | 213 | return rc; |
| 210 | 214 | ||
| 215 | /* Memory for control area. */ | ||
| 216 | udev->uctrl = (void *)get_zeroed_page(GFP_KERNEL); | ||
| 217 | if (!udev->uctrl) | ||
| 218 | return -ENOMEM; | ||
| 219 | |||
| 211 | /* Allocating memory for LL2 ring */ | 220 | /* Allocating memory for LL2 ring */ |
| 212 | udev->ll2_ring_size = QEDI_PAGE_SIZE; | 221 | udev->ll2_ring_size = QEDI_PAGE_SIZE; |
| 213 | udev->ll2_ring = (void *)get_zeroed_page(GFP_KERNEL | __GFP_COMP); | 222 | udev->ll2_ring = (void *)get_zeroed_page(GFP_KERNEL | __GFP_COMP); |
| @@ -237,7 +246,6 @@ exit_alloc_ring: | |||
| 237 | static int qedi_alloc_uio_rings(struct qedi_ctx *qedi) | 246 | static int qedi_alloc_uio_rings(struct qedi_ctx *qedi) |
| 238 | { | 247 | { |
| 239 | struct qedi_uio_dev *udev = NULL; | 248 | struct qedi_uio_dev *udev = NULL; |
| 240 | struct qedi_uio_ctrl *uctrl = NULL; | ||
| 241 | int rc = 0; | 249 | int rc = 0; |
| 242 | 250 | ||
| 243 | list_for_each_entry(udev, &qedi_udev_list, list) { | 251 | list_for_each_entry(udev, &qedi_udev_list, list) { |
| @@ -258,21 +266,14 @@ static int qedi_alloc_uio_rings(struct qedi_ctx *qedi) | |||
| 258 | goto err_udev; | 266 | goto err_udev; |
| 259 | } | 267 | } |
| 260 | 268 | ||
| 261 | uctrl = kzalloc(sizeof(*uctrl), GFP_KERNEL); | ||
| 262 | if (!uctrl) { | ||
| 263 | rc = -ENOMEM; | ||
| 264 | goto err_uctrl; | ||
| 265 | } | ||
| 266 | |||
| 267 | udev->uio_dev = -1; | 269 | udev->uio_dev = -1; |
| 268 | 270 | ||
| 269 | udev->qedi = qedi; | 271 | udev->qedi = qedi; |
| 270 | udev->pdev = qedi->pdev; | 272 | udev->pdev = qedi->pdev; |
| 271 | udev->uctrl = uctrl; | ||
| 272 | 273 | ||
| 273 | rc = __qedi_alloc_uio_rings(udev); | 274 | rc = __qedi_alloc_uio_rings(udev); |
| 274 | if (rc) | 275 | if (rc) |
| 275 | goto err_uio_rings; | 276 | goto err_uctrl; |
| 276 | 277 | ||
| 277 | list_add(&udev->list, &qedi_udev_list); | 278 | list_add(&udev->list, &qedi_udev_list); |
| 278 | 279 | ||
| @@ -283,8 +284,6 @@ static int qedi_alloc_uio_rings(struct qedi_ctx *qedi) | |||
| 283 | udev->rx_pkt = udev->ll2_buf + LL2_SINGLE_BUF_SIZE; | 284 | udev->rx_pkt = udev->ll2_buf + LL2_SINGLE_BUF_SIZE; |
| 284 | return 0; | 285 | return 0; |
| 285 | 286 | ||
| 286 | err_uio_rings: | ||
| 287 | kfree(uctrl); | ||
| 288 | err_uctrl: | 287 | err_uctrl: |
| 289 | kfree(udev); | 288 | kfree(udev); |
| 290 | err_udev: | 289 | err_udev: |
| @@ -828,6 +827,8 @@ static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi) | |||
| 828 | qedi->pf_params.iscsi_pf_params.num_uhq_pages_in_ring = num_sq_pages; | 827 | qedi->pf_params.iscsi_pf_params.num_uhq_pages_in_ring = num_sq_pages; |
| 829 | qedi->pf_params.iscsi_pf_params.num_queues = qedi->num_queues; | 828 | qedi->pf_params.iscsi_pf_params.num_queues = qedi->num_queues; |
| 830 | qedi->pf_params.iscsi_pf_params.debug_mode = qedi_fw_debug; | 829 | qedi->pf_params.iscsi_pf_params.debug_mode = qedi_fw_debug; |
| 830 | qedi->pf_params.iscsi_pf_params.two_msl_timer = 4000; | ||
| 831 | qedi->pf_params.iscsi_pf_params.max_fin_rt = 2; | ||
| 831 | 832 | ||
| 832 | for (log_page_size = 0 ; log_page_size < 32 ; log_page_size++) { | 833 | for (log_page_size = 0 ; log_page_size < 32 ; log_page_size++) { |
| 833 | if ((1 << log_page_size) == PAGE_SIZE) | 834 | if ((1 << log_page_size) == PAGE_SIZE) |
| @@ -1498,11 +1499,9 @@ err_idx: | |||
| 1498 | 1499 | ||
| 1499 | void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx) | 1500 | void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx) |
| 1500 | { | 1501 | { |
| 1501 | if (!test_and_clear_bit(idx, qedi->task_idx_map)) { | 1502 | if (!test_and_clear_bit(idx, qedi->task_idx_map)) |
| 1502 | QEDI_ERR(&qedi->dbg_ctx, | 1503 | QEDI_ERR(&qedi->dbg_ctx, |
| 1503 | "FW task context, already cleared, tid=0x%x\n", idx); | 1504 | "FW task context, already cleared, tid=0x%x\n", idx); |
| 1504 | WARN_ON(1); | ||
| 1505 | } | ||
| 1506 | } | 1505 | } |
| 1507 | 1506 | ||
| 1508 | void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt, | 1507 | void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt, |
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index 16d1cd50feed..ca3420de5a01 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c | |||
| @@ -730,6 +730,8 @@ qla2x00_process_loopback(struct bsg_job *bsg_job) | |||
| 730 | return -EIO; | 730 | return -EIO; |
| 731 | } | 731 | } |
| 732 | 732 | ||
| 733 | memset(&elreq, 0, sizeof(elreq)); | ||
| 734 | |||
| 733 | elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev, | 735 | elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev, |
| 734 | bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, | 736 | bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, |
| 735 | DMA_TO_DEVICE); | 737 | DMA_TO_DEVICE); |
| @@ -795,10 +797,9 @@ qla2x00_process_loopback(struct bsg_job *bsg_job) | |||
| 795 | 797 | ||
| 796 | if (atomic_read(&vha->loop_state) == LOOP_READY && | 798 | if (atomic_read(&vha->loop_state) == LOOP_READY && |
| 797 | (ha->current_topology == ISP_CFG_F || | 799 | (ha->current_topology == ISP_CFG_F || |
| 798 | ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) && | 800 | (le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE && |
| 799 | le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE | 801 | req_data_len == MAX_ELS_FRAME_PAYLOAD)) && |
| 800 | && req_data_len == MAX_ELS_FRAME_PAYLOAD)) && | 802 | elreq.options == EXTERNAL_LOOPBACK) { |
| 801 | elreq.options == EXTERNAL_LOOPBACK) { | ||
| 802 | type = "FC_BSG_HST_VENDOR_ECHO_DIAG"; | 803 | type = "FC_BSG_HST_VENDOR_ECHO_DIAG"; |
| 803 | ql_dbg(ql_dbg_user, vha, 0x701e, | 804 | ql_dbg(ql_dbg_user, vha, 0x701e, |
| 804 | "BSG request type: %s.\n", type); | 805 | "BSG request type: %s.\n", type); |
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 51b4179469d1..88748a6ab73f 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c | |||
| @@ -1131,7 +1131,7 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) | |||
| 1131 | 1131 | ||
| 1132 | /* Mailbox registers. */ | 1132 | /* Mailbox registers. */ |
| 1133 | mbx_reg = ®->mailbox0; | 1133 | mbx_reg = ®->mailbox0; |
| 1134 | for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, dmp_reg++) | 1134 | for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++) |
| 1135 | fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg)); | 1135 | fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg)); |
| 1136 | 1136 | ||
| 1137 | /* Transfer sequence registers. */ | 1137 | /* Transfer sequence registers. */ |
| @@ -2090,7 +2090,7 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) | |||
| 2090 | 2090 | ||
| 2091 | /* Mailbox registers. */ | 2091 | /* Mailbox registers. */ |
| 2092 | mbx_reg = ®->mailbox0; | 2092 | mbx_reg = ®->mailbox0; |
| 2093 | for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, dmp_reg++) | 2093 | for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++) |
| 2094 | fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg)); | 2094 | fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg)); |
| 2095 | 2095 | ||
| 2096 | /* Transfer sequence registers. */ | 2096 | /* Transfer sequence registers. */ |
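The two qla_dbg.c hunks fix a copy/paste bug: the loop advanced dmp_reg while reading through mbx_reg, so every slot of fw->mailbox_reg captured the same register. A minimal sketch of the corrected pattern (readw() stands in for RD_REG_WORD(); names are illustrative):

    #include <linux/io.h>
    #include <linux/types.h>

    /* Snapshot nregs consecutive 16-bit registers by advancing the same
     * pointer that is being read. */
    static void snapshot_regs(const u16 __iomem *reg, u16 *out, unsigned int nregs)
    {
            unsigned int cnt;

            for (cnt = 0; cnt < nregs; cnt++, reg++)
                    out[cnt] = readw(reg);
    }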
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index ae119018dfaa..eddbc1218a39 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h | |||
| @@ -3425,6 +3425,7 @@ struct qla_hw_data { | |||
| 3425 | uint8_t max_req_queues; | 3425 | uint8_t max_req_queues; |
| 3426 | uint8_t max_rsp_queues; | 3426 | uint8_t max_rsp_queues; |
| 3427 | uint8_t max_qpairs; | 3427 | uint8_t max_qpairs; |
| 3428 | uint8_t num_qpairs; | ||
| 3428 | struct qla_qpair *base_qpair; | 3429 | struct qla_qpair *base_qpair; |
| 3429 | struct qla_npiv_entry *npiv_info; | 3430 | struct qla_npiv_entry *npiv_info; |
| 3430 | uint16_t nvram_npiv_size; | 3431 | uint16_t nvram_npiv_size; |
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 034743309ada..0391fc317003 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
| @@ -7543,12 +7543,13 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int v | |||
| 7543 | /* Assign available que pair id */ | 7543 | /* Assign available que pair id */ |
| 7544 | mutex_lock(&ha->mq_lock); | 7544 | mutex_lock(&ha->mq_lock); |
| 7545 | qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs); | 7545 | qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs); |
| 7546 | if (qpair_id >= ha->max_qpairs) { | 7546 | if (ha->num_qpairs >= ha->max_qpairs) { |
| 7547 | mutex_unlock(&ha->mq_lock); | 7547 | mutex_unlock(&ha->mq_lock); |
| 7548 | ql_log(ql_log_warn, vha, 0x0183, | 7548 | ql_log(ql_log_warn, vha, 0x0183, |
| 7549 | "No resources to create additional q pair.\n"); | 7549 | "No resources to create additional q pair.\n"); |
| 7550 | goto fail_qid_map; | 7550 | goto fail_qid_map; |
| 7551 | } | 7551 | } |
| 7552 | ha->num_qpairs++; | ||
| 7552 | set_bit(qpair_id, ha->qpair_qid_map); | 7553 | set_bit(qpair_id, ha->qpair_qid_map); |
| 7553 | ha->queue_pair_map[qpair_id] = qpair; | 7554 | ha->queue_pair_map[qpair_id] = qpair; |
| 7554 | qpair->id = qpair_id; | 7555 | qpair->id = qpair_id; |
| @@ -7635,6 +7636,7 @@ fail_rsp: | |||
| 7635 | fail_msix: | 7636 | fail_msix: |
| 7636 | ha->queue_pair_map[qpair_id] = NULL; | 7637 | ha->queue_pair_map[qpair_id] = NULL; |
| 7637 | clear_bit(qpair_id, ha->qpair_qid_map); | 7638 | clear_bit(qpair_id, ha->qpair_qid_map); |
| 7639 | ha->num_qpairs--; | ||
| 7638 | mutex_unlock(&ha->mq_lock); | 7640 | mutex_unlock(&ha->mq_lock); |
| 7639 | fail_qid_map: | 7641 | fail_qid_map: |
| 7640 | kfree(qpair); | 7642 | kfree(qpair); |
| @@ -7660,6 +7662,7 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair) | |||
| 7660 | mutex_lock(&ha->mq_lock); | 7662 | mutex_lock(&ha->mq_lock); |
| 7661 | ha->queue_pair_map[qpair->id] = NULL; | 7663 | ha->queue_pair_map[qpair->id] = NULL; |
| 7662 | clear_bit(qpair->id, ha->qpair_qid_map); | 7664 | clear_bit(qpair->id, ha->qpair_qid_map); |
| 7665 | ha->num_qpairs--; | ||
| 7663 | list_del(&qpair->qp_list_elem); | 7666 | list_del(&qpair->qp_list_elem); |
| 7664 | if (list_empty(&vha->qp_list)) | 7667 | if (list_empty(&vha->qp_list)) |
| 7665 | vha->flags.qpairs_available = 0; | 7668 | vha->flags.qpairs_available = 0; |
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index 66df6cec59da..c61a6a871c8e 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h | |||
| @@ -129,28 +129,16 @@ qla2x00_clear_loop_id(fc_port_t *fcport) { | |||
| 129 | } | 129 | } |
| 130 | 130 | ||
| 131 | static inline void | 131 | static inline void |
| 132 | qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp, | 132 | qla2x00_clean_dsd_pool(struct qla_hw_data *ha, struct crc_context *ctx) |
| 133 | struct qla_tgt_cmd *tc) | ||
| 134 | { | 133 | { |
| 135 | struct dsd_dma *dsd_ptr, *tdsd_ptr; | 134 | struct dsd_dma *dsd, *tdsd; |
| 136 | struct crc_context *ctx; | ||
| 137 | |||
| 138 | if (sp) | ||
| 139 | ctx = (struct crc_context *)GET_CMD_CTX_SP(sp); | ||
| 140 | else if (tc) | ||
| 141 | ctx = (struct crc_context *)tc->ctx; | ||
| 142 | else { | ||
| 143 | BUG(); | ||
| 144 | return; | ||
| 145 | } | ||
| 146 | 135 | ||
| 147 | /* clean up allocated prev pool */ | 136 | /* clean up allocated prev pool */ |
| 148 | list_for_each_entry_safe(dsd_ptr, tdsd_ptr, | 137 | list_for_each_entry_safe(dsd, tdsd, &ctx->dsd_list, list) { |
| 149 | &ctx->dsd_list, list) { | 138 | dma_pool_free(ha->dl_dma_pool, dsd->dsd_addr, |
| 150 | dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr, | 139 | dsd->dsd_list_dma); |
| 151 | dsd_ptr->dsd_list_dma); | 140 | list_del(&dsd->list); |
| 152 | list_del(&dsd_ptr->list); | 141 | kfree(dsd); |
| 153 | kfree(dsd_ptr); | ||
| 154 | } | 142 | } |
| 155 | INIT_LIST_HEAD(&ctx->dsd_list); | 143 | INIT_LIST_HEAD(&ctx->dsd_list); |
| 156 | } | 144 | } |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index aac03504d9a3..2572121b765b 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
| @@ -3282,7 +3282,7 @@ msix_register_fail: | |||
| 3282 | } | 3282 | } |
| 3283 | 3283 | ||
| 3284 | /* Enable MSI-X vector for response queue update for queue 0 */ | 3284 | /* Enable MSI-X vector for response queue update for queue 0 */ |
| 3285 | if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { | 3285 | if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) { |
| 3286 | if (ha->msixbase && ha->mqiobase && | 3286 | if (ha->msixbase && ha->mqiobase && |
| 3287 | (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 || | 3287 | (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 || |
| 3288 | ql2xmqsupport)) | 3288 | ql2xmqsupport)) |
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index a113ab3592a7..cba1fc5e8be9 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c | |||
| @@ -3676,15 +3676,6 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, | |||
| 3676 | qlt_update_host_map(vha, id); | 3676 | qlt_update_host_map(vha, id); |
| 3677 | } | 3677 | } |
| 3678 | 3678 | ||
| 3679 | fc_host_port_name(vha->host) = | ||
| 3680 | wwn_to_u64(vha->port_name); | ||
| 3681 | |||
| 3682 | if (qla_ini_mode_enabled(vha)) | ||
| 3683 | ql_dbg(ql_dbg_mbx, vha, 0x1018, | ||
| 3684 | "FA-WWN portname %016llx (%x)\n", | ||
| 3685 | fc_host_port_name(vha->host), | ||
| 3686 | rptid_entry->vp_status); | ||
| 3687 | |||
| 3688 | set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); | 3679 | set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); |
| 3689 | set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); | 3680 | set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); |
| 3690 | } else { | 3681 | } else { |
| @@ -4821,9 +4812,9 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, | |||
| 4821 | 4812 | ||
| 4822 | memset(mcp->mb, 0 , sizeof(mcp->mb)); | 4813 | memset(mcp->mb, 0 , sizeof(mcp->mb)); |
| 4823 | mcp->mb[0] = MBC_DIAGNOSTIC_ECHO; | 4814 | mcp->mb[0] = MBC_DIAGNOSTIC_ECHO; |
| 4824 | mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64bit address */ | 4815 | /* BIT_6 specifies 64bit address */ |
| 4816 | mcp->mb[1] = mreq->options | BIT_15 | BIT_6; | ||
| 4825 | if (IS_CNA_CAPABLE(ha)) { | 4817 | if (IS_CNA_CAPABLE(ha)) { |
| 4826 | mcp->mb[1] |= BIT_15; | ||
| 4827 | mcp->mb[2] = vha->fcoe_fcf_idx; | 4818 | mcp->mb[2] = vha->fcoe_fcf_idx; |
| 4828 | } | 4819 | } |
| 4829 | mcp->mb[16] = LSW(mreq->rcv_dma); | 4820 | mcp->mb[16] = LSW(mreq->rcv_dma); |
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 1c7957903283..79f050256c55 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
| @@ -630,29 +630,34 @@ qla2x00_sp_free_dma(void *ptr) | |||
| 630 | sp->flags &= ~SRB_CRC_PROT_DMA_VALID; | 630 | sp->flags &= ~SRB_CRC_PROT_DMA_VALID; |
| 631 | } | 631 | } |
| 632 | 632 | ||
| 633 | if (!ctx) | ||
| 634 | goto end; | ||
| 635 | |||
| 633 | if (sp->flags & SRB_CRC_CTX_DSD_VALID) { | 636 | if (sp->flags & SRB_CRC_CTX_DSD_VALID) { |
| 634 | /* List assured to be having elements */ | 637 | /* List assured to be having elements */ |
| 635 | qla2x00_clean_dsd_pool(ha, sp, NULL); | 638 | qla2x00_clean_dsd_pool(ha, ctx); |
| 636 | sp->flags &= ~SRB_CRC_CTX_DSD_VALID; | 639 | sp->flags &= ~SRB_CRC_CTX_DSD_VALID; |
| 637 | } | 640 | } |
| 638 | 641 | ||
| 639 | if (sp->flags & SRB_CRC_CTX_DMA_VALID) { | 642 | if (sp->flags & SRB_CRC_CTX_DMA_VALID) { |
| 640 | dma_pool_free(ha->dl_dma_pool, ctx, | 643 | struct crc_context *ctx0 = ctx; |
| 641 | ((struct crc_context *)ctx)->crc_ctx_dma); | 644 | |
| 645 | dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma); | ||
| 642 | sp->flags &= ~SRB_CRC_CTX_DMA_VALID; | 646 | sp->flags &= ~SRB_CRC_CTX_DMA_VALID; |
| 643 | } | 647 | } |
| 644 | 648 | ||
| 645 | if (sp->flags & SRB_FCP_CMND_DMA_VALID) { | 649 | if (sp->flags & SRB_FCP_CMND_DMA_VALID) { |
| 646 | struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx; | 650 | struct ct6_dsd *ctx1 = ctx; |
| 647 | 651 | ||
| 648 | dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd, | 652 | dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd, |
| 649 | ctx1->fcp_cmnd_dma); | 653 | ctx1->fcp_cmnd_dma); |
| 650 | list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list); | 654 | list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list); |
| 651 | ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt; | 655 | ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt; |
| 652 | ha->gbl_dsd_avail += ctx1->dsd_use_cnt; | 656 | ha->gbl_dsd_avail += ctx1->dsd_use_cnt; |
| 653 | mempool_free(ctx1, ha->ctx_mempool); | 657 | mempool_free(ctx1, ha->ctx_mempool); |
| 654 | } | 658 | } |
| 655 | 659 | ||
| 660 | end: | ||
| 656 | CMD_SP(cmd) = NULL; | 661 | CMD_SP(cmd) = NULL; |
| 657 | qla2x00_rel_sp(sp); | 662 | qla2x00_rel_sp(sp); |
| 658 | } | 663 | } |
| @@ -699,21 +704,24 @@ qla2xxx_qpair_sp_free_dma(void *ptr) | |||
| 699 | sp->flags &= ~SRB_CRC_PROT_DMA_VALID; | 704 | sp->flags &= ~SRB_CRC_PROT_DMA_VALID; |
| 700 | } | 705 | } |
| 701 | 706 | ||
| 707 | if (!ctx) | ||
| 708 | goto end; | ||
| 709 | |||
| 702 | if (sp->flags & SRB_CRC_CTX_DSD_VALID) { | 710 | if (sp->flags & SRB_CRC_CTX_DSD_VALID) { |
| 703 | /* List assured to be having elements */ | 711 | /* List assured to be having elements */ |
| 704 | qla2x00_clean_dsd_pool(ha, sp, NULL); | 712 | qla2x00_clean_dsd_pool(ha, ctx); |
| 705 | sp->flags &= ~SRB_CRC_CTX_DSD_VALID; | 713 | sp->flags &= ~SRB_CRC_CTX_DSD_VALID; |
| 706 | } | 714 | } |
| 707 | 715 | ||
| 708 | if (sp->flags & SRB_CRC_CTX_DMA_VALID) { | 716 | if (sp->flags & SRB_CRC_CTX_DMA_VALID) { |
| 709 | dma_pool_free(ha->dl_dma_pool, ctx, | 717 | struct crc_context *ctx0 = ctx; |
| 710 | ((struct crc_context *)ctx)->crc_ctx_dma); | 718 | |
| 719 | dma_pool_free(ha->dl_dma_pool, ctx, ctx0->crc_ctx_dma); | ||
| 711 | sp->flags &= ~SRB_CRC_CTX_DMA_VALID; | 720 | sp->flags &= ~SRB_CRC_CTX_DMA_VALID; |
| 712 | } | 721 | } |
| 713 | 722 | ||
| 714 | if (sp->flags & SRB_FCP_CMND_DMA_VALID) { | 723 | if (sp->flags & SRB_FCP_CMND_DMA_VALID) { |
| 715 | struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx; | 724 | struct ct6_dsd *ctx1 = ctx; |
| 716 | |||
| 717 | dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd, | 725 | dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd, |
| 718 | ctx1->fcp_cmnd_dma); | 726 | ctx1->fcp_cmnd_dma); |
| 719 | list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list); | 727 | list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list); |
| @@ -721,7 +729,7 @@ qla2xxx_qpair_sp_free_dma(void *ptr) | |||
| 721 | ha->gbl_dsd_avail += ctx1->dsd_use_cnt; | 729 | ha->gbl_dsd_avail += ctx1->dsd_use_cnt; |
| 722 | mempool_free(ctx1, ha->ctx_mempool); | 730 | mempool_free(ctx1, ha->ctx_mempool); |
| 723 | } | 731 | } |
| 724 | 732 | end: | |
| 725 | CMD_SP(cmd) = NULL; | 733 | CMD_SP(cmd) = NULL; |
| 726 | qla2xxx_rel_qpair_sp(sp->qpair, sp); | 734 | qla2xxx_rel_qpair_sp(sp->qpair, sp); |
| 727 | } | 735 | } |
| @@ -1632,7 +1640,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha) | |||
| 1632 | void | 1640 | void |
| 1633 | qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) | 1641 | qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) |
| 1634 | { | 1642 | { |
| 1635 | int que, cnt; | 1643 | int que, cnt, status; |
| 1636 | unsigned long flags; | 1644 | unsigned long flags; |
| 1637 | srb_t *sp; | 1645 | srb_t *sp; |
| 1638 | struct qla_hw_data *ha = vha->hw; | 1646 | struct qla_hw_data *ha = vha->hw; |
| @@ -1662,8 +1670,12 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) | |||
| 1662 | */ | 1670 | */ |
| 1663 | sp_get(sp); | 1671 | sp_get(sp); |
| 1664 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 1672 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
| 1665 | qla2xxx_eh_abort(GET_CMD_SP(sp)); | 1673 | status = qla2xxx_eh_abort(GET_CMD_SP(sp)); |
| 1666 | spin_lock_irqsave(&ha->hardware_lock, flags); | 1674 | spin_lock_irqsave(&ha->hardware_lock, flags); |
| 1675 | /* Get rid of the extra reference on an immediate exit | ||
| 1676 | * from qla2xxx_eh_abort */ | ||
| 1677 | if (status == FAILED && (qla2x00_isp_reg_stat(ha))) | ||
| 1678 | atomic_dec(&sp->ref_count); | ||
| 1667 | } | 1679 | } |
| 1668 | req->outstanding_cmds[cnt] = NULL; | 1680 | req->outstanding_cmds[cnt] = NULL; |
| 1669 | sp->done(sp, res); | 1681 | sp->done(sp, res); |
| @@ -2623,10 +2635,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 2623 | 2635 | ||
| 2624 | if (mem_only) { | 2636 | if (mem_only) { |
| 2625 | if (pci_enable_device_mem(pdev)) | 2637 | if (pci_enable_device_mem(pdev)) |
| 2626 | goto probe_out; | 2638 | return ret; |
| 2627 | } else { | 2639 | } else { |
| 2628 | if (pci_enable_device(pdev)) | 2640 | if (pci_enable_device(pdev)) |
| 2629 | goto probe_out; | 2641 | return ret; |
| 2630 | } | 2642 | } |
| 2631 | 2643 | ||
| 2632 | /* This may fail but that's ok */ | 2644 | /* This may fail but that's ok */ |
| @@ -2636,7 +2648,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 2636 | if (!ha) { | 2648 | if (!ha) { |
| 2637 | ql_log_pci(ql_log_fatal, pdev, 0x0009, | 2649 | ql_log_pci(ql_log_fatal, pdev, 0x0009, |
| 2638 | "Unable to allocate memory for ha.\n"); | 2650 | "Unable to allocate memory for ha.\n"); |
| 2639 | goto probe_out; | 2651 | goto disable_device; |
| 2640 | } | 2652 | } |
| 2641 | ql_dbg_pci(ql_dbg_init, pdev, 0x000a, | 2653 | ql_dbg_pci(ql_dbg_init, pdev, 0x000a, |
| 2642 | "Memory allocated for ha=%p.\n", ha); | 2654 | "Memory allocated for ha=%p.\n", ha); |
| @@ -3254,7 +3266,7 @@ iospace_config_failed: | |||
| 3254 | pci_release_selected_regions(ha->pdev, ha->bars); | 3266 | pci_release_selected_regions(ha->pdev, ha->bars); |
| 3255 | kfree(ha); | 3267 | kfree(ha); |
| 3256 | 3268 | ||
| 3257 | probe_out: | 3269 | disable_device: |
| 3258 | pci_disable_device(pdev); | 3270 | pci_disable_device(pdev); |
| 3259 | return ret; | 3271 | return ret; |
| 3260 | } | 3272 | } |
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 0e03ca2ab3e5..e766d8412384 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c | |||
| @@ -2245,11 +2245,13 @@ static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd) | |||
| 2245 | pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt, | 2245 | pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt, |
| 2246 | cmd->dma_data_direction); | 2246 | cmd->dma_data_direction); |
| 2247 | 2247 | ||
| 2248 | if (!cmd->ctx) | ||
| 2249 | return; | ||
| 2250 | |||
| 2248 | if (cmd->ctx_dsd_alloced) | 2251 | if (cmd->ctx_dsd_alloced) |
| 2249 | qla2x00_clean_dsd_pool(ha, NULL, cmd); | 2252 | qla2x00_clean_dsd_pool(ha, cmd->ctx); |
| 2250 | 2253 | ||
| 2251 | if (cmd->ctx) | 2254 | dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma); |
| 2252 | dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma); | ||
| 2253 | } | 2255 | } |
| 2254 | 2256 | ||
| 2255 | static int qlt_check_reserve_free_req(struct scsi_qla_host *vha, | 2257 | static int qlt_check_reserve_free_req(struct scsi_qla_host *vha, |
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c index 8a58ef3adab4..c197972a3e2d 100644 --- a/drivers/scsi/qla2xxx/qla_tmpl.c +++ b/drivers/scsi/qla2xxx/qla_tmpl.c | |||
| @@ -371,7 +371,7 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha, | |||
| 371 | goto done; | 371 | goto done; |
| 372 | } | 372 | } |
| 373 | 373 | ||
| 374 | if (end <= start || start == 0 || end == 0) { | 374 | if (end < start || start == 0 || end == 0) { |
| 375 | ql_dbg(ql_dbg_misc, vha, 0xd023, | 375 | ql_dbg(ql_dbg_misc, vha, 0xd023, |
| 376 | "%s: unusable range (start=%x end=%x)\n", __func__, | 376 | "%s: unusable range (start=%x end=%x)\n", __func__, |
| 377 | ent->t262.end_addr, ent->t262.start_addr); | 377 | ent->t262.end_addr, ent->t262.start_addr); |
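The qla_tmpl.c change relaxes the sanity check so that a range whose end equals its start (a single location) is no longer rejected; only a reversed or zero address range is unusable. A tiny illustration, assuming dword-granular addresses (the granularity is an assumption of this sketch, not taken from the template code):

    #include <stdint.h>

    /* Inclusive range [start, end]: end == start is one dword, so only
     * end < start (or a zero address) is treated as unusable. */
    static int t262_range_dwords(uint32_t start, uint32_t end)
    {
            if (end < start || start == 0 || end == 0)
                    return -1;                  /* unusable range */
            return (int)((end - start) / 4 + 1);
    }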
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 17249c3650fe..dc095a292c61 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c | |||
| @@ -1404,7 +1404,7 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | |||
| 1404 | arr[4] = SDEBUG_LONG_INQ_SZ - 5; | 1404 | arr[4] = SDEBUG_LONG_INQ_SZ - 5; |
| 1405 | arr[5] = (int)have_dif_prot; /* PROTECT bit */ | 1405 | arr[5] = (int)have_dif_prot; /* PROTECT bit */ |
| 1406 | if (sdebug_vpd_use_hostno == 0) | 1406 | if (sdebug_vpd_use_hostno == 0) |
| 1407 | arr[5] = 0x10; /* claim: implicit TGPS */ | 1407 | arr[5] |= 0x10; /* claim: implicit TPGS */ |
| 1408 | arr[6] = 0x10; /* claim: MultiP */ | 1408 | arr[6] = 0x10; /* claim: MultiP */ |
| 1409 | /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */ | 1409 | /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */ |
| 1410 | arr[7] = 0xa; /* claim: LINKED + CMDQUE */ | 1410 | arr[7] = 0xa; /* claim: LINKED + CMDQUE */ |
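In standard INQUIRY data, byte 5 carries both the PROTECT bit (bit 0) and the TPGS field (bits 5:4), so the implicit-TPGS claim has to be OR-ed in; the plain assignment fixed above silently cleared the PROTECT bit set on the previous line. A small sketch of the intended composition (illustrative only):

    #include <stdbool.h>
    #include <stdint.h>

    static uint8_t inquiry_byte5(bool have_dif_prot, bool claim_implicit_tpgs)
    {
            uint8_t b5 = have_dif_prot ? 0x01 : 0x00;   /* PROTECT bit */

            if (claim_implicit_tpgs)
                    b5 |= 0x10;                         /* TPGS = 01b (implicit) */
            return b5;
    }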
diff --git a/drivers/staging/ccree/Kconfig b/drivers/staging/ccree/Kconfig index ae627049c499..4be87f503e3b 100644 --- a/drivers/staging/ccree/Kconfig +++ b/drivers/staging/ccree/Kconfig | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | config CRYPTO_DEV_CCREE | 1 | config CRYPTO_DEV_CCREE |
| 2 | tristate "Support for ARM TrustZone CryptoCell C7XX family of Crypto accelerators" | 2 | tristate "Support for ARM TrustZone CryptoCell C7XX family of Crypto accelerators" |
| 3 | depends on CRYPTO_HW && OF && HAS_DMA | 3 | depends on CRYPTO && CRYPTO_HW && OF && HAS_DMA |
| 4 | default n | 4 | default n |
| 5 | select CRYPTO_HASH | 5 | select CRYPTO_HASH |
| 6 | select CRYPTO_BLKCIPHER | 6 | select CRYPTO_BLKCIPHER |
diff --git a/drivers/staging/ccree/ssi_buffer_mgr.c b/drivers/staging/ccree/ssi_buffer_mgr.c index 038e2ff5e545..6471d3d2d375 100644 --- a/drivers/staging/ccree/ssi_buffer_mgr.c +++ b/drivers/staging/ccree/ssi_buffer_mgr.c | |||
| @@ -216,7 +216,8 @@ void ssi_buffer_mgr_copy_scatterlist_portion( | |||
| 216 | uint32_t nents, lbytes; | 216 | uint32_t nents, lbytes; |
| 217 | 217 | ||
| 218 | nents = ssi_buffer_mgr_get_sgl_nents(sg, end, &lbytes, NULL); | 218 | nents = ssi_buffer_mgr_get_sgl_nents(sg, end, &lbytes, NULL); |
| 219 | sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip), 0, (direct == SSI_SG_TO_BUF)); | 219 | sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip, |
| 220 | (direct == SSI_SG_TO_BUF)); | ||
| 220 | } | 221 | } |
| 221 | 222 | ||
| 222 | static inline int ssi_buffer_mgr_render_buff_to_mlli( | 223 | static inline int ssi_buffer_mgr_render_buff_to_mlli( |
diff --git a/drivers/staging/iio/cdc/ad7152.c b/drivers/staging/iio/cdc/ad7152.c index dc6ecd824365..ff10d1f0a7e4 100644 --- a/drivers/staging/iio/cdc/ad7152.c +++ b/drivers/staging/iio/cdc/ad7152.c | |||
| @@ -231,16 +231,12 @@ static int ad7152_write_raw_samp_freq(struct device *dev, int val) | |||
| 231 | if (i >= ARRAY_SIZE(ad7152_filter_rate_table)) | 231 | if (i >= ARRAY_SIZE(ad7152_filter_rate_table)) |
| 232 | i = ARRAY_SIZE(ad7152_filter_rate_table) - 1; | 232 | i = ARRAY_SIZE(ad7152_filter_rate_table) - 1; |
| 233 | 233 | ||
| 234 | mutex_lock(&chip->state_lock); | ||
| 235 | ret = i2c_smbus_write_byte_data(chip->client, | 234 | ret = i2c_smbus_write_byte_data(chip->client, |
| 236 | AD7152_REG_CFG2, AD7152_CFG2_OSR(i)); | 235 | AD7152_REG_CFG2, AD7152_CFG2_OSR(i)); |
| 237 | if (ret < 0) { | 236 | if (ret < 0) |
| 238 | mutex_unlock(&chip->state_lock); | ||
| 239 | return ret; | 237 | return ret; |
| 240 | } | ||
| 241 | 238 | ||
| 242 | chip->filter_rate_setup = i; | 239 | chip->filter_rate_setup = i; |
| 243 | mutex_unlock(&chip->state_lock); | ||
| 244 | 240 | ||
| 245 | return ret; | 241 | return ret; |
| 246 | } | 242 | } |
diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c index 2e1bd47337fd..e6727cefde05 100644 --- a/drivers/staging/lustre/lustre/lov/lov_pack.c +++ b/drivers/staging/lustre/lustre/lov/lov_pack.c | |||
| @@ -293,18 +293,10 @@ int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm, | |||
| 293 | size_t lmmk_size; | 293 | size_t lmmk_size; |
| 294 | size_t lum_size; | 294 | size_t lum_size; |
| 295 | int rc; | 295 | int rc; |
| 296 | mm_segment_t seg; | ||
| 297 | 296 | ||
| 298 | if (!lsm) | 297 | if (!lsm) |
| 299 | return -ENODATA; | 298 | return -ENODATA; |
| 300 | 299 | ||
| 301 | /* | ||
| 302 | * "Switch to kernel segment" to allow copying from kernel space by | ||
| 303 | * copy_{to,from}_user(). | ||
| 304 | */ | ||
| 305 | seg = get_fs(); | ||
| 306 | set_fs(KERNEL_DS); | ||
| 307 | |||
| 308 | if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3) { | 300 | if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3) { |
| 309 | CERROR("bad LSM MAGIC: 0x%08X != 0x%08X nor 0x%08X\n", | 301 | CERROR("bad LSM MAGIC: 0x%08X != 0x%08X nor 0x%08X\n", |
| 310 | lsm->lsm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3); | 302 | lsm->lsm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3); |
| @@ -406,6 +398,5 @@ int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm, | |||
| 406 | out_free: | 398 | out_free: |
| 407 | kvfree(lmmk); | 399 | kvfree(lmmk); |
| 408 | out: | 400 | out: |
| 409 | set_fs(seg); | ||
| 410 | return rc; | 401 | return rc; |
| 411 | } | 402 | } |
diff --git a/drivers/staging/media/atomisp/i2c/Makefile b/drivers/staging/media/atomisp/i2c/Makefile index 8ea01904c0ea..466517c7c8e6 100644 --- a/drivers/staging/media/atomisp/i2c/Makefile +++ b/drivers/staging/media/atomisp/i2c/Makefile | |||
| @@ -19,5 +19,3 @@ obj-$(CONFIG_VIDEO_AP1302) += ap1302.o | |||
| 19 | 19 | ||
| 20 | obj-$(CONFIG_VIDEO_LM3554) += lm3554.o | 20 | obj-$(CONFIG_VIDEO_LM3554) += lm3554.o |
| 21 | 21 | ||
| 22 | ccflags-y += -Werror | ||
| 23 | |||
diff --git a/drivers/staging/media/atomisp/i2c/imx/Makefile b/drivers/staging/media/atomisp/i2c/imx/Makefile index 1d7f7ab94cac..6b13a3a66e49 100644 --- a/drivers/staging/media/atomisp/i2c/imx/Makefile +++ b/drivers/staging/media/atomisp/i2c/imx/Makefile | |||
| @@ -4,5 +4,3 @@ imx1x5-objs := imx.o drv201.o ad5816g.o dw9714.o dw9719.o dw9718.o vcm.o otp.o o | |||
| 4 | 4 | ||
| 5 | ov8858_driver-objs := ../ov8858.o dw9718.o vcm.o | 5 | ov8858_driver-objs := ../ov8858.o dw9718.o vcm.o |
| 6 | obj-$(CONFIG_VIDEO_OV8858) += ov8858_driver.o | 6 | obj-$(CONFIG_VIDEO_OV8858) += ov8858_driver.o |
| 7 | |||
| 8 | ccflags-y += -Werror | ||
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/Makefile b/drivers/staging/media/atomisp/i2c/ov5693/Makefile index fceb9e9b881b..c9c0e1245858 100644 --- a/drivers/staging/media/atomisp/i2c/ov5693/Makefile +++ b/drivers/staging/media/atomisp/i2c/ov5693/Makefile | |||
| @@ -1,3 +1 @@ | |||
| 1 | obj-$(CONFIG_VIDEO_OV5693) += ov5693.o | obj-$(CONFIG_VIDEO_OV5693) += ov5693.o | |
| 2 | |||
| 3 | ccflags-y += -Werror | ||
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/Makefile b/drivers/staging/media/atomisp/pci/atomisp2/Makefile index 3fa7c1c1479f..f126a89a08e9 100644 --- a/drivers/staging/media/atomisp/pci/atomisp2/Makefile +++ b/drivers/staging/media/atomisp/pci/atomisp2/Makefile | |||
| @@ -351,5 +351,5 @@ DEFINES := -DHRT_HW -DHRT_ISP_CSS_CUSTOM_HOST -DHRT_USE_VIR_ADDRS -D__HOST__ | |||
| 351 | DEFINES += -DATOMISP_POSTFIX=\"css2400b0_v21\" -DISP2400B0 | 351 | DEFINES += -DATOMISP_POSTFIX=\"css2400b0_v21\" -DISP2400B0 |
| 352 | DEFINES += -DSYSTEM_hive_isp_css_2400_system -DISP2400 | 352 | DEFINES += -DSYSTEM_hive_isp_css_2400_system -DISP2400 |
| 353 | 353 | ||
| 354 | ccflags-y += $(INCLUDES) $(DEFINES) -fno-common -Werror | 354 | ccflags-y += $(INCLUDES) $(DEFINES) -fno-common |
| 355 | 355 | ||
diff --git a/drivers/staging/rtl8188eu/os_dep/mon.c b/drivers/staging/rtl8188eu/os_dep/mon.c index cfe37eb026d6..859d0d6051cd 100644 --- a/drivers/staging/rtl8188eu/os_dep/mon.c +++ b/drivers/staging/rtl8188eu/os_dep/mon.c | |||
| @@ -152,7 +152,7 @@ static const struct net_device_ops mon_netdev_ops = { | |||
| 152 | static void mon_setup(struct net_device *dev) | 152 | static void mon_setup(struct net_device *dev) |
| 153 | { | 153 | { |
| 154 | dev->netdev_ops = &mon_netdev_ops; | 154 | dev->netdev_ops = &mon_netdev_ops; |
| 155 | dev->destructor = free_netdev; | 155 | dev->needs_free_netdev = true; |
| 156 | ether_setup(dev); | 156 | ether_setup(dev); |
| 157 | dev->priv_flags |= IFF_NO_QUEUE; | 157 | dev->priv_flags |= IFF_NO_QUEUE; |
| 158 | dev->type = ARPHRD_IEEE80211; | 158 | dev->type = ARPHRD_IEEE80211; |
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c index 36c3189fc4b7..bd4352fe2de3 100644 --- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c +++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c | |||
| @@ -2667,7 +2667,8 @@ static int rtw_cfg80211_add_monitor_if (struct adapter *padapter, char *name, st | |||
| 2667 | mon_ndev->type = ARPHRD_IEEE80211_RADIOTAP; | 2667 | mon_ndev->type = ARPHRD_IEEE80211_RADIOTAP; |
| 2668 | strncpy(mon_ndev->name, name, IFNAMSIZ); | 2668 | strncpy(mon_ndev->name, name, IFNAMSIZ); |
| 2669 | mon_ndev->name[IFNAMSIZ - 1] = 0; | 2669 | mon_ndev->name[IFNAMSIZ - 1] = 0; |
| 2670 | mon_ndev->destructor = rtw_ndev_destructor; | 2670 | mon_ndev->needs_free_netdev = true; |
| 2671 | mon_ndev->priv_destructor = rtw_ndev_destructor; | ||
| 2671 | 2672 | ||
| 2672 | mon_ndev->netdev_ops = &rtw_cfg80211_monitor_if_ops; | 2673 | mon_ndev->netdev_ops = &rtw_cfg80211_monitor_if_ops; |
| 2673 | 2674 | ||
diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c index f83cfc76505c..021589913681 100644 --- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c +++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c | |||
| @@ -1207,8 +1207,6 @@ void rtw_ndev_destructor(struct net_device *ndev) | |||
| 1207 | 1207 | ||
| 1208 | if (ndev->ieee80211_ptr) | 1208 | if (ndev->ieee80211_ptr) |
| 1209 | kfree((u8 *)ndev->ieee80211_ptr); | 1209 | kfree((u8 *)ndev->ieee80211_ptr); |
| 1210 | |||
| 1211 | free_netdev(ndev); | ||
| 1212 | } | 1210 | } |
| 1213 | 1211 | ||
| 1214 | void rtw_dev_unload(struct adapter *padapter) | 1212 | void rtw_dev_unload(struct adapter *padapter) |
diff --git a/drivers/staging/rtl8723bs/os_dep/osdep_service.c b/drivers/staging/rtl8723bs/os_dep/osdep_service.c index 02db59e8b593..aa16d1ab955b 100644 --- a/drivers/staging/rtl8723bs/os_dep/osdep_service.c +++ b/drivers/staging/rtl8723bs/os_dep/osdep_service.c | |||
| @@ -160,7 +160,7 @@ static int isFileReadable(char *path) | |||
| 160 | oldfs = get_fs(); set_fs(get_ds()); | 160 | oldfs = get_fs(); set_fs(get_ds()); |
| 161 | 161 | ||
| 162 | if (1!=readFile(fp, &buf, 1)) | 162 | if (1!=readFile(fp, &buf, 1)) |
| 163 | ret = PTR_ERR(fp); | 163 | ret = -EINVAL; |
| 164 | 164 | ||
| 165 | set_fs(oldfs); | 165 | set_fs(oldfs); |
| 166 | filp_close(fp, NULL); | 166 | filp_close(fp, NULL); |
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 26a9bcd5ee6a..3fdca2cdd8da 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c | |||
| @@ -1279,6 +1279,18 @@ iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr, | |||
| 1279 | */ | 1279 | */ |
| 1280 | if (dump_payload) | 1280 | if (dump_payload) |
| 1281 | goto after_immediate_data; | 1281 | goto after_immediate_data; |
| 1282 | /* | ||
| 1283 | * Check for the underflow case where both EDTL and immediate data payload | ||
| 1284 | * exceed what is presented by the CDB's TRANSFER LENGTH, and what has | ||
| 1285 | * already been set in target_cmd_size_check() as se_cmd->data_length. | ||
| 1286 | * | ||
| 1287 | * For this special case, fail the command and dump the immediate data | ||
| 1288 | * payload. | ||
| 1289 | */ | ||
| 1290 | if (cmd->first_burst_len > cmd->se_cmd.data_length) { | ||
| 1291 | cmd->sense_reason = TCM_INVALID_CDB_FIELD; | ||
| 1292 | goto after_immediate_data; | ||
| 1293 | } | ||
| 1282 | 1294 | ||
| 1283 | immed_ret = iscsit_handle_immediate_data(cmd, hdr, | 1295 | immed_ret = iscsit_handle_immediate_data(cmd, hdr, |
| 1284 | cmd->first_burst_len); | 1296 | cmd->first_burst_len); |
| @@ -3790,6 +3802,8 @@ int iscsi_target_tx_thread(void *arg) | |||
| 3790 | { | 3802 | { |
| 3791 | int ret = 0; | 3803 | int ret = 0; |
| 3792 | struct iscsi_conn *conn = arg; | 3804 | struct iscsi_conn *conn = arg; |
| 3805 | bool conn_freed = false; | ||
| 3806 | |||
| 3793 | /* | 3807 | /* |
| 3794 | * Allow ourselves to be interrupted by SIGINT so that a | 3808 | * Allow ourselves to be interrupted by SIGINT so that a |
| 3795 | * connection recovery / failure event can be triggered externally. | 3809 | * connection recovery / failure event can be triggered externally. |
| @@ -3815,12 +3829,14 @@ get_immediate: | |||
| 3815 | goto transport_err; | 3829 | goto transport_err; |
| 3816 | 3830 | ||
| 3817 | ret = iscsit_handle_response_queue(conn); | 3831 | ret = iscsit_handle_response_queue(conn); |
| 3818 | if (ret == 1) | 3832 | if (ret == 1) { |
| 3819 | goto get_immediate; | 3833 | goto get_immediate; |
| 3820 | else if (ret == -ECONNRESET) | 3834 | } else if (ret == -ECONNRESET) { |
| 3835 | conn_freed = true; | ||
| 3821 | goto out; | 3836 | goto out; |
| 3822 | else if (ret < 0) | 3837 | } else if (ret < 0) { |
| 3823 | goto transport_err; | 3838 | goto transport_err; |
| 3839 | } | ||
| 3824 | } | 3840 | } |
| 3825 | 3841 | ||
| 3826 | transport_err: | 3842 | transport_err: |
| @@ -3830,8 +3846,13 @@ transport_err: | |||
| 3830 | * responsible for cleaning up the early connection failure. | 3846 | * responsible for cleaning up the early connection failure. |
| 3831 | */ | 3847 | */ |
| 3832 | if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN) | 3848 | if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN) |
| 3833 | iscsit_take_action_for_connection_exit(conn); | 3849 | iscsit_take_action_for_connection_exit(conn, &conn_freed); |
| 3834 | out: | 3850 | out: |
| 3851 | if (!conn_freed) { | ||
| 3852 | while (!kthread_should_stop()) { | ||
| 3853 | msleep(100); | ||
| 3854 | } | ||
| 3855 | } | ||
| 3835 | return 0; | 3856 | return 0; |
| 3836 | } | 3857 | } |
| 3837 | 3858 | ||
| @@ -4004,6 +4025,7 @@ int iscsi_target_rx_thread(void *arg) | |||
| 4004 | { | 4025 | { |
| 4005 | int rc; | 4026 | int rc; |
| 4006 | struct iscsi_conn *conn = arg; | 4027 | struct iscsi_conn *conn = arg; |
| 4028 | bool conn_freed = false; | ||
| 4007 | 4029 | ||
| 4008 | /* | 4030 | /* |
| 4009 | * Allow ourselves to be interrupted by SIGINT so that a | 4031 | * Allow ourselves to be interrupted by SIGINT so that a |
| @@ -4016,7 +4038,7 @@ int iscsi_target_rx_thread(void *arg) | |||
| 4016 | */ | 4038 | */ |
| 4017 | rc = wait_for_completion_interruptible(&conn->rx_login_comp); | 4039 | rc = wait_for_completion_interruptible(&conn->rx_login_comp); |
| 4018 | if (rc < 0 || iscsi_target_check_conn_state(conn)) | 4040 | if (rc < 0 || iscsi_target_check_conn_state(conn)) |
| 4019 | return 0; | 4041 | goto out; |
| 4020 | 4042 | ||
| 4021 | if (!conn->conn_transport->iscsit_get_rx_pdu) | 4043 | if (!conn->conn_transport->iscsit_get_rx_pdu) |
| 4022 | return 0; | 4044 | return 0; |
| @@ -4025,7 +4047,15 @@ int iscsi_target_rx_thread(void *arg) | |||
| 4025 | 4047 | ||
| 4026 | if (!signal_pending(current)) | 4048 | if (!signal_pending(current)) |
| 4027 | atomic_set(&conn->transport_failed, 1); | 4049 | atomic_set(&conn->transport_failed, 1); |
| 4028 | iscsit_take_action_for_connection_exit(conn); | 4050 | iscsit_take_action_for_connection_exit(conn, &conn_freed); |
| 4051 | |||
| 4052 | out: | ||
| 4053 | if (!conn_freed) { | ||
| 4054 | while (!kthread_should_stop()) { | ||
| 4055 | msleep(100); | ||
| 4056 | } | ||
| 4057 | } | ||
| 4058 | |||
| 4029 | return 0; | 4059 | return 0; |
| 4030 | } | 4060 | } |
| 4031 | 4061 | ||
| @@ -4405,8 +4435,11 @@ static void iscsit_logout_post_handler_closesession( | |||
| 4405 | * always sleep waiting for RX/TX thread shutdown to complete | 4435 | * always sleep waiting for RX/TX thread shutdown to complete |
| 4406 | * within iscsit_close_connection(). | 4436 | * within iscsit_close_connection(). |
| 4407 | */ | 4437 | */ |
| 4408 | if (!conn->conn_transport->rdma_shutdown) | 4438 | if (!conn->conn_transport->rdma_shutdown) { |
| 4409 | sleep = cmpxchg(&conn->tx_thread_active, true, false); | 4439 | sleep = cmpxchg(&conn->tx_thread_active, true, false); |
| 4440 | if (!sleep) | ||
| 4441 | return; | ||
| 4442 | } | ||
| 4410 | 4443 | ||
| 4411 | atomic_set(&conn->conn_logout_remove, 0); | 4444 | atomic_set(&conn->conn_logout_remove, 0); |
| 4412 | complete(&conn->conn_logout_comp); | 4445 | complete(&conn->conn_logout_comp); |
| @@ -4422,8 +4455,11 @@ static void iscsit_logout_post_handler_samecid( | |||
| 4422 | { | 4455 | { |
| 4423 | int sleep = 1; | 4456 | int sleep = 1; |
| 4424 | 4457 | ||
| 4425 | if (!conn->conn_transport->rdma_shutdown) | 4458 | if (!conn->conn_transport->rdma_shutdown) { |
| 4426 | sleep = cmpxchg(&conn->tx_thread_active, true, false); | 4459 | sleep = cmpxchg(&conn->tx_thread_active, true, false); |
| 4460 | if (!sleep) | ||
| 4461 | return; | ||
| 4462 | } | ||
| 4427 | 4463 | ||
| 4428 | atomic_set(&conn->conn_logout_remove, 0); | 4464 | atomic_set(&conn->conn_logout_remove, 0); |
| 4429 | complete(&conn->conn_logout_comp); | 4465 | complete(&conn->conn_logout_comp); |
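The new conn_freed plumbing and the msleep() loops in the RX/TX (and login) threads follow a common kthread shutdown rule: a kthread whose task_struct may still be passed to kthread_stop() by another context must not simply return, or the stopper would act on a thread that has already exited. A minimal sketch of the parking pattern used above:

    #include <linux/kthread.h>
    #include <linux/delay.h>

    /* If this thread did not tear down the connection itself, park until
     * the owner calls kthread_stop(), then exit. */
    static void park_until_stopped(void)
    {
            while (!kthread_should_stop())
                    msleep(100);
    }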
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c index 9a96e17bf7cd..7fe2aa73cff6 100644 --- a/drivers/target/iscsi/iscsi_target_erl0.c +++ b/drivers/target/iscsi/iscsi_target_erl0.c | |||
| @@ -930,8 +930,10 @@ static void iscsit_handle_connection_cleanup(struct iscsi_conn *conn) | |||
| 930 | } | 930 | } |
| 931 | } | 931 | } |
| 932 | 932 | ||
| 933 | void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn) | 933 | void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn, bool *conn_freed) |
| 934 | { | 934 | { |
| 935 | *conn_freed = false; | ||
| 936 | |||
| 935 | spin_lock_bh(&conn->state_lock); | 937 | spin_lock_bh(&conn->state_lock); |
| 936 | if (atomic_read(&conn->connection_exit)) { | 938 | if (atomic_read(&conn->connection_exit)) { |
| 937 | spin_unlock_bh(&conn->state_lock); | 939 | spin_unlock_bh(&conn->state_lock); |
| @@ -942,6 +944,7 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn) | |||
| 942 | if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) { | 944 | if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) { |
| 943 | spin_unlock_bh(&conn->state_lock); | 945 | spin_unlock_bh(&conn->state_lock); |
| 944 | iscsit_close_connection(conn); | 946 | iscsit_close_connection(conn); |
| 947 | *conn_freed = true; | ||
| 945 | return; | 948 | return; |
| 946 | } | 949 | } |
| 947 | 950 | ||
| @@ -955,4 +958,5 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn) | |||
| 955 | spin_unlock_bh(&conn->state_lock); | 958 | spin_unlock_bh(&conn->state_lock); |
| 956 | 959 | ||
| 957 | iscsit_handle_connection_cleanup(conn); | 960 | iscsit_handle_connection_cleanup(conn); |
| 961 | *conn_freed = true; | ||
| 958 | } | 962 | } |
diff --git a/drivers/target/iscsi/iscsi_target_erl0.h b/drivers/target/iscsi/iscsi_target_erl0.h index 60e69e2af6ed..3822d9cd1230 100644 --- a/drivers/target/iscsi/iscsi_target_erl0.h +++ b/drivers/target/iscsi/iscsi_target_erl0.h | |||
| @@ -15,6 +15,6 @@ extern int iscsit_stop_time2retain_timer(struct iscsi_session *); | |||
| 15 | extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *); | 15 | extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *); |
| 16 | extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int); | 16 | extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int); |
| 17 | extern void iscsit_fall_back_to_erl0(struct iscsi_session *); | 17 | extern void iscsit_fall_back_to_erl0(struct iscsi_session *); |
| 18 | extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *); | 18 | extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *, bool *); |
| 19 | 19 | ||
| 20 | #endif /*** ISCSI_TARGET_ERL0_H ***/ | 20 | #endif /*** ISCSI_TARGET_ERL0_H ***/ |
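The bool * argument added to iscsit_take_action_for_connection_exit() lets the caller learn whether the connection was freed, so it knows whether it still owns conn afterwards. A generic sketch of the out-parameter pattern (the structure and condition here are illustrative, not the iSCSI target's actual state machine):

    #include <linux/slab.h>
    #include <linux/types.h>

    struct conn_sketch {
            bool logged_out;
    };

    /* Report through *freed whether the object was released, so the
     * caller never touches it again when *freed is true. */
    static void maybe_free_conn(struct conn_sketch **conn, bool *freed)
    {
            *freed = false;
            if (*conn && (*conn)->logged_out) {
                    kfree(*conn);
                    *conn = NULL;
                    *freed = true;
            }
    }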
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 66238477137b..92b96b51d506 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c | |||
| @@ -1464,5 +1464,9 @@ int iscsi_target_login_thread(void *arg) | |||
| 1464 | break; | 1464 | break; |
| 1465 | } | 1465 | } |
| 1466 | 1466 | ||
| 1467 | while (!kthread_should_stop()) { | ||
| 1468 | msleep(100); | ||
| 1469 | } | ||
| 1470 | |||
| 1467 | return 0; | 1471 | return 0; |
| 1468 | } | 1472 | } |
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 7ccc9c1cbfd1..6f88b31242b0 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c | |||
| @@ -493,14 +493,60 @@ static void iscsi_target_restore_sock_callbacks(struct iscsi_conn *conn) | |||
| 493 | 493 | ||
| 494 | static int iscsi_target_do_login(struct iscsi_conn *, struct iscsi_login *); | 494 | static int iscsi_target_do_login(struct iscsi_conn *, struct iscsi_login *); |
| 495 | 495 | ||
| 496 | static bool iscsi_target_sk_state_check(struct sock *sk) | 496 | static bool __iscsi_target_sk_check_close(struct sock *sk) |
| 497 | { | 497 | { |
| 498 | if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) { | 498 | if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) { |
| 499 | pr_debug("iscsi_target_sk_state_check: TCP_CLOSE_WAIT|TCP_CLOSE," | 499 | pr_debug("__iscsi_target_sk_check_close: TCP_CLOSE_WAIT|TCP_CLOSE," |
| 500 | "returning FALSE\n"); | 500 | "returning FALSE\n"); |
| 501 | return false; | 501 | return true; |
| 502 | } | 502 | } |
| 503 | return true; | 503 | return false; |
| 504 | } | ||
| 505 | |||
| 506 | static bool iscsi_target_sk_check_close(struct iscsi_conn *conn) | ||
| 507 | { | ||
| 508 | bool state = false; | ||
| 509 | |||
| 510 | if (conn->sock) { | ||
| 511 | struct sock *sk = conn->sock->sk; | ||
| 512 | |||
| 513 | read_lock_bh(&sk->sk_callback_lock); | ||
| 514 | state = (__iscsi_target_sk_check_close(sk) || | ||
| 515 | test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)); | ||
| 516 | read_unlock_bh(&sk->sk_callback_lock); | ||
| 517 | } | ||
| 518 | return state; | ||
| 519 | } | ||
| 520 | |||
| 521 | static bool iscsi_target_sk_check_flag(struct iscsi_conn *conn, unsigned int flag) | ||
| 522 | { | ||
| 523 | bool state = false; | ||
| 524 | |||
| 525 | if (conn->sock) { | ||
| 526 | struct sock *sk = conn->sock->sk; | ||
| 527 | |||
| 528 | read_lock_bh(&sk->sk_callback_lock); | ||
| 529 | state = test_bit(flag, &conn->login_flags); | ||
| 530 | read_unlock_bh(&sk->sk_callback_lock); | ||
| 531 | } | ||
| 532 | return state; | ||
| 533 | } | ||
| 534 | |||
| 535 | static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned int flag) | ||
| 536 | { | ||
| 537 | bool state = false; | ||
| 538 | |||
| 539 | if (conn->sock) { | ||
| 540 | struct sock *sk = conn->sock->sk; | ||
| 541 | |||
| 542 | write_lock_bh(&sk->sk_callback_lock); | ||
| 543 | state = (__iscsi_target_sk_check_close(sk) || | ||
| 544 | test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)); | ||
| 545 | if (!state) | ||
| 546 | clear_bit(flag, &conn->login_flags); | ||
| 547 | write_unlock_bh(&sk->sk_callback_lock); | ||
| 548 | } | ||
| 549 | return state; | ||
| 504 | } | 550 | } |
| 505 | 551 | ||
| 506 | static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login) | 552 | static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login) |
| @@ -540,6 +586,20 @@ static void iscsi_target_do_login_rx(struct work_struct *work) | |||
| 540 | 586 | ||
| 541 | pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n", | 587 | pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n", |
| 542 | conn, current->comm, current->pid); | 588 | conn, current->comm, current->pid); |
| 589 | /* | ||
| 590 | * If iscsi_target_do_login_rx() has been invoked by ->sk_data_ready() | ||
| 591 | * before initial PDU processing in iscsi_target_start_negotiation() | ||
| 592 | * has completed, go ahead and retry until it's cleared. | ||
| 593 | * | ||
| 594 | * Otherwise if the TCP connection drops while this is occurring, | ||
| 595 | * iscsi_target_start_negotiation() will detect the failure, call | ||
| 596 | * cancel_delayed_work_sync(&conn->login_work), and cleanup the | ||
| 597 | * remaining iscsi connection resources from iscsi_np process context. | ||
| 598 | */ | ||
| 599 | if (iscsi_target_sk_check_flag(conn, LOGIN_FLAGS_INITIAL_PDU)) { | ||
| 600 | schedule_delayed_work(&conn->login_work, msecs_to_jiffies(10)); | ||
| 601 | return; | ||
| 602 | } | ||
| 543 | 603 | ||
| 544 | spin_lock(&tpg->tpg_state_lock); | 604 | spin_lock(&tpg->tpg_state_lock); |
| 545 | state = (tpg->tpg_state == TPG_STATE_ACTIVE); | 605 | state = (tpg->tpg_state == TPG_STATE_ACTIVE); |
| @@ -547,26 +607,12 @@ static void iscsi_target_do_login_rx(struct work_struct *work) | |||
| 547 | 607 | ||
| 548 | if (!state) { | 608 | if (!state) { |
| 549 | pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n"); | 609 | pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n"); |
| 550 | iscsi_target_restore_sock_callbacks(conn); | 610 | goto err; |
| 551 | iscsi_target_login_drop(conn, login); | ||
| 552 | iscsit_deaccess_np(np, tpg, tpg_np); | ||
| 553 | return; | ||
| 554 | } | 611 | } |
| 555 | 612 | ||
| 556 | if (conn->sock) { | 613 | if (iscsi_target_sk_check_close(conn)) { |
| 557 | struct sock *sk = conn->sock->sk; | 614 | pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n"); |
| 558 | 615 | goto err; | |
| 559 | read_lock_bh(&sk->sk_callback_lock); | ||
| 560 | state = iscsi_target_sk_state_check(sk); | ||
| 561 | read_unlock_bh(&sk->sk_callback_lock); | ||
| 562 | |||
| 563 | if (!state) { | ||
| 564 | pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n"); | ||
| 565 | iscsi_target_restore_sock_callbacks(conn); | ||
| 566 | iscsi_target_login_drop(conn, login); | ||
| 567 | iscsit_deaccess_np(np, tpg, tpg_np); | ||
| 568 | return; | ||
| 569 | } | ||
| 570 | } | 616 | } |
| 571 | 617 | ||
| 572 | conn->login_kworker = current; | 618 | conn->login_kworker = current; |
| @@ -584,34 +630,29 @@ static void iscsi_target_do_login_rx(struct work_struct *work) | |||
| 584 | flush_signals(current); | 630 | flush_signals(current); |
| 585 | conn->login_kworker = NULL; | 631 | conn->login_kworker = NULL; |
| 586 | 632 | ||
| 587 | if (rc < 0) { | 633 | if (rc < 0) |
| 588 | iscsi_target_restore_sock_callbacks(conn); | 634 | goto err; |
| 589 | iscsi_target_login_drop(conn, login); | ||
| 590 | iscsit_deaccess_np(np, tpg, tpg_np); | ||
| 591 | return; | ||
| 592 | } | ||
| 593 | 635 | ||
| 594 | pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n", | 636 | pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n", |
| 595 | conn, current->comm, current->pid); | 637 | conn, current->comm, current->pid); |
| 596 | 638 | ||
| 597 | rc = iscsi_target_do_login(conn, login); | 639 | rc = iscsi_target_do_login(conn, login); |
| 598 | if (rc < 0) { | 640 | if (rc < 0) { |
| 599 | iscsi_target_restore_sock_callbacks(conn); | 641 | goto err; |
| 600 | iscsi_target_login_drop(conn, login); | ||
| 601 | iscsit_deaccess_np(np, tpg, tpg_np); | ||
| 602 | } else if (!rc) { | 642 | } else if (!rc) { |
| 603 | if (conn->sock) { | 643 | if (iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_READ_ACTIVE)) |
| 604 | struct sock *sk = conn->sock->sk; | 644 | goto err; |
| 605 | |||
| 606 | write_lock_bh(&sk->sk_callback_lock); | ||
| 607 | clear_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags); | ||
| 608 | write_unlock_bh(&sk->sk_callback_lock); | ||
| 609 | } | ||
| 610 | } else if (rc == 1) { | 645 | } else if (rc == 1) { |
| 611 | iscsi_target_nego_release(conn); | 646 | iscsi_target_nego_release(conn); |
| 612 | iscsi_post_login_handler(np, conn, zero_tsih); | 647 | iscsi_post_login_handler(np, conn, zero_tsih); |
| 613 | iscsit_deaccess_np(np, tpg, tpg_np); | 648 | iscsit_deaccess_np(np, tpg, tpg_np); |
| 614 | } | 649 | } |
| 650 | return; | ||
| 651 | |||
| 652 | err: | ||
| 653 | iscsi_target_restore_sock_callbacks(conn); | ||
| 654 | iscsi_target_login_drop(conn, login); | ||
| 655 | iscsit_deaccess_np(np, tpg, tpg_np); | ||
| 615 | } | 656 | } |
| 616 | 657 | ||
| 617 | static void iscsi_target_do_cleanup(struct work_struct *work) | 658 | static void iscsi_target_do_cleanup(struct work_struct *work) |
| @@ -659,31 +700,54 @@ static void iscsi_target_sk_state_change(struct sock *sk) | |||
| 659 | orig_state_change(sk); | 700 | orig_state_change(sk); |
| 660 | return; | 701 | return; |
| 661 | } | 702 | } |
| 703 | state = __iscsi_target_sk_check_close(sk); | ||
| 704 | pr_debug("iscsi_target_sk_state_change: state: %d\n", state); | ||
| 705 | |||
| 662 | if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) { | 706 | if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) { |
| 663 | pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1 sk_state_change" | 707 | pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1 sk_state_change" |
| 664 | " conn: %p\n", conn); | 708 | " conn: %p\n", conn); |
| 709 | if (state) | ||
| 710 | set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags); | ||
| 665 | write_unlock_bh(&sk->sk_callback_lock); | 711 | write_unlock_bh(&sk->sk_callback_lock); |
| 666 | orig_state_change(sk); | 712 | orig_state_change(sk); |
| 667 | return; | 713 | return; |
| 668 | } | 714 | } |
| 669 | if (test_and_set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) { | 715 | if (test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) { |
| 670 | pr_debug("Got LOGIN_FLAGS_CLOSED=1 sk_state_change conn: %p\n", | 716 | pr_debug("Got LOGIN_FLAGS_CLOSED=1 sk_state_change conn: %p\n", |
| 671 | conn); | 717 | conn); |
| 672 | write_unlock_bh(&sk->sk_callback_lock); | 718 | write_unlock_bh(&sk->sk_callback_lock); |
| 673 | orig_state_change(sk); | 719 | orig_state_change(sk); |
| 674 | return; | 720 | return; |
| 675 | } | 721 | } |
| 722 | /* | ||
| 723 | * If the TCP connection has dropped, go ahead and set LOGIN_FLAGS_CLOSED, | ||
| 724 | * but only queue conn->login_work -> iscsi_target_do_login_rx() | ||
| 725 | * processing if LOGIN_FLAGS_INITIAL_PDU has already been cleared. | ||
| 726 | * | ||
| 727 | * When iscsi_target_do_login_rx() runs, iscsi_target_sk_check_close() | ||
| 728 | * will detect the dropped TCP connection from delayed workqueue context. | ||
| 729 | * | ||
| 730 | * If LOGIN_FLAGS_INITIAL_PDU is still set, which means the initial | ||
| 731 | * iscsi_target_start_negotiation() is running, iscsi_target_do_login() | ||
| 732 | * via iscsi_target_sk_check_close() or iscsi_target_start_negotiation() | ||
| 733 | * via iscsi_target_sk_check_and_clear() is responsible for detecting the | ||
| 734 | * dropped TCP connection in iscsi_np process context, and cleaning up | ||
| 735 | * the remaining iscsi connection resources. | ||
| 736 | */ | ||
| 737 | if (state) { | ||
| 738 | pr_debug("iscsi_target_sk_state_change got failed state\n"); | ||
| 739 | set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags); | ||
| 740 | state = test_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags); | ||
| 741 | write_unlock_bh(&sk->sk_callback_lock); | ||
| 676 | 742 | ||
| 677 | state = iscsi_target_sk_state_check(sk); | 743 | orig_state_change(sk); |
| 678 | write_unlock_bh(&sk->sk_callback_lock); | ||
| 679 | |||
| 680 | pr_debug("iscsi_target_sk_state_change: state: %d\n", state); | ||
| 681 | 744 | ||
| 682 | if (!state) { | 745 | if (!state) |
| 683 | pr_debug("iscsi_target_sk_state_change got failed state\n"); | 746 | schedule_delayed_work(&conn->login_work, 0); |
| 684 | schedule_delayed_work(&conn->login_cleanup_work, 0); | ||
| 685 | return; | 747 | return; |
| 686 | } | 748 | } |
| 749 | write_unlock_bh(&sk->sk_callback_lock); | ||
| 750 | |||
| 687 | orig_state_change(sk); | 751 | orig_state_change(sk); |
| 688 | } | 752 | } |
| 689 | 753 | ||
| @@ -946,6 +1010,15 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo | |||
| 946 | if (iscsi_target_handle_csg_one(conn, login) < 0) | 1010 | if (iscsi_target_handle_csg_one(conn, login) < 0) |
| 947 | return -1; | 1011 | return -1; |
| 948 | if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) { | 1012 | if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) { |
| 1013 | /* | ||
| 1014 | * Check to make sure the TCP connection has not | ||
| 1015 | * dropped asynchronously while session reinstatement | ||
| 1016 | * was occurring in this kthread context, before | ||
| 1017 | * transitioning to full feature phase operation. | ||
| 1018 | */ | ||
| 1019 | if (iscsi_target_sk_check_close(conn)) | ||
| 1020 | return -1; | ||
| 1021 | |||
| 949 | login->tsih = conn->sess->tsih; | 1022 | login->tsih = conn->sess->tsih; |
| 950 | login->login_complete = 1; | 1023 | login->login_complete = 1; |
| 951 | iscsi_target_restore_sock_callbacks(conn); | 1024 | iscsi_target_restore_sock_callbacks(conn); |
| @@ -972,21 +1045,6 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo | |||
| 972 | break; | 1045 | break; |
| 973 | } | 1046 | } |
| 974 | 1047 | ||
| 975 | if (conn->sock) { | ||
| 976 | struct sock *sk = conn->sock->sk; | ||
| 977 | bool state; | ||
| 978 | |||
| 979 | read_lock_bh(&sk->sk_callback_lock); | ||
| 980 | state = iscsi_target_sk_state_check(sk); | ||
| 981 | read_unlock_bh(&sk->sk_callback_lock); | ||
| 982 | |||
| 983 | if (!state) { | ||
| 984 | pr_debug("iscsi_target_do_login() failed state for" | ||
| 985 | " conn: %p\n", conn); | ||
| 986 | return -1; | ||
| 987 | } | ||
| 988 | } | ||
| 989 | |||
| 990 | return 0; | 1048 | return 0; |
| 991 | } | 1049 | } |
| 992 | 1050 | ||
| @@ -1255,10 +1313,22 @@ int iscsi_target_start_negotiation( | |||
| 1255 | 1313 | ||
| 1256 | write_lock_bh(&sk->sk_callback_lock); | 1314 | write_lock_bh(&sk->sk_callback_lock); |
| 1257 | set_bit(LOGIN_FLAGS_READY, &conn->login_flags); | 1315 | set_bit(LOGIN_FLAGS_READY, &conn->login_flags); |
| 1316 | set_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags); | ||
| 1258 | write_unlock_bh(&sk->sk_callback_lock); | 1317 | write_unlock_bh(&sk->sk_callback_lock); |
| 1259 | } | 1318 | } |
| 1260 | 1319 | /* | |
| 1320 | * If iscsi_target_do_login returns zero to signal more PDU | ||
| 1321 | * exchanges are required to complete the login, go ahead and | ||
| 1322 | * clear LOGIN_FLAGS_INITIAL_PDU but only if the TCP connection | ||
| 1323 | * is still active. | ||
| 1324 | * | ||
| 1325 | * Otherwise if TCP connection dropped asynchronously, go ahead | ||
| 1326 | * and perform connection cleanup now. | ||
| 1327 | */ | ||
| 1261 | ret = iscsi_target_do_login(conn, login); | 1328 | ret = iscsi_target_do_login(conn, login); |
| 1329 | if (!ret && iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU)) | ||
| 1330 | ret = -1; | ||
| 1331 | |||
| 1262 | if (ret < 0) { | 1332 | if (ret < 0) { |
| 1263 | cancel_delayed_work_sync(&conn->login_work); | 1333 | cancel_delayed_work_sync(&conn->login_work); |
| 1264 | cancel_delayed_work_sync(&conn->login_cleanup_work); | 1334 | cancel_delayed_work_sync(&conn->login_cleanup_work); |
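The new iscsi_target_sk_check_close()/iscsi_target_sk_check_and_clear() helpers above all implement one idea: inspect the TCP state and LOGIN_FLAGS_CLOSED under sk->sk_callback_lock, and clear a login flag only while the connection is still alive, reporting whether a close was observed. A rough userspace analogy, using a plain mutex in place of the BH-disabling callback lock and invented names:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define FLAG_READ_ACTIVE        (1u << 0)
#define FLAG_CLOSED             (1u << 1)

struct login_conn {
        pthread_mutex_t lock;           /* stands in for sk->sk_callback_lock */
        unsigned int flags;
        bool socket_closed;             /* stands in for the TCP_CLOSE* check */
};

/*
 * Clear @flag only if the connection is still alive; report whether a
 * close was observed so the caller can bail out instead of continuing.
 */
static bool check_and_clear(struct login_conn *c, unsigned int flag)
{
        bool closed;

        pthread_mutex_lock(&c->lock);
        closed = c->socket_closed || (c->flags & FLAG_CLOSED);
        if (!closed)
                c->flags &= ~flag;
        pthread_mutex_unlock(&c->lock);

        return closed;
}

int main(void)
{
        struct login_conn c = { .flags = FLAG_READ_ACTIVE };

        pthread_mutex_init(&c.lock, NULL);

        if (check_and_clear(&c, FLAG_READ_ACTIVE))
                printf("connection dropped, clean up\n");
        else
                printf("flags now 0x%x\n", c.flags);
        return 0;
}

The real helpers also treat a missing conn->sock as "not closed", which the sketch leaves out.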
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index 9ab7090f7c83..0912de7c0cf8 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h | |||
| @@ -136,7 +136,7 @@ int init_se_kmem_caches(void); | |||
| 136 | void release_se_kmem_caches(void); | 136 | void release_se_kmem_caches(void); |
| 137 | u32 scsi_get_new_index(scsi_index_t); | 137 | u32 scsi_get_new_index(scsi_index_t); |
| 138 | void transport_subsystem_check_init(void); | 138 | void transport_subsystem_check_init(void); |
| 139 | void transport_cmd_finish_abort(struct se_cmd *, int); | 139 | int transport_cmd_finish_abort(struct se_cmd *, int); |
| 140 | unsigned char *transport_dump_cmd_direction(struct se_cmd *); | 140 | unsigned char *transport_dump_cmd_direction(struct se_cmd *); |
| 141 | void transport_dump_dev_state(struct se_device *, char *, int *); | 141 | void transport_dump_dev_state(struct se_device *, char *, int *); |
| 142 | void transport_dump_dev_info(struct se_device *, struct se_lun *, | 142 | void transport_dump_dev_info(struct se_device *, struct se_lun *, |
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index dce1e1b47316..13f47bf4d16b 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c | |||
| @@ -75,7 +75,7 @@ void core_tmr_release_req(struct se_tmr_req *tmr) | |||
| 75 | kfree(tmr); | 75 | kfree(tmr); |
| 76 | } | 76 | } |
| 77 | 77 | ||
| 78 | static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas) | 78 | static int core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas) |
| 79 | { | 79 | { |
| 80 | unsigned long flags; | 80 | unsigned long flags; |
| 81 | bool remove = true, send_tas; | 81 | bool remove = true, send_tas; |
| @@ -91,7 +91,7 @@ static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas) | |||
| 91 | transport_send_task_abort(cmd); | 91 | transport_send_task_abort(cmd); |
| 92 | } | 92 | } |
| 93 | 93 | ||
| 94 | transport_cmd_finish_abort(cmd, remove); | 94 | return transport_cmd_finish_abort(cmd, remove); |
| 95 | } | 95 | } |
| 96 | 96 | ||
| 97 | static int target_check_cdb_and_preempt(struct list_head *list, | 97 | static int target_check_cdb_and_preempt(struct list_head *list, |
| @@ -184,8 +184,8 @@ void core_tmr_abort_task( | |||
| 184 | cancel_work_sync(&se_cmd->work); | 184 | cancel_work_sync(&se_cmd->work); |
| 185 | transport_wait_for_tasks(se_cmd); | 185 | transport_wait_for_tasks(se_cmd); |
| 186 | 186 | ||
| 187 | transport_cmd_finish_abort(se_cmd, true); | 187 | if (!transport_cmd_finish_abort(se_cmd, true)) |
| 188 | target_put_sess_cmd(se_cmd); | 188 | target_put_sess_cmd(se_cmd); |
| 189 | 189 | ||
| 190 | printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for" | 190 | printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for" |
| 191 | " ref_tag: %llu\n", ref_tag); | 191 | " ref_tag: %llu\n", ref_tag); |
| @@ -281,8 +281,8 @@ static void core_tmr_drain_tmr_list( | |||
| 281 | cancel_work_sync(&cmd->work); | 281 | cancel_work_sync(&cmd->work); |
| 282 | transport_wait_for_tasks(cmd); | 282 | transport_wait_for_tasks(cmd); |
| 283 | 283 | ||
| 284 | transport_cmd_finish_abort(cmd, 1); | 284 | if (!transport_cmd_finish_abort(cmd, 1)) |
| 285 | target_put_sess_cmd(cmd); | 285 | target_put_sess_cmd(cmd); |
| 286 | } | 286 | } |
| 287 | } | 287 | } |
| 288 | 288 | ||
| @@ -380,8 +380,8 @@ static void core_tmr_drain_state_list( | |||
| 380 | cancel_work_sync(&cmd->work); | 380 | cancel_work_sync(&cmd->work); |
| 381 | transport_wait_for_tasks(cmd); | 381 | transport_wait_for_tasks(cmd); |
| 382 | 382 | ||
| 383 | core_tmr_handle_tas_abort(cmd, tas); | 383 | if (!core_tmr_handle_tas_abort(cmd, tas)) |
| 384 | target_put_sess_cmd(cmd); | 384 | target_put_sess_cmd(cmd); |
| 385 | } | 385 | } |
| 386 | } | 386 | } |
| 387 | 387 | ||
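The pattern above — if (!transport_cmd_finish_abort(...)) target_put_sess_cmd(...) — only drops the caller's reference when the finish routine did not already consume it, avoiding a double put. The kernel's exact rules are more involved (they depend on transport_cmd_check_stop_to_fabric() and the ack_kref state); the sketch below only shows the calling convention, with simplified names and a bare atomic refcount.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct cmd {
        atomic_int refs;
};

static void cmd_put(struct cmd *c)
{
        if (atomic_fetch_sub(&c->refs, 1) == 1) {
                printf("freeing cmd\n");
                free(c);
        }
}

/*
 * Finish an aborted command.  Returns 1 if this function already dropped
 * the reference the caller would otherwise drop, 0 if the caller still
 * owns it.
 */
static int finish_abort(struct cmd *c, int remove)
{
        if (remove) {
                cmd_put(c);             /* reference consumed here */
                return 1;
        }
        return 0;
}

int main(void)
{
        struct cmd *c = malloc(sizeof(*c));

        if (!c)
                return 1;
        atomic_init(&c->refs, 1);

        /* Mirrors: if (!transport_cmd_finish_abort(cmd, 1)) target_put_sess_cmd(cmd); */
        if (!finish_abort(c, 1))
                cmd_put(c);
        return 0;
}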
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 37f57357d4a0..f1b3a46bdcaf 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
| @@ -651,9 +651,10 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd) | |||
| 651 | percpu_ref_put(&lun->lun_ref); | 651 | percpu_ref_put(&lun->lun_ref); |
| 652 | } | 652 | } |
| 653 | 653 | ||
| 654 | void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) | 654 | int transport_cmd_finish_abort(struct se_cmd *cmd, int remove) |
| 655 | { | 655 | { |
| 656 | bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF); | 656 | bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF); |
| 657 | int ret = 0; | ||
| 657 | 658 | ||
| 658 | if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) | 659 | if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) |
| 659 | transport_lun_remove_cmd(cmd); | 660 | transport_lun_remove_cmd(cmd); |
| @@ -665,9 +666,11 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) | |||
| 665 | cmd->se_tfo->aborted_task(cmd); | 666 | cmd->se_tfo->aborted_task(cmd); |
| 666 | 667 | ||
| 667 | if (transport_cmd_check_stop_to_fabric(cmd)) | 668 | if (transport_cmd_check_stop_to_fabric(cmd)) |
| 668 | return; | 669 | return 1; |
| 669 | if (remove && ack_kref) | 670 | if (remove && ack_kref) |
| 670 | transport_put_cmd(cmd); | 671 | ret = transport_put_cmd(cmd); |
| 672 | |||
| 673 | return ret; | ||
| 671 | } | 674 | } |
| 672 | 675 | ||
| 673 | static void target_complete_failure_work(struct work_struct *work) | 676 | static void target_complete_failure_work(struct work_struct *work) |
| @@ -1160,15 +1163,28 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size) | |||
| 1160 | if (cmd->unknown_data_length) { | 1163 | if (cmd->unknown_data_length) { |
| 1161 | cmd->data_length = size; | 1164 | cmd->data_length = size; |
| 1162 | } else if (size != cmd->data_length) { | 1165 | } else if (size != cmd->data_length) { |
| 1163 | pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" | 1166 | pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:" |
| 1164 | " %u does not match SCSI CDB Length: %u for SAM Opcode:" | 1167 | " %u does not match SCSI CDB Length: %u for SAM Opcode:" |
| 1165 | " 0x%02x\n", cmd->se_tfo->get_fabric_name(), | 1168 | " 0x%02x\n", cmd->se_tfo->get_fabric_name(), |
| 1166 | cmd->data_length, size, cmd->t_task_cdb[0]); | 1169 | cmd->data_length, size, cmd->t_task_cdb[0]); |
| 1167 | 1170 | ||
| 1168 | if (cmd->data_direction == DMA_TO_DEVICE && | 1171 | if (cmd->data_direction == DMA_TO_DEVICE) { |
| 1169 | cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { | 1172 | if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { |
| 1170 | pr_err("Rejecting underflow/overflow WRITE data\n"); | 1173 | pr_err_ratelimited("Rejecting underflow/overflow" |
| 1171 | return TCM_INVALID_CDB_FIELD; | 1174 | " for WRITE data CDB\n"); |
| 1175 | return TCM_INVALID_CDB_FIELD; | ||
| 1176 | } | ||
| 1177 | /* | ||
| 1178 | * Some fabric drivers like iscsi-target still expect to | ||
| 1179 | * always reject overflow writes. Reject this case until | ||
| 1180 | * full fabric driver level support for overflow writes | ||
| 1181 | * is introduced tree-wide. | ||
| 1182 | */ | ||
| 1183 | if (size > cmd->data_length) { | ||
| 1184 | pr_err_ratelimited("Rejecting overflow for" | ||
| 1185 | " WRITE control CDB\n"); | ||
| 1186 | return TCM_INVALID_CDB_FIELD; | ||
| 1187 | } | ||
| 1172 | } | 1188 | } |
| 1173 | /* | 1189 | /* |
| 1174 | * Reject READ_* or WRITE_* with overflow/underflow for | 1190 | * Reject READ_* or WRITE_* with overflow/underflow for |
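The reshaped size check treats WRITEs differently by CDB type: a data CDB never tolerates a mismatch between the expected transfer length and the CDB-derived size, while a control CDB now rejects only overflow (CDB size larger than the expected length). The stand-alone restatement below captures just that decision; it assumes, as the surrounding code does, that reads fall through to the later overflow/underflow residual handling.

#include <stdbool.h>
#include <stdio.h>

enum dir { DMA_TO_DEV, DMA_FROM_DEV };

/*
 * Decide whether a command whose CDB-derived size disagrees with the
 * expected transfer length must be rejected outright.
 */
static bool reject_mismatch(enum dir d, bool data_cdb,
                            unsigned int cdb_size, unsigned int expected)
{
        if (cdb_size == expected)
                return false;           /* no mismatch at all */

        if (d != DMA_TO_DEV)
                return false;           /* reads: handled as residual later */

        if (data_cdb)
                return true;            /* WRITE data CDB: any mismatch is fatal */

        return cdb_size > expected;     /* WRITE control CDB: only overflow is fatal */
}

int main(void)
{
        printf("%d\n", reject_mismatch(DMA_TO_DEV, false, 512, 256));   /* 1: overflow  */
        printf("%d\n", reject_mismatch(DMA_TO_DEV, false, 128, 256));   /* 0: underflow */
        printf("%d\n", reject_mismatch(DMA_TO_DEV, true, 128, 256));    /* 1: data CDB  */
        printf("%d\n", reject_mismatch(DMA_FROM_DEV, true, 512, 256));  /* 0: read      */
        return 0;
}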
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 9045837f748b..beb5f098f32d 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c | |||
| @@ -97,7 +97,7 @@ struct tcmu_hba { | |||
| 97 | 97 | ||
| 98 | struct tcmu_dev { | 98 | struct tcmu_dev { |
| 99 | struct list_head node; | 99 | struct list_head node; |
| 100 | 100 | struct kref kref; | |
| 101 | struct se_device se_dev; | 101 | struct se_device se_dev; |
| 102 | 102 | ||
| 103 | char *name; | 103 | char *name; |
| @@ -969,6 +969,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) | |||
| 969 | udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL); | 969 | udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL); |
| 970 | if (!udev) | 970 | if (!udev) |
| 971 | return NULL; | 971 | return NULL; |
| 972 | kref_init(&udev->kref); | ||
| 972 | 973 | ||
| 973 | udev->name = kstrdup(name, GFP_KERNEL); | 974 | udev->name = kstrdup(name, GFP_KERNEL); |
| 974 | if (!udev->name) { | 975 | if (!udev->name) { |
| @@ -1145,6 +1146,24 @@ static int tcmu_open(struct uio_info *info, struct inode *inode) | |||
| 1145 | return 0; | 1146 | return 0; |
| 1146 | } | 1147 | } |
| 1147 | 1148 | ||
| 1149 | static void tcmu_dev_call_rcu(struct rcu_head *p) | ||
| 1150 | { | ||
| 1151 | struct se_device *dev = container_of(p, struct se_device, rcu_head); | ||
| 1152 | struct tcmu_dev *udev = TCMU_DEV(dev); | ||
| 1153 | |||
| 1154 | kfree(udev->uio_info.name); | ||
| 1155 | kfree(udev->name); | ||
| 1156 | kfree(udev); | ||
| 1157 | } | ||
| 1158 | |||
| 1159 | static void tcmu_dev_kref_release(struct kref *kref) | ||
| 1160 | { | ||
| 1161 | struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref); | ||
| 1162 | struct se_device *dev = &udev->se_dev; | ||
| 1163 | |||
| 1164 | call_rcu(&dev->rcu_head, tcmu_dev_call_rcu); | ||
| 1165 | } | ||
| 1166 | |||
| 1148 | static int tcmu_release(struct uio_info *info, struct inode *inode) | 1167 | static int tcmu_release(struct uio_info *info, struct inode *inode) |
| 1149 | { | 1168 | { |
| 1150 | struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); | 1169 | struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); |
| @@ -1152,7 +1171,8 @@ static int tcmu_release(struct uio_info *info, struct inode *inode) | |||
| 1152 | clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags); | 1171 | clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags); |
| 1153 | 1172 | ||
| 1154 | pr_debug("close\n"); | 1173 | pr_debug("close\n"); |
| 1155 | 1174 | /* release ref from configure */ | |
| 1175 | kref_put(&udev->kref, tcmu_dev_kref_release); | ||
| 1156 | return 0; | 1176 | return 0; |
| 1157 | } | 1177 | } |
| 1158 | 1178 | ||
| @@ -1272,6 +1292,12 @@ static int tcmu_configure_device(struct se_device *dev) | |||
| 1272 | dev->dev_attrib.hw_max_sectors = 128; | 1292 | dev->dev_attrib.hw_max_sectors = 128; |
| 1273 | dev->dev_attrib.hw_queue_depth = 128; | 1293 | dev->dev_attrib.hw_queue_depth = 128; |
| 1274 | 1294 | ||
| 1295 | /* | ||
| 1296 | * Get a ref in case userspace does a close on the uio device before | ||
| 1297 | * LIO has initiated tcmu_free_device. | ||
| 1298 | */ | ||
| 1299 | kref_get(&udev->kref); | ||
| 1300 | |||
| 1275 | ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name, | 1301 | ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name, |
| 1276 | udev->uio_info.uio_dev->minor); | 1302 | udev->uio_info.uio_dev->minor); |
| 1277 | if (ret) | 1303 | if (ret) |
| @@ -1284,11 +1310,13 @@ static int tcmu_configure_device(struct se_device *dev) | |||
| 1284 | return 0; | 1310 | return 0; |
| 1285 | 1311 | ||
| 1286 | err_netlink: | 1312 | err_netlink: |
| 1313 | kref_put(&udev->kref, tcmu_dev_kref_release); | ||
| 1287 | uio_unregister_device(&udev->uio_info); | 1314 | uio_unregister_device(&udev->uio_info); |
| 1288 | err_register: | 1315 | err_register: |
| 1289 | vfree(udev->mb_addr); | 1316 | vfree(udev->mb_addr); |
| 1290 | err_vzalloc: | 1317 | err_vzalloc: |
| 1291 | kfree(info->name); | 1318 | kfree(info->name); |
| 1319 | info->name = NULL; | ||
| 1292 | 1320 | ||
| 1293 | return ret; | 1321 | return ret; |
| 1294 | } | 1322 | } |
| @@ -1302,14 +1330,6 @@ static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd) | |||
| 1302 | return -EINVAL; | 1330 | return -EINVAL; |
| 1303 | } | 1331 | } |
| 1304 | 1332 | ||
| 1305 | static void tcmu_dev_call_rcu(struct rcu_head *p) | ||
| 1306 | { | ||
| 1307 | struct se_device *dev = container_of(p, struct se_device, rcu_head); | ||
| 1308 | struct tcmu_dev *udev = TCMU_DEV(dev); | ||
| 1309 | |||
| 1310 | kfree(udev); | ||
| 1311 | } | ||
| 1312 | |||
| 1313 | static bool tcmu_dev_configured(struct tcmu_dev *udev) | 1333 | static bool tcmu_dev_configured(struct tcmu_dev *udev) |
| 1314 | { | 1334 | { |
| 1315 | return udev->uio_info.uio_dev ? true : false; | 1335 | return udev->uio_info.uio_dev ? true : false; |
| @@ -1364,10 +1384,10 @@ static void tcmu_free_device(struct se_device *dev) | |||
| 1364 | udev->uio_info.uio_dev->minor); | 1384 | udev->uio_info.uio_dev->minor); |
| 1365 | 1385 | ||
| 1366 | uio_unregister_device(&udev->uio_info); | 1386 | uio_unregister_device(&udev->uio_info); |
| 1367 | kfree(udev->uio_info.name); | ||
| 1368 | kfree(udev->name); | ||
| 1369 | } | 1387 | } |
| 1370 | call_rcu(&dev->rcu_head, tcmu_dev_call_rcu); | 1388 | |
| 1389 | /* release ref from init */ | ||
| 1390 | kref_put(&udev->kref, tcmu_dev_kref_release); | ||
| 1371 | } | 1391 | } |
| 1372 | 1392 | ||
| 1373 | enum { | 1393 | enum { |
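The tcmu changes give the device two references: one taken at allocation (dropped in tcmu_free_device()) and one taken at configure time on behalf of the uio file (dropped in tcmu_release()). Whichever put runs last frees the object; the kernel defers the actual free through call_rcu(), which the userspace analogy below skips. Names are invented.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct udev {
        atomic_int refs;
        char *name;
};

static struct udev *udev_alloc(const char *name)
{
        struct udev *u = calloc(1, sizeof(*u));

        if (!u)
                return NULL;
        atomic_init(&u->refs, 1);       /* reference owned by the creator */
        u->name = strdup(name);
        if (!u->name) {
                free(u);
                return NULL;
        }
        return u;
}

static void udev_get(struct udev *u)
{
        atomic_fetch_add(&u->refs, 1);
}

static void udev_put(struct udev *u)
{
        if (atomic_fetch_sub(&u->refs, 1) == 1) {
                printf("last reference gone, freeing %s\n", u->name);
                free(u->name);
                free(u);
        }
}

int main(void)
{
        struct udev *u = udev_alloc("demo");

        if (!u)
                return 1;

        udev_get(u);    /* "configure": extra reference held for userspace */

        /* The two puts may happen in either order; the last one frees. */
        udev_put(u);    /* userspace closes the device */
        udev_put(u);    /* core tears the device down   */
        return 0;
}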
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c index 4fb3165384c4..6b137194069f 100644 --- a/drivers/tty/tty_port.c +++ b/drivers/tty/tty_port.c | |||
| @@ -34,9 +34,7 @@ static int tty_port_default_receive_buf(struct tty_port *port, | |||
| 34 | if (!disc) | 34 | if (!disc) |
| 35 | return 0; | 35 | return 0; |
| 36 | 36 | ||
| 37 | mutex_lock(&tty->atomic_write_lock); | ||
| 38 | ret = tty_ldisc_receive_buf(disc, p, (char *)f, count); | 37 | ret = tty_ldisc_receive_buf(disc, p, (char *)f, count); |
| 39 | mutex_unlock(&tty->atomic_write_lock); | ||
| 40 | 38 | ||
| 41 | tty_ldisc_deref(disc); | 39 | tty_ldisc_deref(disc); |
| 42 | 40 | ||
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index 9e217b1361ea..fe4fe2440729 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c | |||
| @@ -843,7 +843,10 @@ static ssize_t ci_role_show(struct device *dev, struct device_attribute *attr, | |||
| 843 | { | 843 | { |
| 844 | struct ci_hdrc *ci = dev_get_drvdata(dev); | 844 | struct ci_hdrc *ci = dev_get_drvdata(dev); |
| 845 | 845 | ||
| 846 | return sprintf(buf, "%s\n", ci_role(ci)->name); | 846 | if (ci->role != CI_ROLE_END) |
| 847 | return sprintf(buf, "%s\n", ci_role(ci)->name); | ||
| 848 | |||
| 849 | return 0; | ||
| 847 | } | 850 | } |
| 848 | 851 | ||
| 849 | static ssize_t ci_role_store(struct device *dev, | 852 | static ssize_t ci_role_store(struct device *dev, |
diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c index 6d23eede4d8c..1c31e8a08810 100644 --- a/drivers/usb/chipidea/debug.c +++ b/drivers/usb/chipidea/debug.c | |||
| @@ -294,7 +294,8 @@ static int ci_role_show(struct seq_file *s, void *data) | |||
| 294 | { | 294 | { |
| 295 | struct ci_hdrc *ci = s->private; | 295 | struct ci_hdrc *ci = s->private; |
| 296 | 296 | ||
| 297 | seq_printf(s, "%s\n", ci_role(ci)->name); | 297 | if (ci->role != CI_ROLE_END) |
| 298 | seq_printf(s, "%s\n", ci_role(ci)->name); | ||
| 298 | 299 | ||
| 299 | return 0; | 300 | return 0; |
| 300 | } | 301 | } |
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index 56d2d3213076..d68b125796f9 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c | |||
| @@ -1993,6 +1993,7 @@ static void udc_id_switch_for_host(struct ci_hdrc *ci) | |||
| 1993 | int ci_hdrc_gadget_init(struct ci_hdrc *ci) | 1993 | int ci_hdrc_gadget_init(struct ci_hdrc *ci) |
| 1994 | { | 1994 | { |
| 1995 | struct ci_role_driver *rdrv; | 1995 | struct ci_role_driver *rdrv; |
| 1996 | int ret; | ||
| 1996 | 1997 | ||
| 1997 | if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC)) | 1998 | if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC)) |
| 1998 | return -ENXIO; | 1999 | return -ENXIO; |
| @@ -2005,7 +2006,10 @@ int ci_hdrc_gadget_init(struct ci_hdrc *ci) | |||
| 2005 | rdrv->stop = udc_id_switch_for_host; | 2006 | rdrv->stop = udc_id_switch_for_host; |
| 2006 | rdrv->irq = udc_irq; | 2007 | rdrv->irq = udc_irq; |
| 2007 | rdrv->name = "gadget"; | 2008 | rdrv->name = "gadget"; |
| 2008 | ci->roles[CI_ROLE_GADGET] = rdrv; | ||
| 2009 | 2009 | ||
| 2010 | return udc_start(ci); | 2010 | ret = udc_start(ci); |
| 2011 | if (!ret) | ||
| 2012 | ci->roles[CI_ROLE_GADGET] = rdrv; | ||
| 2013 | |||
| 2014 | return ret; | ||
| 2011 | } | 2015 | } |
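ci_hdrc_gadget_init() now publishes the gadget role only after udc_start() has succeeded, so the rest of the driver can never pick up a role whose initialization failed. A small sketch of that publish-after-success ordering, with invented names:

#include <stdio.h>

struct role_driver {
        const char *name;
};

static struct role_driver *roles[2];    /* stands in for ci->roles[] */

static int start_role(struct role_driver *r)
{
        return r->name ? 0 : -1;        /* pretend initialization can fail */
}

/* Only make the driver visible once it has started successfully. */
static int register_gadget(struct role_driver *r)
{
        int ret = start_role(r);

        if (!ret)
                roles[1] = r;
        return ret;
}

int main(void)
{
        struct role_driver good = { .name = "gadget" };
        struct role_driver bad  = { .name = NULL };

        printf("good: %d, published: %d\n", register_gadget(&good), roles[1] != NULL);
        roles[1] = NULL;
        printf("bad:  %d, published: %d\n", register_gadget(&bad), roles[1] != NULL);
        return 0;
}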
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c index e77a4ed4f021..9f4a0185dd60 100644 --- a/drivers/usb/chipidea/usbmisc_imx.c +++ b/drivers/usb/chipidea/usbmisc_imx.c | |||
| @@ -108,6 +108,8 @@ struct imx_usbmisc { | |||
| 108 | const struct usbmisc_ops *ops; | 108 | const struct usbmisc_ops *ops; |
| 109 | }; | 109 | }; |
| 110 | 110 | ||
| 111 | static inline bool is_imx53_usbmisc(struct imx_usbmisc_data *data); | ||
| 112 | |||
| 111 | static int usbmisc_imx25_init(struct imx_usbmisc_data *data) | 113 | static int usbmisc_imx25_init(struct imx_usbmisc_data *data) |
| 112 | { | 114 | { |
| 113 | struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev); | 115 | struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev); |
| @@ -242,10 +244,15 @@ static int usbmisc_imx53_init(struct imx_usbmisc_data *data) | |||
| 242 | val = readl(reg) | MX53_USB_UHx_CTRL_WAKE_UP_EN | 244 | val = readl(reg) | MX53_USB_UHx_CTRL_WAKE_UP_EN |
| 243 | | MX53_USB_UHx_CTRL_ULPI_INT_EN; | 245 | | MX53_USB_UHx_CTRL_ULPI_INT_EN; |
| 244 | writel(val, reg); | 246 | writel(val, reg); |
| 245 | /* Disable internal 60Mhz clock */ | 247 | if (is_imx53_usbmisc(data)) { |
| 246 | reg = usbmisc->base + MX53_USB_CLKONOFF_CTRL_OFFSET; | 248 | /* Disable internal 60Mhz clock */ |
| 247 | val = readl(reg) | MX53_USB_CLKONOFF_CTRL_H2_INT60CKOFF; | 249 | reg = usbmisc->base + |
| 248 | writel(val, reg); | 250 | MX53_USB_CLKONOFF_CTRL_OFFSET; |
| 251 | val = readl(reg) | | ||
| 252 | MX53_USB_CLKONOFF_CTRL_H2_INT60CKOFF; | ||
| 253 | writel(val, reg); | ||
| 254 | } | ||
| 255 | |||
| 249 | } | 256 | } |
| 250 | if (data->disable_oc) { | 257 | if (data->disable_oc) { |
| 251 | reg = usbmisc->base + MX53_USB_UH2_CTRL_OFFSET; | 258 | reg = usbmisc->base + MX53_USB_UH2_CTRL_OFFSET; |
| @@ -267,10 +274,15 @@ static int usbmisc_imx53_init(struct imx_usbmisc_data *data) | |||
| 267 | val = readl(reg) | MX53_USB_UHx_CTRL_WAKE_UP_EN | 274 | val = readl(reg) | MX53_USB_UHx_CTRL_WAKE_UP_EN |
| 268 | | MX53_USB_UHx_CTRL_ULPI_INT_EN; | 275 | | MX53_USB_UHx_CTRL_ULPI_INT_EN; |
| 269 | writel(val, reg); | 276 | writel(val, reg); |
| 270 | /* Disable internal 60Mhz clock */ | 277 | |
| 271 | reg = usbmisc->base + MX53_USB_CLKONOFF_CTRL_OFFSET; | 278 | if (is_imx53_usbmisc(data)) { |
| 272 | val = readl(reg) | MX53_USB_CLKONOFF_CTRL_H3_INT60CKOFF; | 279 | /* Disable internal 60Mhz clock */ |
| 273 | writel(val, reg); | 280 | reg = usbmisc->base + |
| 281 | MX53_USB_CLKONOFF_CTRL_OFFSET; | ||
| 282 | val = readl(reg) | | ||
| 283 | MX53_USB_CLKONOFF_CTRL_H3_INT60CKOFF; | ||
| 284 | writel(val, reg); | ||
| 285 | } | ||
| 274 | } | 286 | } |
| 275 | if (data->disable_oc) { | 287 | if (data->disable_oc) { |
| 276 | reg = usbmisc->base + MX53_USB_UH3_CTRL_OFFSET; | 288 | reg = usbmisc->base + MX53_USB_UH3_CTRL_OFFSET; |
| @@ -456,6 +468,10 @@ static const struct usbmisc_ops imx27_usbmisc_ops = { | |||
| 456 | .init = usbmisc_imx27_init, | 468 | .init = usbmisc_imx27_init, |
| 457 | }; | 469 | }; |
| 458 | 470 | ||
| 471 | static const struct usbmisc_ops imx51_usbmisc_ops = { | ||
| 472 | .init = usbmisc_imx53_init, | ||
| 473 | }; | ||
| 474 | |||
| 459 | static const struct usbmisc_ops imx53_usbmisc_ops = { | 475 | static const struct usbmisc_ops imx53_usbmisc_ops = { |
| 460 | .init = usbmisc_imx53_init, | 476 | .init = usbmisc_imx53_init, |
| 461 | }; | 477 | }; |
| @@ -479,6 +495,13 @@ static const struct usbmisc_ops imx7d_usbmisc_ops = { | |||
| 479 | .set_wakeup = usbmisc_imx7d_set_wakeup, | 495 | .set_wakeup = usbmisc_imx7d_set_wakeup, |
| 480 | }; | 496 | }; |
| 481 | 497 | ||
| 498 | static inline bool is_imx53_usbmisc(struct imx_usbmisc_data *data) | ||
| 499 | { | ||
| 500 | struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev); | ||
| 501 | |||
| 502 | return usbmisc->ops == &imx53_usbmisc_ops; | ||
| 503 | } | ||
| 504 | |||
| 482 | int imx_usbmisc_init(struct imx_usbmisc_data *data) | 505 | int imx_usbmisc_init(struct imx_usbmisc_data *data) |
| 483 | { | 506 | { |
| 484 | struct imx_usbmisc *usbmisc; | 507 | struct imx_usbmisc *usbmisc; |
| @@ -536,7 +559,7 @@ static const struct of_device_id usbmisc_imx_dt_ids[] = { | |||
| 536 | }, | 559 | }, |
| 537 | { | 560 | { |
| 538 | .compatible = "fsl,imx51-usbmisc", | 561 | .compatible = "fsl,imx51-usbmisc", |
| 539 | .data = &imx53_usbmisc_ops, | 562 | .data = &imx51_usbmisc_ops, |
| 540 | }, | 563 | }, |
| 541 | { | 564 | { |
| 542 | .compatible = "fsl,imx53-usbmisc", | 565 | .compatible = "fsl,imx53-usbmisc", |
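is_imx53_usbmisc() distinguishes the i.MX53 from the i.MX51 by checking which ops table was bound at probe time, which is why the i.MX51 entry now points at its own imx51_usbmisc_ops even though it shares the init routine. A compact illustration of that ops-pointer test:

#include <stdbool.h>
#include <stdio.h>

struct usbmisc_ops {
        int (*init)(void);
};

static int shared_init(void)
{
        return 0;
}

/* Two SoC variants share the init routine but keep distinct ops tables. */
static const struct usbmisc_ops imx51_ops = { .init = shared_init };
static const struct usbmisc_ops imx53_ops = { .init = shared_init };

struct usbmisc {
        const struct usbmisc_ops *ops;
};

/* Variant test by comparing the bound ops pointer; no extra flag needed. */
static bool is_imx53(const struct usbmisc *m)
{
        return m->ops == &imx53_ops;
}

int main(void)
{
        struct usbmisc a = { .ops = &imx51_ops };
        struct usbmisc b = { .ops = &imx53_ops };

        printf("a is imx53: %d\n", is_imx53(&a));       /* 0 */
        printf("b is imx53: %d\n", is_imx53(&b));       /* 1 */
        return 0;
}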
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c index 9cd8722f24f6..a3ffe97170ff 100644 --- a/drivers/usb/dwc2/params.c +++ b/drivers/usb/dwc2/params.c | |||
| @@ -144,6 +144,8 @@ const struct of_device_id dwc2_of_match_table[] = { | |||
| 144 | { .compatible = "lantiq,xrx200-usb", .data = dwc2_set_ltq_params }, | 144 | { .compatible = "lantiq,xrx200-usb", .data = dwc2_set_ltq_params }, |
| 145 | { .compatible = "snps,dwc2" }, | 145 | { .compatible = "snps,dwc2" }, |
| 146 | { .compatible = "samsung,s3c6400-hsotg" }, | 146 | { .compatible = "samsung,s3c6400-hsotg" }, |
| 147 | { .compatible = "amlogic,meson8-usb", | ||
| 148 | .data = dwc2_set_amlogic_params }, | ||
| 147 | { .compatible = "amlogic,meson8b-usb", | 149 | { .compatible = "amlogic,meson8b-usb", |
| 148 | .data = dwc2_set_amlogic_params }, | 150 | .data = dwc2_set_amlogic_params }, |
| 149 | { .compatible = "amlogic,meson-gxbb-usb", | 151 | { .compatible = "amlogic,meson-gxbb-usb", |
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 49d685ad0da9..45b554032332 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c | |||
| @@ -315,6 +315,9 @@ void usb_remove_function(struct usb_configuration *c, struct usb_function *f) | |||
| 315 | list_del(&f->list); | 315 | list_del(&f->list); |
| 316 | if (f->unbind) | 316 | if (f->unbind) |
| 317 | f->unbind(c, f); | 317 | f->unbind(c, f); |
| 318 | |||
| 319 | if (f->bind_deactivated) | ||
| 320 | usb_function_activate(f); | ||
| 318 | } | 321 | } |
| 319 | EXPORT_SYMBOL_GPL(usb_remove_function); | 322 | EXPORT_SYMBOL_GPL(usb_remove_function); |
| 320 | 323 | ||
| @@ -956,12 +959,8 @@ static void remove_config(struct usb_composite_dev *cdev, | |||
| 956 | 959 | ||
| 957 | f = list_first_entry(&config->functions, | 960 | f = list_first_entry(&config->functions, |
| 958 | struct usb_function, list); | 961 | struct usb_function, list); |
| 959 | list_del(&f->list); | 962 | |
| 960 | if (f->unbind) { | 963 | usb_remove_function(config, f); |
| 961 | DBG(cdev, "unbind function '%s'/%p\n", f->name, f); | ||
| 962 | f->unbind(config, f); | ||
| 963 | /* may free memory for "f" */ | ||
| 964 | } | ||
| 965 | } | 964 | } |
| 966 | list_del(&config->list); | 965 | list_del(&config->list); |
| 967 | if (config->unbind) { | 966 | if (config->unbind) { |
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c index 4c8aacc232c0..74d57d6994da 100644 --- a/drivers/usb/gadget/function/f_mass_storage.c +++ b/drivers/usb/gadget/function/f_mass_storage.c | |||
| @@ -396,7 +396,11 @@ static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep) | |||
| 396 | /* Caller must hold fsg->lock */ | 396 | /* Caller must hold fsg->lock */ |
| 397 | static void wakeup_thread(struct fsg_common *common) | 397 | static void wakeup_thread(struct fsg_common *common) |
| 398 | { | 398 | { |
| 399 | smp_wmb(); /* ensure the write of bh->state is complete */ | 399 | /* |
| 400 | * Ensure the reading of thread_wakeup_needed | ||
| 401 | * and the writing of bh->state are completed | ||
| 402 | */ | ||
| 403 | smp_mb(); | ||
| 400 | /* Tell the main thread that something has happened */ | 404 | /* Tell the main thread that something has happened */ |
| 401 | common->thread_wakeup_needed = 1; | 405 | common->thread_wakeup_needed = 1; |
| 402 | if (common->thread_task) | 406 | if (common->thread_task) |
| @@ -627,7 +631,12 @@ static int sleep_thread(struct fsg_common *common, bool can_freeze) | |||
| 627 | } | 631 | } |
| 628 | __set_current_state(TASK_RUNNING); | 632 | __set_current_state(TASK_RUNNING); |
| 629 | common->thread_wakeup_needed = 0; | 633 | common->thread_wakeup_needed = 0; |
| 630 | smp_rmb(); /* ensure the latest bh->state is visible */ | 634 | |
| 635 | /* | ||
| 636 | * Ensure the writing of thread_wakeup_needed | ||
| 637 | * and the reading of bh->state are completed | ||
| 638 | */ | ||
| 639 | smp_mb(); | ||
| 631 | return rc; | 640 | return rc; |
| 632 | } | 641 | } |
| 633 | 642 | ||
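Each side of the wakeup handshake above stores to one variable (thread_wakeup_needed or bh->state) and then loads the other; ordering a store against a later load is exactly what smp_wmb()/smp_rmb() do not guarantee, hence the switch to smp_mb(). The userspace litmus sketch below shows the same requirement with C11 sequentially consistent fences standing in for smp_mb(); it is an analogy, not the driver code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/*
 * Store-buffering pattern: each thread stores to its own flag, then loads
 * the other one.  With the full fences in place the outcome r_x == 0 &&
 * r_y == 0 is forbidden; with only write/read barriers it is not.
 */
static atomic_int x, y;
static int r_x, r_y;

static void *t0(void *arg)
{
        atomic_store_explicit(&x, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* plays the role of smp_mb() */
        r_y = atomic_load_explicit(&y, memory_order_relaxed);
        return NULL;
}

static void *t1(void *arg)
{
        atomic_store_explicit(&y, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* plays the role of smp_mb() */
        r_x = atomic_load_explicit(&x, memory_order_relaxed);
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, t0, NULL);
        pthread_create(&b, NULL, t1, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);

        printf("r_x=%d r_y=%d\n", r_x, r_y);
        return 0;
}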
diff --git a/drivers/usb/gadget/function/f_phonet.c b/drivers/usb/gadget/function/f_phonet.c index b4058f0000e4..6a1ce6a55158 100644 --- a/drivers/usb/gadget/function/f_phonet.c +++ b/drivers/usb/gadget/function/f_phonet.c | |||
| @@ -281,7 +281,7 @@ static void pn_net_setup(struct net_device *dev) | |||
| 281 | dev->tx_queue_len = 1; | 281 | dev->tx_queue_len = 1; |
| 282 | 282 | ||
| 283 | dev->netdev_ops = &pn_netdev_ops; | 283 | dev->netdev_ops = &pn_netdev_ops; |
| 284 | dev->destructor = free_netdev; | 284 | dev->needs_free_netdev = true; |
| 285 | dev->header_ops = &phonet_header_ops; | 285 | dev->header_ops = &phonet_header_ops; |
| 286 | } | 286 | } |
| 287 | 287 | ||
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index b9ca0a26cbd9..684900fcfe24 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c | |||
| @@ -1183,8 +1183,10 @@ dev_release (struct inode *inode, struct file *fd) | |||
| 1183 | 1183 | ||
| 1184 | /* closing ep0 === shutdown all */ | 1184 | /* closing ep0 === shutdown all */ |
| 1185 | 1185 | ||
| 1186 | if (dev->gadget_registered) | 1186 | if (dev->gadget_registered) { |
| 1187 | usb_gadget_unregister_driver (&gadgetfs_driver); | 1187 | usb_gadget_unregister_driver (&gadgetfs_driver); |
| 1188 | dev->gadget_registered = false; | ||
| 1189 | } | ||
| 1188 | 1190 | ||
| 1189 | /* at this point "good" hardware has disconnected the | 1191 | /* at this point "good" hardware has disconnected the |
| 1190 | * device from USB; the host won't see it any more. | 1192 | * device from USB; the host won't see it any more. |
| @@ -1677,9 +1679,10 @@ static void | |||
| 1677 | gadgetfs_suspend (struct usb_gadget *gadget) | 1679 | gadgetfs_suspend (struct usb_gadget *gadget) |
| 1678 | { | 1680 | { |
| 1679 | struct dev_data *dev = get_gadget_data (gadget); | 1681 | struct dev_data *dev = get_gadget_data (gadget); |
| 1682 | unsigned long flags; | ||
| 1680 | 1683 | ||
| 1681 | INFO (dev, "suspended from state %d\n", dev->state); | 1684 | INFO (dev, "suspended from state %d\n", dev->state); |
| 1682 | spin_lock (&dev->lock); | 1685 | spin_lock_irqsave(&dev->lock, flags); |
| 1683 | switch (dev->state) { | 1686 | switch (dev->state) { |
| 1684 | case STATE_DEV_SETUP: // VERY odd... host died?? | 1687 | case STATE_DEV_SETUP: // VERY odd... host died?? |
| 1685 | case STATE_DEV_CONNECTED: | 1688 | case STATE_DEV_CONNECTED: |
| @@ -1690,7 +1693,7 @@ gadgetfs_suspend (struct usb_gadget *gadget) | |||
| 1690 | default: | 1693 | default: |
| 1691 | break; | 1694 | break; |
| 1692 | } | 1695 | } |
| 1693 | spin_unlock (&dev->lock); | 1696 | spin_unlock_irqrestore(&dev->lock, flags); |
| 1694 | } | 1697 | } |
| 1695 | 1698 | ||
| 1696 | static struct usb_gadget_driver gadgetfs_driver = { | 1699 | static struct usb_gadget_driver gadgetfs_driver = { |
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c index ccabb51cb98d..7635fd7cc328 100644 --- a/drivers/usb/gadget/udc/dummy_hcd.c +++ b/drivers/usb/gadget/udc/dummy_hcd.c | |||
| @@ -442,23 +442,16 @@ static void set_link_state(struct dummy_hcd *dum_hcd) | |||
| 442 | /* Report reset and disconnect events to the driver */ | 442 | /* Report reset and disconnect events to the driver */ |
| 443 | if (dum->driver && (disconnect || reset)) { | 443 | if (dum->driver && (disconnect || reset)) { |
| 444 | stop_activity(dum); | 444 | stop_activity(dum); |
| 445 | spin_unlock(&dum->lock); | ||
| 446 | if (reset) | 445 | if (reset) |
| 447 | usb_gadget_udc_reset(&dum->gadget, dum->driver); | 446 | usb_gadget_udc_reset(&dum->gadget, dum->driver); |
| 448 | else | 447 | else |
| 449 | dum->driver->disconnect(&dum->gadget); | 448 | dum->driver->disconnect(&dum->gadget); |
| 450 | spin_lock(&dum->lock); | ||
| 451 | } | 449 | } |
| 452 | } else if (dum_hcd->active != dum_hcd->old_active) { | 450 | } else if (dum_hcd->active != dum_hcd->old_active) { |
| 453 | if (dum_hcd->old_active && dum->driver->suspend) { | 451 | if (dum_hcd->old_active && dum->driver->suspend) |
| 454 | spin_unlock(&dum->lock); | ||
| 455 | dum->driver->suspend(&dum->gadget); | 452 | dum->driver->suspend(&dum->gadget); |
| 456 | spin_lock(&dum->lock); | 453 | else if (!dum_hcd->old_active && dum->driver->resume) |
| 457 | } else if (!dum_hcd->old_active && dum->driver->resume) { | ||
| 458 | spin_unlock(&dum->lock); | ||
| 459 | dum->driver->resume(&dum->gadget); | 454 | dum->driver->resume(&dum->gadget); |
| 460 | spin_lock(&dum->lock); | ||
| 461 | } | ||
| 462 | } | 455 | } |
| 463 | 456 | ||
| 464 | dum_hcd->old_status = dum_hcd->port_status; | 457 | dum_hcd->old_status = dum_hcd->port_status; |
| @@ -983,7 +976,9 @@ static int dummy_udc_stop(struct usb_gadget *g) | |||
| 983 | struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g); | 976 | struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g); |
| 984 | struct dummy *dum = dum_hcd->dum; | 977 | struct dummy *dum = dum_hcd->dum; |
| 985 | 978 | ||
| 979 | spin_lock_irq(&dum->lock); | ||
| 986 | dum->driver = NULL; | 980 | dum->driver = NULL; |
| 981 | spin_unlock_irq(&dum->lock); | ||
| 987 | 982 | ||
| 988 | return 0; | 983 | return 0; |
| 989 | } | 984 | } |
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c index 6cf07857eaca..f2cbd7f8005e 100644 --- a/drivers/usb/gadget/udc/net2280.c +++ b/drivers/usb/gadget/udc/net2280.c | |||
| @@ -2470,11 +2470,8 @@ static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver) | |||
| 2470 | nuke(&dev->ep[i]); | 2470 | nuke(&dev->ep[i]); |
| 2471 | 2471 | ||
| 2472 | /* report disconnect; the driver is already quiesced */ | 2472 | /* report disconnect; the driver is already quiesced */ |
| 2473 | if (driver) { | 2473 | if (driver) |
| 2474 | spin_unlock(&dev->lock); | ||
| 2475 | driver->disconnect(&dev->gadget); | 2474 | driver->disconnect(&dev->gadget); |
| 2476 | spin_lock(&dev->lock); | ||
| 2477 | } | ||
| 2478 | 2475 | ||
| 2479 | usb_reinit(dev); | 2476 | usb_reinit(dev); |
| 2480 | } | 2477 | } |
| @@ -3348,8 +3345,6 @@ next_endpoints: | |||
| 3348 | BIT(PCI_RETRY_ABORT_INTERRUPT)) | 3345 | BIT(PCI_RETRY_ABORT_INTERRUPT)) |
| 3349 | 3346 | ||
| 3350 | static void handle_stat1_irqs(struct net2280 *dev, u32 stat) | 3347 | static void handle_stat1_irqs(struct net2280 *dev, u32 stat) |
| 3351 | __releases(dev->lock) | ||
| 3352 | __acquires(dev->lock) | ||
| 3353 | { | 3348 | { |
| 3354 | struct net2280_ep *ep; | 3349 | struct net2280_ep *ep; |
| 3355 | u32 tmp, num, mask, scratch; | 3350 | u32 tmp, num, mask, scratch; |
| @@ -3390,14 +3385,12 @@ __acquires(dev->lock) | |||
| 3390 | if (disconnect || reset) { | 3385 | if (disconnect || reset) { |
| 3391 | stop_activity(dev, dev->driver); | 3386 | stop_activity(dev, dev->driver); |
| 3392 | ep0_start(dev); | 3387 | ep0_start(dev); |
| 3393 | spin_unlock(&dev->lock); | ||
| 3394 | if (reset) | 3388 | if (reset) |
| 3395 | usb_gadget_udc_reset | 3389 | usb_gadget_udc_reset |
| 3396 | (&dev->gadget, dev->driver); | 3390 | (&dev->gadget, dev->driver); |
| 3397 | else | 3391 | else |
| 3398 | (dev->driver->disconnect) | 3392 | (dev->driver->disconnect) |
| 3399 | (&dev->gadget); | 3393 | (&dev->gadget); |
| 3400 | spin_lock(&dev->lock); | ||
| 3401 | return; | 3394 | return; |
| 3402 | } | 3395 | } |
| 3403 | } | 3396 | } |
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index 5a2d845fb1a6..cd4c88529721 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c | |||
| @@ -623,7 +623,6 @@ static void renesas_usb3_stop_controller(struct renesas_usb3 *usb3) | |||
| 623 | { | 623 | { |
| 624 | usb3_disconnect(usb3); | 624 | usb3_disconnect(usb3); |
| 625 | usb3_write(usb3, 0, USB3_P0_INT_ENA); | 625 | usb3_write(usb3, 0, USB3_P0_INT_ENA); |
| 626 | usb3_write(usb3, 0, USB3_PN_INT_ENA); | ||
| 627 | usb3_write(usb3, 0, USB3_USB_OTG_INT_ENA); | 626 | usb3_write(usb3, 0, USB3_USB_OTG_INT_ENA); |
| 628 | usb3_write(usb3, 0, USB3_USB_INT_ENA_1); | 627 | usb3_write(usb3, 0, USB3_USB_INT_ENA_1); |
| 629 | usb3_write(usb3, 0, USB3_USB_INT_ENA_2); | 628 | usb3_write(usb3, 0, USB3_USB_INT_ENA_2); |
| @@ -1475,7 +1474,13 @@ static void usb3_request_done_pipen(struct renesas_usb3 *usb3, | |||
| 1475 | struct renesas_usb3_request *usb3_req, | 1474 | struct renesas_usb3_request *usb3_req, |
| 1476 | int status) | 1475 | int status) |
| 1477 | { | 1476 | { |
| 1478 | usb3_pn_stop(usb3); | 1477 | unsigned long flags; |
| 1478 | |||
| 1479 | spin_lock_irqsave(&usb3->lock, flags); | ||
| 1480 | if (usb3_pn_change(usb3, usb3_ep->num)) | ||
| 1481 | usb3_pn_stop(usb3); | ||
| 1482 | spin_unlock_irqrestore(&usb3->lock, flags); | ||
| 1483 | |||
| 1479 | usb3_disable_pipe_irq(usb3, usb3_ep->num); | 1484 | usb3_disable_pipe_irq(usb3, usb3_ep->num); |
| 1480 | usb3_request_done(usb3_ep, usb3_req, status); | 1485 | usb3_request_done(usb3_ep, usb3_req, status); |
| 1481 | 1486 | ||
| @@ -1504,30 +1509,46 @@ static void usb3_irq_epc_pipen_bfrdy(struct renesas_usb3 *usb3, int num) | |||
| 1504 | { | 1509 | { |
| 1505 | struct renesas_usb3_ep *usb3_ep = usb3_get_ep(usb3, num); | 1510 | struct renesas_usb3_ep *usb3_ep = usb3_get_ep(usb3, num); |
| 1506 | struct renesas_usb3_request *usb3_req = usb3_get_request(usb3_ep); | 1511 | struct renesas_usb3_request *usb3_req = usb3_get_request(usb3_ep); |
| 1512 | bool done = false; | ||
| 1507 | 1513 | ||
| 1508 | if (!usb3_req) | 1514 | if (!usb3_req) |
| 1509 | return; | 1515 | return; |
| 1510 | 1516 | ||
| 1517 | spin_lock(&usb3->lock); | ||
| 1518 | if (usb3_pn_change(usb3, num)) | ||
| 1519 | goto out; | ||
| 1520 | |||
| 1511 | if (usb3_ep->dir_in) { | 1521 | if (usb3_ep->dir_in) { |
| 1512 | /* Do not stop the IN pipe here to detect LSTTR interrupt */ | 1522 | /* Do not stop the IN pipe here to detect LSTTR interrupt */ |
| 1513 | if (!usb3_write_pipe(usb3_ep, usb3_req, USB3_PN_WRITE)) | 1523 | if (!usb3_write_pipe(usb3_ep, usb3_req, USB3_PN_WRITE)) |
| 1514 | usb3_clear_bit(usb3, PN_INT_BFRDY, USB3_PN_INT_ENA); | 1524 | usb3_clear_bit(usb3, PN_INT_BFRDY, USB3_PN_INT_ENA); |
| 1515 | } else { | 1525 | } else { |
| 1516 | if (!usb3_read_pipe(usb3_ep, usb3_req, USB3_PN_READ)) | 1526 | if (!usb3_read_pipe(usb3_ep, usb3_req, USB3_PN_READ)) |
| 1517 | usb3_request_done_pipen(usb3, usb3_ep, usb3_req, 0); | 1527 | done = true; |
| 1518 | } | 1528 | } |
| 1529 | |||
| 1530 | out: | ||
| 1531 | /* need to unlock because usb3_request_done_pipen() locks it */ | ||
| 1532 | spin_unlock(&usb3->lock); | ||
| 1533 | |||
| 1534 | if (done) | ||
| 1535 | usb3_request_done_pipen(usb3, usb3_ep, usb3_req, 0); | ||
| 1519 | } | 1536 | } |
| 1520 | 1537 | ||
| 1521 | static void usb3_irq_epc_pipen(struct renesas_usb3 *usb3, int num) | 1538 | static void usb3_irq_epc_pipen(struct renesas_usb3 *usb3, int num) |
| 1522 | { | 1539 | { |
| 1523 | u32 pn_int_sta; | 1540 | u32 pn_int_sta; |
| 1524 | 1541 | ||
| 1525 | if (usb3_pn_change(usb3, num) < 0) | 1542 | spin_lock(&usb3->lock); |
| 1543 | if (usb3_pn_change(usb3, num) < 0) { | ||
| 1544 | spin_unlock(&usb3->lock); | ||
| 1526 | return; | 1545 | return; |
| 1546 | } | ||
| 1527 | 1547 | ||
| 1528 | pn_int_sta = usb3_read(usb3, USB3_PN_INT_STA); | 1548 | pn_int_sta = usb3_read(usb3, USB3_PN_INT_STA); |
| 1529 | pn_int_sta &= usb3_read(usb3, USB3_PN_INT_ENA); | 1549 | pn_int_sta &= usb3_read(usb3, USB3_PN_INT_ENA); |
| 1530 | usb3_write(usb3, pn_int_sta, USB3_PN_INT_STA); | 1550 | usb3_write(usb3, pn_int_sta, USB3_PN_INT_STA); |
| 1551 | spin_unlock(&usb3->lock); | ||
| 1531 | if (pn_int_sta & PN_INT_LSTTR) | 1552 | if (pn_int_sta & PN_INT_LSTTR) |
| 1532 | usb3_irq_epc_pipen_lsttr(usb3, num); | 1553 | usb3_irq_epc_pipen_lsttr(usb3, num); |
| 1533 | if (pn_int_sta & PN_INT_BFRDY) | 1554 | if (pn_int_sta & PN_INT_BFRDY) |
| @@ -1660,6 +1681,7 @@ static int usb3_disable_pipe_n(struct renesas_usb3_ep *usb3_ep) | |||
| 1660 | 1681 | ||
| 1661 | spin_lock_irqsave(&usb3->lock, flags); | 1682 | spin_lock_irqsave(&usb3->lock, flags); |
| 1662 | if (!usb3_pn_change(usb3, usb3_ep->num)) { | 1683 | if (!usb3_pn_change(usb3, usb3_ep->num)) { |
| 1684 | usb3_write(usb3, 0, USB3_PN_INT_ENA); | ||
| 1663 | usb3_write(usb3, 0, USB3_PN_RAMMAP); | 1685 | usb3_write(usb3, 0, USB3_PN_RAMMAP); |
| 1664 | usb3_clear_bit(usb3, PN_CON_EN, USB3_PN_CON); | 1686 | usb3_clear_bit(usb3, PN_CON_EN, USB3_PN_CON); |
| 1665 | } | 1687 | } |
| @@ -1799,6 +1821,9 @@ static int renesas_usb3_start(struct usb_gadget *gadget, | |||
| 1799 | /* hook up the driver */ | 1821 | /* hook up the driver */ |
| 1800 | usb3->driver = driver; | 1822 | usb3->driver = driver; |
| 1801 | 1823 | ||
| 1824 | pm_runtime_enable(usb3_to_dev(usb3)); | ||
| 1825 | pm_runtime_get_sync(usb3_to_dev(usb3)); | ||
| 1826 | |||
| 1802 | renesas_usb3_init_controller(usb3); | 1827 | renesas_usb3_init_controller(usb3); |
| 1803 | 1828 | ||
| 1804 | return 0; | 1829 | return 0; |
| @@ -1807,14 +1832,14 @@ static int renesas_usb3_start(struct usb_gadget *gadget, | |||
| 1807 | static int renesas_usb3_stop(struct usb_gadget *gadget) | 1832 | static int renesas_usb3_stop(struct usb_gadget *gadget) |
| 1808 | { | 1833 | { |
| 1809 | struct renesas_usb3 *usb3 = gadget_to_renesas_usb3(gadget); | 1834 | struct renesas_usb3 *usb3 = gadget_to_renesas_usb3(gadget); |
| 1810 | unsigned long flags; | ||
| 1811 | 1835 | ||
| 1812 | spin_lock_irqsave(&usb3->lock, flags); | ||
| 1813 | usb3->softconnect = false; | 1836 | usb3->softconnect = false; |
| 1814 | usb3->gadget.speed = USB_SPEED_UNKNOWN; | 1837 | usb3->gadget.speed = USB_SPEED_UNKNOWN; |
| 1815 | usb3->driver = NULL; | 1838 | usb3->driver = NULL; |
| 1816 | renesas_usb3_stop_controller(usb3); | 1839 | renesas_usb3_stop_controller(usb3); |
| 1817 | spin_unlock_irqrestore(&usb3->lock, flags); | 1840 | |
| 1841 | pm_runtime_put(usb3_to_dev(usb3)); | ||
| 1842 | pm_runtime_disable(usb3_to_dev(usb3)); | ||
| 1818 | 1843 | ||
| 1819 | return 0; | 1844 | return 0; |
| 1820 | } | 1845 | } |
| @@ -1891,9 +1916,6 @@ static int renesas_usb3_remove(struct platform_device *pdev) | |||
| 1891 | 1916 | ||
| 1892 | device_remove_file(&pdev->dev, &dev_attr_role); | 1917 | device_remove_file(&pdev->dev, &dev_attr_role); |
| 1893 | 1918 | ||
| 1894 | pm_runtime_put(&pdev->dev); | ||
| 1895 | pm_runtime_disable(&pdev->dev); | ||
| 1896 | |||
| 1897 | usb_del_gadget_udc(&usb3->gadget); | 1919 | usb_del_gadget_udc(&usb3->gadget); |
| 1898 | 1920 | ||
| 1899 | __renesas_usb3_ep_free_request(usb3->ep0_req); | 1921 | __renesas_usb3_ep_free_request(usb3->ep0_req); |
| @@ -2099,9 +2121,6 @@ static int renesas_usb3_probe(struct platform_device *pdev) | |||
| 2099 | 2121 | ||
| 2100 | usb3->workaround_for_vbus = priv->workaround_for_vbus; | 2122 | usb3->workaround_for_vbus = priv->workaround_for_vbus; |
| 2101 | 2123 | ||
| 2102 | pm_runtime_enable(&pdev->dev); | ||
| 2103 | pm_runtime_get_sync(&pdev->dev); | ||
| 2104 | |||
| 2105 | dev_info(&pdev->dev, "probed\n"); | 2124 | dev_info(&pdev->dev, "probed\n"); |
| 2106 | 2125 | ||
| 2107 | return 0; | 2126 | return 0; |
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 1f1687e888d6..fddf2731f798 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c | |||
| @@ -2119,11 +2119,12 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, | |||
| 2119 | { | 2119 | { |
| 2120 | u32 temp, port_offset, port_count; | 2120 | u32 temp, port_offset, port_count; |
| 2121 | int i; | 2121 | int i; |
| 2122 | u8 major_revision; | 2122 | u8 major_revision, minor_revision; |
| 2123 | struct xhci_hub *rhub; | 2123 | struct xhci_hub *rhub; |
| 2124 | 2124 | ||
| 2125 | temp = readl(addr); | 2125 | temp = readl(addr); |
| 2126 | major_revision = XHCI_EXT_PORT_MAJOR(temp); | 2126 | major_revision = XHCI_EXT_PORT_MAJOR(temp); |
| 2127 | minor_revision = XHCI_EXT_PORT_MINOR(temp); | ||
| 2127 | 2128 | ||
| 2128 | if (major_revision == 0x03) { | 2129 | if (major_revision == 0x03) { |
| 2129 | rhub = &xhci->usb3_rhub; | 2130 | rhub = &xhci->usb3_rhub; |
| @@ -2137,7 +2138,9 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, | |||
| 2137 | return; | 2138 | return; |
| 2138 | } | 2139 | } |
| 2139 | rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp); | 2140 | rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp); |
| 2140 | rhub->min_rev = XHCI_EXT_PORT_MINOR(temp); | 2141 | |
| 2142 | if (rhub->min_rev < minor_revision) | ||
| 2143 | rhub->min_rev = minor_revision; | ||
| 2141 | 2144 | ||
| 2142 | /* Port offset and count in the third dword, see section 7.2 */ | 2145 | /* Port offset and count in the third dword, see section 7.2 */ |
| 2143 | temp = readl(addr + 2); | 2146 | temp = readl(addr + 2); |
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index fcf1f3f63e7a..1bcf971141c0 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c | |||
| @@ -201,6 +201,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) | |||
| 201 | if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && | 201 | if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && |
| 202 | pdev->device == 0x1042) | 202 | pdev->device == 0x1042) |
| 203 | xhci->quirks |= XHCI_BROKEN_STREAMS; | 203 | xhci->quirks |= XHCI_BROKEN_STREAMS; |
| 204 | if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && | ||
| 205 | pdev->device == 0x1142) | ||
| 206 | xhci->quirks |= XHCI_TRUST_TX_LENGTH; | ||
| 204 | 207 | ||
| 205 | if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241) | 208 | if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241) |
| 206 | xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7; | 209 | xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7; |
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c index 9c7ee26ef388..bc6a9be2ccc5 100644 --- a/drivers/usb/musb/musb_dsps.c +++ b/drivers/usb/musb/musb_dsps.c | |||
| @@ -245,6 +245,11 @@ static int dsps_check_status(struct musb *musb, void *unused) | |||
| 245 | dsps_mod_timer_optional(glue); | 245 | dsps_mod_timer_optional(glue); |
| 246 | break; | 246 | break; |
| 247 | case OTG_STATE_A_WAIT_BCON: | 247 | case OTG_STATE_A_WAIT_BCON: |
| 248 | /* keep VBUS on for host-only mode */ | ||
| 249 | if (musb->port_mode == MUSB_PORT_MODE_HOST) { | ||
| 250 | dsps_mod_timer_optional(glue); | ||
| 251 | break; | ||
| 252 | } | ||
| 248 | musb_writeb(musb->mregs, MUSB_DEVCTL, 0); | 253 | musb_writeb(musb->mregs, MUSB_DEVCTL, 0); |
| 249 | skip_session = 1; | 254 | skip_session = 1; |
| 250 | /* fall */ | 255 | /* fall */ |
diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c index 687ebb053438..41d7979d81c5 100644 --- a/drivers/video/fbdev/core/fbmon.c +++ b/drivers/video/fbdev/core/fbmon.c | |||
| @@ -1048,7 +1048,7 @@ void fb_edid_add_monspecs(unsigned char *edid, struct fb_monspecs *specs) | |||
| 1048 | 1048 | ||
| 1049 | for (i = 0; i < (128 - edid[2]) / DETAILED_TIMING_DESCRIPTION_SIZE; | 1049 | for (i = 0; i < (128 - edid[2]) / DETAILED_TIMING_DESCRIPTION_SIZE; |
| 1050 | i++, block += DETAILED_TIMING_DESCRIPTION_SIZE) | 1050 | i++, block += DETAILED_TIMING_DESCRIPTION_SIZE) |
| 1051 | if (PIXEL_CLOCK) | 1051 | if (PIXEL_CLOCK != 0) |
| 1052 | edt[num++] = block - edid; | 1052 | edt[num++] = block - edid; |
| 1053 | 1053 | ||
| 1054 | /* Yikes, EDID data is totally useless */ | 1054 | /* Yikes, EDID data is totally useless */ |
diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c index ec2e7e353685..449fceaf79d5 100644 --- a/drivers/video/fbdev/smscufx.c +++ b/drivers/video/fbdev/smscufx.c | |||
| @@ -1646,8 +1646,9 @@ static int ufx_usb_probe(struct usb_interface *interface, | |||
| 1646 | dev_dbg(dev->gdev, "%s %s - serial #%s\n", | 1646 | dev_dbg(dev->gdev, "%s %s - serial #%s\n", |
| 1647 | usbdev->manufacturer, usbdev->product, usbdev->serial); | 1647 | usbdev->manufacturer, usbdev->product, usbdev->serial); |
| 1648 | dev_dbg(dev->gdev, "vid_%04x&pid_%04x&rev_%04x driver's ufx_data struct at %p\n", | 1648 | dev_dbg(dev->gdev, "vid_%04x&pid_%04x&rev_%04x driver's ufx_data struct at %p\n", |
| 1649 | usbdev->descriptor.idVendor, usbdev->descriptor.idProduct, | 1649 | le16_to_cpu(usbdev->descriptor.idVendor), |
| 1650 | usbdev->descriptor.bcdDevice, dev); | 1650 | le16_to_cpu(usbdev->descriptor.idProduct), |
| 1651 | le16_to_cpu(usbdev->descriptor.bcdDevice), dev); | ||
| 1651 | dev_dbg(dev->gdev, "console enable=%d\n", console); | 1652 | dev_dbg(dev->gdev, "console enable=%d\n", console); |
| 1652 | dev_dbg(dev->gdev, "fb_defio enable=%d\n", fb_defio); | 1653 | dev_dbg(dev->gdev, "fb_defio enable=%d\n", fb_defio); |
| 1653 | 1654 | ||
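The smscufx hunk (and the udlfb hunk below, which makes the same change) fixes an endianness bug in a debug print: idVendor, idProduct and bcdDevice are stored in the wire-format USB device descriptor as little-endian __le16 fields, so printing them raw yields byte-swapped values on big-endian hosts. A minimal sketch of the pattern, using a hypothetical helper name:

#include <linux/usb.h>

/*
 * Hypothetical helper, not driver code: convert descriptor fields from
 * wire order (little-endian) to CPU order before formatting them. On a
 * little-endian host le16_to_cpu() is a no-op; on big-endian it swaps.
 */
static void print_usb_ids(struct usb_device *usbdev)
{
	dev_dbg(&usbdev->dev, "vid_%04x&pid_%04x&rev_%04x\n",
		le16_to_cpu(usbdev->descriptor.idVendor),
		le16_to_cpu(usbdev->descriptor.idProduct),
		le16_to_cpu(usbdev->descriptor.bcdDevice));
}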
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c index 6a3c353de7c3..05ef657235df 100644 --- a/drivers/video/fbdev/udlfb.c +++ b/drivers/video/fbdev/udlfb.c | |||
| @@ -1105,8 +1105,8 @@ static int dlfb_ops_blank(int blank_mode, struct fb_info *info) | |||
| 1105 | char *bufptr; | 1105 | char *bufptr; |
| 1106 | struct urb *urb; | 1106 | struct urb *urb; |
| 1107 | 1107 | ||
| 1108 | pr_info("/dev/fb%d FB_BLANK mode %d --> %d\n", | 1108 | pr_debug("/dev/fb%d FB_BLANK mode %d --> %d\n", |
| 1109 | info->node, dev->blank_mode, blank_mode); | 1109 | info->node, dev->blank_mode, blank_mode); |
| 1110 | 1110 | ||
| 1111 | if ((dev->blank_mode == FB_BLANK_POWERDOWN) && | 1111 | if ((dev->blank_mode == FB_BLANK_POWERDOWN) && |
| 1112 | (blank_mode != FB_BLANK_POWERDOWN)) { | 1112 | (blank_mode != FB_BLANK_POWERDOWN)) { |
| @@ -1613,8 +1613,9 @@ static int dlfb_usb_probe(struct usb_interface *interface, | |||
| 1613 | pr_info("%s %s - serial #%s\n", | 1613 | pr_info("%s %s - serial #%s\n", |
| 1614 | usbdev->manufacturer, usbdev->product, usbdev->serial); | 1614 | usbdev->manufacturer, usbdev->product, usbdev->serial); |
| 1615 | pr_info("vid_%04x&pid_%04x&rev_%04x driver's dlfb_data struct at %p\n", | 1615 | pr_info("vid_%04x&pid_%04x&rev_%04x driver's dlfb_data struct at %p\n", |
| 1616 | usbdev->descriptor.idVendor, usbdev->descriptor.idProduct, | 1616 | le16_to_cpu(usbdev->descriptor.idVendor), |
| 1617 | usbdev->descriptor.bcdDevice, dev); | 1617 | le16_to_cpu(usbdev->descriptor.idProduct), |
| 1618 | le16_to_cpu(usbdev->descriptor.bcdDevice), dev); | ||
| 1618 | pr_info("console enable=%d\n", console); | 1619 | pr_info("console enable=%d\n", console); |
| 1619 | pr_info("fb_defio enable=%d\n", fb_defio); | 1620 | pr_info("fb_defio enable=%d\n", fb_defio); |
| 1620 | pr_info("shadow enable=%d\n", shadow); | 1621 | pr_info("shadow enable=%d\n", shadow); |
diff --git a/drivers/video/fbdev/via/viafbdev.c b/drivers/video/fbdev/via/viafbdev.c index f9718f012aae..badee04ef496 100644 --- a/drivers/video/fbdev/via/viafbdev.c +++ b/drivers/video/fbdev/via/viafbdev.c | |||
| @@ -1630,16 +1630,14 @@ static void viafb_init_proc(struct viafb_shared *shared) | |||
| 1630 | } | 1630 | } |
| 1631 | static void viafb_remove_proc(struct viafb_shared *shared) | 1631 | static void viafb_remove_proc(struct viafb_shared *shared) |
| 1632 | { | 1632 | { |
| 1633 | struct proc_dir_entry *viafb_entry = shared->proc_entry, | 1633 | struct proc_dir_entry *viafb_entry = shared->proc_entry; |
| 1634 | *iga1_entry = shared->iga1_proc_entry, | ||
| 1635 | *iga2_entry = shared->iga2_proc_entry; | ||
| 1636 | 1634 | ||
| 1637 | if (!viafb_entry) | 1635 | if (!viafb_entry) |
| 1638 | return; | 1636 | return; |
| 1639 | 1637 | ||
| 1640 | remove_proc_entry("output_devices", iga2_entry); | 1638 | remove_proc_entry("output_devices", shared->iga2_proc_entry); |
| 1641 | remove_proc_entry("iga2", viafb_entry); | 1639 | remove_proc_entry("iga2", viafb_entry); |
| 1642 | remove_proc_entry("output_devices", iga1_entry); | 1640 | remove_proc_entry("output_devices", shared->iga1_proc_entry); |
| 1643 | remove_proc_entry("iga1", viafb_entry); | 1641 | remove_proc_entry("iga1", viafb_entry); |
| 1644 | remove_proc_entry("supported_output_devices", viafb_entry); | 1642 | remove_proc_entry("supported_output_devices", viafb_entry); |
| 1645 | 1643 | ||
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 408c174ef0d5..22caf808bfab 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c | |||
| @@ -663,6 +663,12 @@ static int virtballoon_restore(struct virtio_device *vdev) | |||
| 663 | } | 663 | } |
| 664 | #endif | 664 | #endif |
| 665 | 665 | ||
| 666 | static int virtballoon_validate(struct virtio_device *vdev) | ||
| 667 | { | ||
| 668 | __virtio_clear_bit(vdev, VIRTIO_F_IOMMU_PLATFORM); | ||
| 669 | return 0; | ||
| 670 | } | ||
| 671 | |||
| 666 | static unsigned int features[] = { | 672 | static unsigned int features[] = { |
| 667 | VIRTIO_BALLOON_F_MUST_TELL_HOST, | 673 | VIRTIO_BALLOON_F_MUST_TELL_HOST, |
| 668 | VIRTIO_BALLOON_F_STATS_VQ, | 674 | VIRTIO_BALLOON_F_STATS_VQ, |
| @@ -675,6 +681,7 @@ static struct virtio_driver virtio_balloon_driver = { | |||
| 675 | .driver.name = KBUILD_MODNAME, | 681 | .driver.name = KBUILD_MODNAME, |
| 676 | .driver.owner = THIS_MODULE, | 682 | .driver.owner = THIS_MODULE, |
| 677 | .id_table = id_table, | 683 | .id_table = id_table, |
| 684 | .validate = virtballoon_validate, | ||
| 678 | .probe = virtballoon_probe, | 685 | .probe = virtballoon_probe, |
| 679 | .remove = virtballoon_remove, | 686 | .remove = virtballoon_remove, |
| 680 | .config_changed = virtballoon_changed, | 687 | .config_changed = virtballoon_changed, |
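The virtio_balloon hunk adds a .validate callback, which the virtio core runs after the device's feature bits have been read but before the driver finalizes them, letting a driver veto or clear features it cannot honor; here VIRTIO_F_IOMMU_PLATFORM is cleared because the balloon hands the host raw guest PFNs rather than DMA-mapped addresses. A sketch of the general hook, with hypothetical names:

#include <linux/virtio.h>
#include <linux/virtio_config.h>

/*
 * Hypothetical example: clear a feature bit the driver cannot support.
 * Clearing a bit the device never offered is harmless, so no check is
 * needed; returning a negative errno instead would abort the probe.
 */
static int example_validate(struct virtio_device *vdev)
{
	__virtio_clear_bit(vdev, VIRTIO_F_IOMMU_PLATFORM);
	return 0;
}

The callback is wired up through the .validate member of struct virtio_driver, as the second half of the hunk shows.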
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index 7a92a5e1d40c..feca75b07fdd 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c | |||
| @@ -362,8 +362,8 @@ static int mmap_batch_fn(void *data, int nr, void *state) | |||
| 362 | st->global_error = 1; | 362 | st->global_error = 1; |
| 363 | } | 363 | } |
| 364 | } | 364 | } |
| 365 | st->va += PAGE_SIZE * nr; | 365 | st->va += XEN_PAGE_SIZE * nr; |
| 366 | st->index += nr; | 366 | st->index += nr / XEN_PFN_PER_PAGE; |
| 367 | 367 | ||
| 368 | return 0; | 368 | return 0; |
| 369 | } | 369 | } |
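The privcmd hunk adjusts the batch bookkeeping for configurations where the kernel page size is larger than Xen's fixed 4 KiB page: nr counts Xen-sized frames, so the virtual address should advance by XEN_PAGE_SIZE per frame and the kernel-page index by nr / XEN_PFN_PER_PAGE. A worked example, assuming a 64 KiB kernel page size for illustration:

/*
 * Hypothetical numbers (e.g. arm64 built with 64 KiB pages):
 *   XEN_PAGE_SIZE    = 4096
 *   PAGE_SIZE        = 65536
 *   XEN_PFN_PER_PAGE = PAGE_SIZE / XEN_PAGE_SIZE = 16
 *
 * For a batch of nr = 16 Xen frames:
 *   st->va    += XEN_PAGE_SIZE * nr      advances 65536 bytes (one kernel page)
 *   st->index += nr / XEN_PFN_PER_PAGE   advances by 1
 *
 * The old PAGE_SIZE-based arithmetic would have advanced va by 1 MiB and
 * index by 16 for the same batch, over-stepping the mapping whenever the
 * kernel page size exceeds the Xen page size.
 */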
